| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
# coding=utf-8
# Imports the Google Cloud client library
import logging
import string
import sys
import html
import numpy as np
import pandas as pd
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from google.cloud import language
from google.cloud import translate
from google.cloud.language import enums
from google.cloud.language import types
def get_ner_location_of_text(text):
no_random_road_groups = []
no_hospital_loc_groups = []
loc_groups = []
loc_entities = []
loc_entities_indices = []
loc_entities_word_indices = []
biggest_group_index = -1
reference_grouping = False
# Instantiates the clients
client = language.LanguageServiceClient()
translate_client = translate.Client()
# Translate
result = translate_client.translate(text, target_language='en', source_language='iw')
translated_text = result['translatedText']
translated_text = html.unescape(translated_text)
# Pre-processing - from what I saw only the first line has the location
translated_text = list(filter(None, translated_text.split('.')))[0]
# Analyze (Named Entity Recognition)
document = types.Document(content=translated_text, type=enums.Document.Type.PLAIN_TEXT)
response = client.analyze_entities(document=document)
# Getting the location entities and their indices in the text and adding them to a list
translated_text_word_split = list(filter(None, translated_text.split(' ')))
for entity in response.entities:
if entity.type == enums.Entity.Type.LOCATION:
if ' ' in entity.name:
for item in list(filter(None, entity.name.split(' '))):
loc_entities.append(item)
loc_entities_indices.append(translated_text.index(entity.name) + entity.name.index(item))
else:
loc_entities.append(entity.name)
loc_entities_indices.append(translated_text.index(entity.name))
# In case there is a reference to a previous location
if 'city' == entity.name.lower() or 'town' == entity.name.lower() or 'village' == entity.name.lower() or \
'junction' == entity.name.lower() or 'interchange' == entity.name.lower() or \
'intersect' == entity.name.lower() or 'street' == entity.name.lower():
reference_grouping = True
# Sort entities by appearing order in the string
loc_entities = [x for _, x in sorted(zip(loc_entities_indices, loc_entities))]
loc_entities_new = []
for item in loc_entities:
loc_entities_word_indices.append(
[idx for idx, s in enumerate(translated_text_word_split) if item in s][loc_entities_new.count(item)])
loc_entities_new.append(item)
loc_entities = loc_entities_new
# Location grouping - takes the largest group of words indicating location based on distance between groups
if len(loc_entities) >= 1:
diff = [loc_entities_word_indices[i + 1] - loc_entities_word_indices[i] for i in
range(len(loc_entities_word_indices) - 1)]
if diff and max(diff) > 5: # Distance is greater than 5 words
avg = sum(diff) / len(diff)
loc_groups = [[loc_entities_word_indices[0]]]
for x in loc_entities_word_indices[1:]:
if x - loc_groups[-1][-1] < avg:
loc_groups[-1].append(x)
else:
loc_groups.append([x])
# 'road' alone is recognised as a location, so if road is alone in the group, ignore it
no_random_road_groups = [group for group in loc_groups
if
not (len(group) == 1 and 'road' == translated_text_word_split[group[0]].lower())]
# We are not interested in the hospital location, unless the city isn't mentioned elsewhere
no_hospital_loc_groups = [group for group in no_random_road_groups
if not
any('hospital' in translated_text_word_split[item].lower() for item in group)]
bounds_loc_groups = [i[-1] - i[0] for ind, i in enumerate(no_hospital_loc_groups)]
biggest_group_index = bounds_loc_groups.index(max(bounds_loc_groups))
# Entities of the largest group
loc_entities = [translated_text_word_split[item] for item in no_hospital_loc_groups[biggest_group_index]]
# Getting the full string from the text indicating the location and not just entities
translated_location = translated_text[
translated_text.index(loc_entities[0]):translated_text.index(loc_entities[-1]) + len(
loc_entities[-1])]
# If there was a 'the' before the string, add it
if translated_text[translated_text.index(loc_entities[0]) - 4:translated_text.index(loc_entities[0])].lower() \
== 'the ':
translated_location = translated_text[
translated_text.index(loc_entities[0]) - 4:translated_text.index(
loc_entities[-1]) + len(
loc_entities[-1])]
# If a location without name is in the beginning of the string, add the previous word
if translated_location.lower().startswith('street') or translated_location.lower().startswith('interchange') \
or translated_location.lower().startswith('village') or translated_location.lower().startswith('town') \
or translated_location.lower().startswith('city') or translated_location.lower().startswith(
'intersection') \
or translated_location.lower().startswith('junction'):
translated_location = translated_text_word_split[translated_text_word_split.index(loc_entities[0]) - 1] \
+ ' ' + translated_location
reference_grouping = False
# Trying to solve the reference in case there is another group - first without the hospital group
if reference_grouping and len(no_hospital_loc_groups) >= 2:
previous = sys.maxsize
if biggest_group_index > 0:
previous = no_hospital_loc_groups[biggest_group_index][0] - \
no_hospital_loc_groups[biggest_group_index - 1][-1]
# Take the previous group, and from there, the last word, closest road to current group
if previous != sys.maxsize:
text_to_replace = translated_text_word_split[
no_hospital_loc_groups[biggest_group_index - 1][-1]]
if len(no_hospital_loc_groups[biggest_group_index - 1]) > 1:
last = no_hospital_loc_groups[biggest_group_index - 1][-1]
for index, val in enumerate(loc_groups[biggest_group_index - 1][::-1][1:]):
if last - val == 1:
text_to_replace = translated_text_word_split[
no_hospital_loc_groups[biggest_group_index - 1][
-2 - index]] + ' ' + text_to_replace
last = val
else:
break
translated_location = translated_location.replace(
'the junction', text_to_replace).replace(
'the intersect', text_to_replace).replace(
'the interchange', text_to_replace).replace(
'the street', text_to_replace).replace(
'the city', text_to_replace).replace(
'the town', text_to_replace).replace(
'the village', text_to_replace)
# Without hospital there weren't enough groups, so use it as well
elif reference_grouping and len(no_random_road_groups) >= 2:
previous = sys.maxsize
bounds_loc_groups = [i[-1] - i[0] for ind, i in enumerate(no_random_road_groups)]
biggest_group_index = bounds_loc_groups.index(max(bounds_loc_groups))
if biggest_group_index > 0:
previous = no_random_road_groups[biggest_group_index][0] - \
no_random_road_groups[biggest_group_index - 1][-1]
# Take the previous group, and from there, the last word, closest road to current group
if previous != sys.maxsize and 'hospital' not in \
translated_text_word_split[no_random_road_groups[biggest_group_index - 1][-1]].lower():
text_to_replace = translated_text_word_split[
no_random_road_groups[biggest_group_index - 1][-1]]
if len(no_random_road_groups[biggest_group_index - 1]) > 1:
last = no_random_road_groups[biggest_group_index - 1][-1]
for index, val in enumerate(loc_groups[biggest_group_index - 1][::-1][1:]):
if last - val == 1:
text_to_replace = translated_text_word_split[
no_random_road_groups[biggest_group_index - 1][
-2 - index]] + ' ' + text_to_replace
last = val
else:
break
translated_location = translated_location.replace(
'the junction', text_to_replace).replace(
'the intersect', text_to_replace).replace(
'the interchange', text_to_replace).replace(
'the street', text_to_replace).replace(
'the city', text_to_replace).replace(
'the town', text_to_replace).replace(
'the village', text_to_replace)
elif len(loc_entities) == 1:
        translated_location = loc_entities[0]
# If there was 'the' before the entity, add it
if translated_text[translated_text.index(loc_entities[0]) - 4:translated_text.index(loc_entities[0])].lower() \
== 'the ':
translated_location = translated_text[
translated_text.index(loc_entities[0]):translated_text.index(loc_entities[0]) + len(
loc_entities[0])]
# If the entity is a location without name, add previous word
if translated_location.lower().startswith('street') or translated_location.lower().startswith('interchange') \
or translated_location.lower().startswith('village') or translated_location.lower().startswith('town') \
or translated_location.lower().startswith('city') or translated_location.lower().startswith(
'intersection') \
or translated_location.lower().startswith('junction'):
translated_location = translated_text_word_split[translated_text_word_split.index(loc_entities[0]) - 1] \
+ ' ' + translated_location
else:
translated_location = ''
# Processing the location
translated_location = translated_location.strip()
if translated_location != '' and ',' == translated_location[-1]:
translated_location = translated_location[:-1]
translated_location = html.unescape(translated_location)
if translated_location == '':
translated_location = 'failed to extract location'
logging.info('location found: ' + translated_location)
return translated_location
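# A minimal sketch of the distance-based grouping used above (the word indices are
# assumed example values, not real NER output): consecutive word positions stay in
# one group while the gap to the previous index is below the average gap.
def _demo_group_location_indices():
    word_indices = [2, 3, 4, 20, 21]  # hypothetical positions of location words
    diff = [word_indices[i + 1] - word_indices[i] for i in range(len(word_indices) - 1)]
    avg = sum(diff) / len(diff)  # (1 + 1 + 16 + 1) / 4 = 4.75
    groups = [[word_indices[0]]]
    for x in word_indices[1:]:
        if x - groups[-1][-1] < avg:
            groups[-1].append(x)
        else:
            groups.append([x])
    assert groups == [[2, 3, 4], [20, 21]]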
def remove_text_inside_brackets(text, brackets="()[]{}"):
count = [0] * (len(brackets) // 2) # count open/close brackets
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b: # found bracket
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close # `+1`: open, `-1`: close
if count[kind] < 0: # unbalanced bracket
count[kind] = 0 # keep it
else: # found bracket to remove
break
else: # character is not a [balanced] bracket
if not any(count): # outside brackets
saved_chars.append(character)
return ''.join(saved_chars)
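# A small sanity check for the helper above (the example strings are assumed, not
# taken from real reports): bracketed spans are removed and the rest is kept.
def _demo_remove_text_inside_brackets():
    assert remove_text_inside_brackets('Main St (near mall) junction') == 'Main St  junction'
    assert remove_text_inside_brackets('no brackets here') == 'no brackets here'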
def preprocess_text(text, get_first=False):
table_no_dot = str.maketrans(string.punctuation.replace('.', ''),
' ' * len(string.punctuation.replace('.', ''))) # remove punctuation, without '.'
table = str.maketrans(string.punctuation, ' ' * len(string.punctuation)) # remove punctuation
if type(text) != str:
text = str(text)
if any(key in text for key in '()[]{}'):
text = remove_text_inside_brackets(text)
if get_first:
return (' '.join(text.translate(table_no_dot).split())).strip().split('.')[
0] # remove multiple whitespaces and return first sentence
else:
return (' '.join(text.translate(table).split())).strip() # remove multiple whitespaces
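# Illustration of the punctuation handling above (the sample sentences are assumed):
# str.translate maps punctuation to spaces, the join/split pair collapses whitespace,
# and get_first additionally keeps only the first sentence.
def _demo_preprocess_text():
    assert preprocess_text('Road 4, near (the) mall!') == 'Road 4 near mall'
    assert preprocess_text('First part. Second part', get_first=True) == 'First part'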
def preprocess_intersection(intersections):
intersections = intersections.replace('יישוב', '')
intersections = intersections.replace('ישוב', '')
intersections = intersections.replace('מושבה', '')
intersections = intersections.replace('מושב', '')
intersections = intersections.replace('צומת ל', '')
intersections = intersections.replace('צומת', '')
intersections = intersections.replace('מחלף', '')
intersections = intersections.replace('כניסה ל', '')
intersections = intersections.strip()
return intersections
def process_streets_table(addresses_df):
streets = pd.DataFrame(addresses_df.drop(
['road1', 'road2', 'non_urban_intersection_hebrew'], axis=1))
streets.yishuv_name = streets.yishuv_name.astype(str)
streets.street1_hebrew = streets.street1_hebrew.astype(str)
streets.street2_hebrew = streets.street2_hebrew.astype(str)
streets['city'] = streets.yishuv_name
streets['street1'] = streets.street1_hebrew
streets['street2'] = streets.street2_hebrew
streets.city = streets.city.apply(preprocess_text)
streets.street1 = streets.street1.apply(preprocess_text)
streets.street2 = streets.street2.apply(preprocess_text)
streets = streets[(streets.city != streets.street1) & (streets.city != streets.street2)
& (streets.city != 'NaN')]
streets = streets.replace('NaN', np.nan)
streets = streets.dropna(how='all')
streets = streets.drop_duplicates()
streets = streets.replace(np.nan, 'NaN')
return streets
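# Small illustration of the 'NaN' round-trip above (toy frame with assumed values):
# literal 'NaN' strings become real NaN so dropna/drop_duplicates can act on them,
# then are turned back into strings for downstream exact matching.
def _demo_nan_roundtrip():
    df = pd.DataFrame({'city': ['NaN', 'NaN'], 'street1': ['NaN', 'NaN']})
    out = df.replace('NaN', np.nan).dropna(how='all').drop_duplicates().replace(np.nan, 'NaN')
    assert out.empty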
def process_roads_table(addresses_df):
roads = | pd.DataFrame(addresses_df[['road1', 'road2', 'non_urban_intersection_hebrew']]) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN, KMeans
from sklearn.covariance import EmpiricalCovariance, MinCovDet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.mixture import GaussianMixture
from dsbox.ml.outliers import CovarianceOutliers, GaussianProcessOutliers
from dsbox.ml.outliers import GMMOutliers, ClusteringOutliers
from dsbox.ml.outliers import KMeansOneClusterOutliers, KMeansIterativeOneClusterOutliers
from dsbox.ml.outliers import MADOutliers, FFTOutliers
class CovarianceOutliersTest(unittest.TestCase):
def test_covarianceoutliers_constructor_should_accept_different_scikit_covariance_estimators(self):
# given
robust_cov = MinCovDet()
emp_cov = EmpiricalCovariance()
# when
cov_outliers_1 = CovarianceOutliers(emp_cov)
cov_outliers_2 = CovarianceOutliers(robust_cov)
# then
self.assertTrue(isinstance(cov_outliers_1, CovarianceOutliers))
self.assertTrue(isinstance(cov_outliers_2, CovarianceOutliers))
def test_covarianceoutliers_predict_proba_gives_biggest_proba_to_biggest_outlier(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
cov_outliers = CovarianceOutliers()
cov_outliers.fit(df)
probas = cov_outliers.predict_proba(df)
outlier_index = np.argmax(probas)
# then
outlier_index_true = 6
self.assertEqual(outlier_index_true, outlier_index)
def test_covarianceoutliers_predict_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
cov_outliers = CovarianceOutliers()
cov_outliers.fit(df)
outliers = cov_outliers.predict(df)
# then
outliers_true = [False, False, False, False, False, False, True, True, False, False, False,
False, False, False]
self.assertListEqual(outliers_true, outliers.tolist())
class GaussianProcessOutliersTest(unittest.TestCase):
def test_gpoutliers_predict_should_return_correct_values(self):
# given
data = np.random.random_sample(1000) * 2 - 1
data[300] = 5
data[700] = -6
df = pd.DataFrame(data)
# when
gp_outliers = GaussianProcessOutliers(GaussianProcessRegressor(alpha=0.9, normalize_y=True), n_samples=100)
gp_outliers.fit(df)
outliers = gp_outliers.predict(df, confidence=0.999)
# then
outlier_positions_true = [300, 700]
self.assertTrue(outliers[outlier_positions_true[0]])
self.assertTrue(outliers[outlier_positions_true[1]])
class KMeansOneClusterOutliersTest(unittest.TestCase):
def test_kmeansonecluster_outliers_predict_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
kmoc_outliers = KMeansOneClusterOutliers()
kmoc_outliers.fit(df)
outliers = kmoc_outliers.predict(df)
# then
outliers_true = [False, False, False, False, False, False, True, True, False, False, False,
False, False, False]
self.assertListEqual(outliers_true, outliers.tolist())
class KMeansIterativeOneClusterOutliersTest(unittest.TestCase):
def test_kmeans_iterative_onecluster_outliers_predict_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
kmoc_outliers = KMeansIterativeOneClusterOutliers()
outliers = kmoc_outliers.fit_predict(df)
# then
outliers_true = [2., 1., 1., 2., 2., -1., 0., 0., 1., 1., -1., 1., 1., 2.]
self.assertListEqual(outliers_true, outliers.tolist())
class GMMOutliersTest(unittest.TestCase):
def test_gmm_outliers_predict_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
gmm_outliers = GMMOutliers()
gmm_outliers.fit(df)
outliers = gmm_outliers.predict(df)
# then
outliers_true = [False, False, False, False, False, False, True, True, False, False, False,
False, False, False]
self.assertListEqual(outliers_true, outliers.tolist())
class ClusteringOutliersTest(unittest.TestCase):
def test_clustering_outliers_predict_proba_with_unclustered_strategy_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
clustering_outliers = ClusteringOutliers(cluster_estimator=DBSCAN(min_samples=2), strategy='unclustered')
clustering_outliers.fit(df)
outliers = clustering_outliers.predict_proba(df)
# then
outliers_true = [0., 0., 0., 0., 1., 0., 1., 1., 1., 1., 0., 0.,
1., 0.]
self.assertListEqual(outliers_true, outliers.tolist())
def test_clustering_outliers_predict_with_unclustered_strategy_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
clustering_outliers = ClusteringOutliers(cluster_estimator=DBSCAN(min_samples=2), strategy='unclustered')
clustering_outliers.fit(df)
outliers = clustering_outliers.predict(df)
# then
outliers_true = [False, False, False, False, True, False, True, True, True, True, False, False,
True, False]
self.assertListEqual(outliers_true, outliers.tolist())
def test_clustering_outliers_predict_proba_with_frontier_strategy_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
clustering_outliers = ClusteringOutliers(cluster_estimator=KMeans(n_clusters=2), strategy='frontier')
clustering_outliers.fit(df)
outliers_probas = clustering_outliers.predict_proba(df)
# then
outliers_true = [0.01861993, 0.02190581, 0.02190581, 0.01861993, 0.0109529, 0.01533406
, 0.00196078, 0.00196078, 0.08324206, 0.03066813, 0.01533406, 0.02190581
, 0.02738226, 0.01861993]
self.assertListEqual(outliers_true, np.round(outliers_probas, 8).tolist())
def test_clustering_outliers_predict_with_frontier_strategy_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
clustering_outliers = ClusteringOutliers(cluster_estimator=KMeans(n_clusters=2), strategy='frontier')
clustering_outliers.fit(df)
outliers = clustering_outliers.predict(df)
# then
outliers_true = [False, False, False, False, False, False, False, False, True, True, False, False,
True, False]
self.assertListEqual(outliers_true, outliers.tolist())
def test_clustering_outliers_predict_proba_with_size_strategy_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
clustering_outliers = ClusteringOutliers(cluster_estimator=KMeans(n_clusters=2), strategy='size')
clustering_outliers.fit(df)
outliers_probas = clustering_outliers.predict_proba(df)
# then
outliers_true = [0.14285714, 0.14285714, 0.14285714, 0.14285714, 0.14285714, 0.14285714
, 0.85714286, 0.85714286, 0.14285714, 0.14285714, 0.14285714, 0.14285714
, 0.14285714, 0.14285714]
self.assertListEqual(outliers_true, np.round(outliers_probas, 8).tolist())
def test_clustering_outliers_predict_with_size_strategy_and_kmeans_should_return_correct_values(self):
# given
df = | pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1]) | pandas.DataFrame |
#Author: <NAME>
#email : <EMAIL>
#
#------------------------------------------------------------------------------------------------------
#
#The content of this project is licensed under the MIT license. 2021 All rights reserved.
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software
#and associated documentation files (the "Software"), to deal with the Software without restriction,
#including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
#and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#Redistributions of source code must retain the above License notice, this list of conditions and
#the following disclaimers.
#
#Redistributions in binary form must reproduce the above License notice, this list of conditions and
#the following disclaimers in the documentation and/or other materials provided with the distribution.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
#LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE CONTRIBUTORS OR LICENSE HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
#------------------------------------------------------------------------------------------------------
#
#These code are writen for a research project, published in OIR. If you use any of them, please cite:
#<NAME>, <NAME>, <NAME>, <NAME>,
#"The Unknown Knowns: A Graph-Based Approach for Temporal COVID-19 Literature Mining",
#Online Information Review (OIR), COVID-19 Special Issue, 2021.
#
#------------------------------------------------------------------------------------------------------
# This code fills the empty cells in the original metadata file using the original texts
# Applying an extensive set of operations - as metadata has plenty of empty cells
# Returns a new metadata dataframe and another dataframe that is the subset of the new metadata with bad timestamps
import glob
import pandas as pd
import json
import numpy as np
from fuzzywuzzy import process, fuzz
def collectOnlyFilenames(files_list):
list_filenames = []
for curr_file in files_list:
if not isNaN(curr_file):
tmp1 = curr_file.split('/')[-1]
tmp2 = tmp1.split('.')[:-1]
tmp2 = '.'.join(tmp2)
list_filenames.append(tmp2)
else:
list_filenames.append(curr_file)
return list_filenames
def stripSingleFilename(file_):
tmp1 = file_.split('/')[-1]
tmp2 = tmp1.split('.')[:-1]
tmp2 = '.'.join(tmp2)
return tmp2
def getUniqueFilename(curr_file):
tmp1 = curr_file.split('/')[-2:]
return '_'.join(tmp1)
def isNaN(num):
return num != num
def getAuthors(obj):
num_authors = len(obj['metadata']['authors'])
auth_list = []
for i_auth in range(num_authors):
curr_dict = obj['metadata']['authors'][i_auth]
auth_list.append( curr_dict['last'] + ', ' + curr_dict['first'] )
return '; '.join(auth_list)
def getFullText(obj):
num_texts = len(obj['body_text'])
curr_body = []
for i_text in range(num_texts):
curr_text = obj['body_text'][i_text]['text']
curr_body.append(curr_text)
return ' '.join(curr_body)
def getPaperInfo(obj):
title_ = obj['metadata']['title']
if 'abstract' in obj.keys() and len(obj['abstract']) > 0:
abstract_ = obj['abstract'][0]['text']
else:
abstract_ = ''
return title_, abstract_, getAuthors(obj)
def writeText2File(full_text, filename, savedir):
fo = open(savedir + 'processed_texts/' + filename + '.txt', 'w')
fo.write(full_text)
fo.close()
# handle having multiple sha's in a single line
def fixShaList(sha_list):
sha_dict = {}
i_row = 0
for i_sha in sha_list:
if not isNaN(i_sha):
if ';' in i_sha:
curr_list = i_sha.split(';')
for curr_item in curr_list:
sha_dict[curr_item.replace(' ', '')] = i_row
else:
sha_dict[i_sha] = i_row
i_row += 1
return sha_dict
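# Quick check of the sha handling above (the hashes are invented examples): entries
# with several ';'-separated hashes are split so every hash maps to its row index.
def _demo_fix_sha_list():
    assert fixShaList(['abc', float('nan'), 'def; ghi']) == {'abc': 0, 'def': 2, 'ghi': 2}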
def fromIndicesReturnBest(row_indices, time_list, doi_list):
if len(row_indices) > 1:
timestamps = []
dois = []
for i_row in row_indices:
timestamps.append(time_list[i_row])
dois.append(doi_list[i_row])
# return the index for the one with largest timestamp data
candidate_time = max(enumerate(timestamps), key=lambda x: len(x[1]))[1]
candidate_doi = max(enumerate(dois), key=lambda x: len(x[1]))[1]
return candidate_time, candidate_doi
else:
i_row = row_indices[0]
#print(i_row)
return time_list[i_row], doi_list[i_row] # TypeError: list indices must be integers or slices, not numpy.float64
def fromAuthorIndicesReturnBest(row_indices, time_list, doi_list, curr_title, curr_abstract, meta_data):
if len(row_indices) > 1:
timestamps = []
dois = []
if not isNaN(curr_title) and len(curr_title)>3:
short_title_list = list(meta_data.title[row_indices]) # list(title_list[row_indices])
highest_match = process.extractOne(curr_title, short_title_list, scorer=fuzz.token_set_ratio)
if highest_match[1] > 99:
i_row = row_indices[short_title_list.index(highest_match[0])]
return time_list[i_row], doi_list[i_row]
if len(curr_abstract) > 3:
short_abstract_list = list(meta_data.abstract[row_indices]) # list(abstract_list[row_indices])
highest_match = process.extractOne(curr_abstract, short_abstract_list, scorer=fuzz.token_set_ratio)
if highest_match[1] > 99:
i_row = row_indices[short_abstract_list.index(highest_match[0])]
return time_list[i_row], doi_list[i_row]
for i_row in row_indices:
timestamps.append(time_list[i_row])
dois.append(doi_list[i_row])
#print('The worst way of doing this')
# return the index for the one with largest timestamp data
candidate_time = max(enumerate(timestamps), key=lambda x: len(x[1]))[1]
candidate_doi = max(enumerate(dois), key=lambda x: len(x[1]))[1]
return candidate_time, candidate_doi
else: # if the author list matches only once, then assume it's the correct paper's metadata
i_row = row_indices[0]
return time_list[i_row], doi_list[i_row]
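# Hedged sketch of the fuzzy title matching used above (the titles are made up):
# process.extractOne returns the best (choice, score) pair, and a score above 99
# is treated as a match.
def _demo_fuzzy_title_match():
    candidates = ['A study of coronaviruses', 'Deep learning for protein folding']
    best, score = process.extractOne('A study of coronaviruses', candidates,
                                     scorer=fuzz.token_set_ratio)
    assert best == 'A study of coronaviruses' and score == 100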
def getMetadata(meta_data, sha_dict, curr_title, curr_abstract, curr_authors, curr_sha):
#abstract_list = list(meta_data.abstract)
#title_list = list(meta_data.title)
time_list = list(meta_data.publish_time)
doi_list = list(meta_data.doi)
pdf_json_list = pd.DataFrame({'col': collectOnlyFilenames(list(meta_data.pdf_json_files))})
pmc_json_list = pd.DataFrame({'col': collectOnlyFilenames(list(meta_data.pmc_json_files))})
#author_list = list(meta_data.authors)
# order of matching the read files to the information in metadata.csv file
#1) see if the sha matches - if yes, collect the information and return it, else:
if curr_sha in sha_dict.keys():
i_row = sha_dict[curr_sha]
curr_timestamp = time_list[i_row]
curr_doi = doi_list[i_row]
return curr_timestamp, curr_doi
#2) see if the directory included name matches - if yes collect the information and return it, else:
elif curr_sha in list(pdf_json_list.col) or curr_sha in list(pmc_json_list.col):
if curr_sha in list(pdf_json_list.col): #Index_label = df[df['Updated Price']>1000].index.tolist()
row_indices1 = pdf_json_list[pdf_json_list['col'] == curr_sha].index.tolist() #np.where(pdf_json_list == curr_sha)[0] # get rid of numpy here - it has problems
else:
row_indices1 = []
if curr_sha in list(pmc_json_list.col):
row_indices2 = pmc_json_list[pmc_json_list['col'] == curr_sha].index.tolist() #np.where(pmc_json_list == curr_sha)[0]
else:
row_indices2 = []
row_indices = np.concatenate((row_indices1, row_indices2), axis=0)
row_indices = row_indices.astype('int32')
curr_timestamp, curr_doi = fromIndicesReturnBest(row_indices, time_list, doi_list)
return curr_timestamp, curr_doi
#3) see if the title matches - if yes, collect the information and return it, else:
elif len(curr_title) > 0 and curr_title in list(meta_data.title):
row_indices = meta_data[meta_data['title'] == curr_title].index.tolist() # np.where(title_list == curr_title)[0] # there could be more than one
curr_timestamp, curr_doi = fromIndicesReturnBest(row_indices, time_list, doi_list)
return curr_timestamp, curr_doi
#4) see if the abstract matches - if yes, collect the info and return, else:
elif len(curr_abstract) > 3 and curr_abstract in list(meta_data.abstract):
row_indices = meta_data[meta_data['abstract'] == curr_abstract].index.tolist() #np.where(np.array(abstract_list) == curr_abstract)[0] # there could be more than one
curr_timestamp, curr_doi = fromIndicesReturnBest(row_indices, time_list, doi_list)
return curr_timestamp, curr_doi
#5) see if the author list matches - among the returned papers find current title contains the given title or the abstract
elif curr_authors in list(meta_data.authors): # this is the solution for 70,000 remaining unmatched metadata - sensitive work required
row_indices = meta_data[meta_data['authors'] == curr_authors].index.tolist() # np.where(author_list == curr_authors)[0] # there could be more than one
curr_timestamp, curr_doi = fromAuthorIndicesReturnBest(row_indices, time_list, doi_list, curr_title, curr_abstract, meta_data)
return curr_timestamp, curr_doi
else:
# No other choice but to let it go
return '', ''
def collectBadTimestamps(metadata):
list_shas = []
list_foldernames = []
list_titles = []
list_timestamps = []
list_dois = []
for i_row in range(len(metadata.sha)):
if len(metadata['publish_time'][i_row]) < 5:
list_shas.append(metadata['sha'][i_row])
list_titles.append(metadata['title'][i_row])
list_foldernames.append(metadata['fullnames'][i_row])
list_timestamps.append(metadata['publish_time'][i_row])
list_dois.append(metadata['doi'][i_row])
df = pd.DataFrame({'sha': list_shas, 'fullnames' : list_foldernames, 'title': list_titles, 'publish_time': list_timestamps, 'doi':list_dois})
return df
def extractNewImprovedMetadata(datadir, savedir, metadatadir):
""" Reads the sha list from the original metadata file which is closest thing to associate metadata rows to json files
Then reads the authors list, title, abstract and the filenames from the json files
Tries to match those in the metadata rows - which is the only source of information for timestamps and doi's
Fills the empty cells in the original metadata file by the matching operations (including fuzzy text matching - json titles sometimes have additional noise)
Returns a brand new metadata file that is improved and filled as much as possible
Also returns a list of bad timestamps for some can be recovered from doi's - if they have non-empty doi's in the original metadata
Contact me (<NAME>) if there are problems.
"""
# read the metadata file
meta_data = | pd.read_csv(metadatadir + 'metadata.csv', dtype=str) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dash
import pandas
from app import app
import cfg
import time
import pickle
import dash_html_components as html
from plotly import tools
import plotly.graph_objs as go
from iclip_tab import createGeneModelPlot
import plotly.utils as pu
@app.callback(
dash.dependencies.Output('rnaDescDiv', component_property='children'),
[dash.dependencies.Input('geneDrop', 'value')],
)
def rnaDesc(name):
if cfg.descAvail:
try:
return [
html.P(
cfg.geneDescriptions.loc[
cfg.geneDescriptions['ensembl_gene_id'] == name,
['description']
].iloc[0]
)
]
except IndexError:
return ['No description available']
except KeyError:
return ['No description available']
else:
return ['No description available']
@app.callback(
dash.dependencies.Output('spliceGraph', 'figure'),
[dash.dependencies.Input('spliceMem', 'data'),
dash.dependencies.Input('rnaParamList', 'values'),
dash.dependencies.Input('rnaRadio', 'value'),
dash.dependencies.Input('covColorFinal', 'data'),
dash.dependencies.Input('eventColorFinal', 'data'),
dash.dependencies.Input('legendSpacingDiv', 'data'),
dash.dependencies.Input('coverageScale', 'value'),
dash.dependencies.Input('sequenceRadio', 'value'),
dash.dependencies.Input('eventScale', 'value'),
dash.dependencies.Input('bsGraphMem', 'data')]
)
def showRNA(figData, dataSets, displayType, covColor, eventColor, legendSpacing, coverageScale, seqDisp, eventScale, bsMem):
"""Update callback that selects traces to be displayed based on settings.
Positional arguments:
figData -- Trace data from the data callback.
    dataSets -- List of datasets to display.
displayType -- Type of splice event display.
covColor -- Colors for coverage traces.
eventColorl -- Colots for splice events.
legendSpacing -- Specifies margin between colorbar and other legend items.
coverageScale -- Scaling factor for coverage plots.
eventScale -- Scaling factor for event plots.
"""
legendColumnSpacing = legendSpacing
figData = figData
traces = figData['rnaTraces']
geneModels = figData['geneModels']
coverageColors = covColor
try:
seqTrace = bsMem[seqDisp]
if seqDisp == 'heatSeq':
for i in seqTrace:
i['showscale'] = False
except:
seqTrace = []
#print(seqTrace)
eventColors = eventColor
eventIndices = [] # Save indices of all elements that contain event traces
rnaDataSets = sorted(list(cfg.coverageData.keys()))
displayed_rnaDataSet = []
maxYDict = figData['maxYList']
axisTitles = []
yVals = []
for rm in sorted(dataSets):
for set in rnaDataSets:
if rm == set.split('_')[0]:
displayed_rnaDataSet.append(set)
finTraces = []
eventIndices = []
for index, t in enumerate(traces):
try:
if len(t[displayType]) > 1:
try:
if t[displayType][0]['meta'] in displayed_rnaDataSet:
if displayType == 'two':
for i in t[displayType]:
newColor = eventColors[i['name']]
i['marker'] = {'color' : newColor}
finTraces.append(t[displayType])
eventIndices.append(index//2)
axisTitles.append('')
except KeyError:
if t[displayType]['meta'] in displayed_rnaDataSet:
if displayType == 'two':
newColor = eventColors[t['name']]
t['marker'] = {'color' : newColor}
finTraces.append(t[displayType])
eventIndices.append(index//2)
axisTitles.append('')
else:
if t[displayType][0] in displayed_rnaDataSet:
finTraces.append([])
axisTitles.append('')
eventIndices.append(index//2)
except KeyError:
if t['meta'] in displayed_rnaDataSet:
newColor = coverageColors[t['meta'].split('_')[0]]
yVals.append(maxYDict[t['meta']])
axisTitles.append('')
t['fillcolor'] = newColor
finTraces.append(t)
numIsoforms = len(geneModels) # Number of isoforms in the gene model
numRows = len(finTraces)+numIsoforms+1#+1 for sequence trace
# Setup row heights based on available data
plotSpace = 0.9 # Space taken up by data tracks
spacingSpace = 1.0 - plotSpace # Space left for spacer tracks
rowHeight = plotSpace / numRows
if numRows > 1:
vSpace = spacingSpace / (numRows - 1)
else:
vSpace = spacingSpace
rowHeights = []
rowHeights.append(rowHeight/2)
eventHeights = []
eventMaxHeights = figData['maxHeights']
for index, i in enumerate(eventMaxHeights):
if index in eventIndices:
if i == 0:
eventHeights.append(0)
if i > 0 and i <= 5:
eventHeights.append(1)
if i >= 6 and i < 10:
eventHeights.append(2)
if i >= 10:
eventHeights.append(i % 5 +1)
if cfg.spliceEventAvail:
for i in range(1,numRows):
if i > len(finTraces):
rowHeights.append(0.5 * rowHeight) # Gene model row
elif (i % 2 == 0):
try:
rowHeights.append(eventHeights[(i//2)-1] * rowHeight * eventScale) # Splice event row
except IndexError:
rowHeights.append(0)
else:
rowHeights.append(3 * rowHeight * coverageScale) # Coverage row
else:
for i in range(1,numRows):
if i > len(finTraces): rowHeights.append(0.5 * rowHeight) # Gene model row
else:
rowHeights.append(3 * rowHeight * coverageScale) # Coverage row
fig = tools.make_subplots(print_grid=False, rows=numRows, cols=1,
shared_xaxes=True, row_width=rowHeights[::-1], vertical_spacing = vSpace)
# Layouting of the figure
eventIndicesDraw = [] # Save indices of all elements that contain event traces
for i in seqTrace:
fig.append_trace(i, 1, 1)
for index, t in enumerate(finTraces):
try:
fig.append_trace(t, index + 2, 1)
except ValueError:
eventIndicesDraw.append(index)
for i in eventIndicesDraw: # Add event traces after all coverage traces have been added for legend item positioning
for x in finTraces[i]:
fig.append_trace(x, i + 2, 1)
counter = len(finTraces)+1
for model in geneModels:
for part in model:
fig.append_trace(part, counter+1, 1)
counter += 1
fig['layout']['xaxis'].update(nticks=6)
fig['layout']['xaxis'].update(tickmode='array')
fig['layout']['xaxis'].update(showgrid=True)
fig['layout']['xaxis'].update(ticks='outside')
fig['layout']['xaxis'].update(ticksuffix='b')
fig['layout']['xaxis'].update(ticksuffix='b')
fig['layout'].update(hovermode='closest')
fig['layout']['yaxis'].update(fixedrange=True)
fig['layout'].update(barmode='relative')
# Reverse x-axis if gene is on - strand to always show models in 3'->5'
if figData['strand'] == '-':
fig['layout']['xaxis'].update(autorange='reversed')
for i in range(1, numRows+1): # prevent zoom on y axis
fig['layout']['yaxis' + str(i)].update(fixedrange=True)
try:
maxYVal = max(yVals)
except ValueError:
maxYVal = 0
blockHeight = 0.4
for i in range(1, numRows):
if cfg.spliceEventAvail:
if i % 2 != 0 and i <= len(finTraces): # Coverage row
fig['layout']['yaxis' + str(i+1)].update(range=[0, maxYVal],title={'text': axisTitles[i-1]})
fig['layout']['yaxis' + str(i+1)].update(showticklabels=True, showgrid=True, zeroline=True)
else: # Event or gene model row
if i > len(finTraces): # Gene model row
fig['layout']['yaxis' + str(i+1)].update(showticklabels=False, showgrid=False, zeroline=False)
fig['layout']['yaxis' + str(i+1)].update(range=[-blockHeight, blockHeight])
else: # Event row
fig['layout']['yaxis' + str(i+1)].update(showticklabels=False, showgrid=False, zeroline=False, title={'text': axisTitles[i-1]})
else:
if i <= len(finTraces): # Coverage row
fig['layout']['yaxis' + str(i+1)].update(range=[0, maxYVal], title={'text': axisTitles[i-1]})
fig['layout']['yaxis' + str(i+1)].update(showticklabels=True, showgrid=True, zeroline=True)
else: # Gene model row
fig['layout']['yaxis' + str(i+1)].update(showticklabels=False, showgrid=False, zeroline=False)
fig['layout']['yaxis' + str(i+1)].update(range=[-blockHeight, blockHeight], )
# Setup plot height, add 85 to account for margins
fig['layout'].update(margin=go.layout.Margin(l=60, r=40, t=25, b=60),)
fig['layout']['yaxis'].update(visible = False, showticklabels=False, showgrid=False, zeroline=False)
rowScales = [x/rowHeight for x in rowHeights]
size = 0
for i in rowScales:
size += 50*i
fig['layout']['height'] = (size + 85)
# set spacing for the second legend column
fig['layout']['legend'].update(x = legendColumnSpacing)
#print('Showcallback: ' + str(end-start))
return fig
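# Hedged sketch of the row sizing scheme above (the row count is an assumed example):
# 90% of the figure height is shared by the data rows and the remaining 10% becomes
# the vertical spacing between them.
def _demo_row_spacing(numRows=4):
    plotSpace = 0.9
    rowHeight = plotSpace / numRows
    vSpace = (1.0 - plotSpace) / (numRows - 1) if numRows > 1 else (1.0 - plotSpace)
    return rowHeight, vSpace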
@app.callback(
dash.dependencies.Output('spliceMem', 'data'),
[dash.dependencies.Input('geneDrop', 'value')],
[dash.dependencies.State('rnaRadio', 'value'),
dash.dependencies.State('rnaParamList', 'values'),
dash.dependencies.State('covColorFinal', 'data'),
dash.dependencies.State('eventColorFinal', 'data'),
dash.dependencies.State('legendSpacingDiv', 'data'),
dash.dependencies.State('coverageScale', 'value'),
dash.dependencies.State('eventScale', 'value')]
)
def rnaCallback(geneName, displayMode,rnaParamList, colorsFinal, eventColorsFinal, legendSpacing,
coverageScale, eventScale):
"""Data callback that selects relevant data and creates all possible traces.
Positional arguments:
geneName -- Name of the selected gene in order to filter the data.
displaymode --determines how splice events will be visualized.
rnaParamList -- Selected RNA data sets to plot.
colorsFinal -- Last confirmed color.
eventColorsFinal -- Last confirmed colors for splice events.
legendSpacing -- Specifies margin between colorbar and other legend items.
coverageScale -- Scaling factor for coverage plots.
eventScale -- Scaling factor for event plots.
"""
colors = colorsFinal
figData = {}
# Select appropriate data from gene annotations
currentGene = pandas.DataFrame()
for index, elem in enumerate(cfg.geneAnnotations):
currentGene = elem[elem['geneID'].str.contains(geneName)]
if not currentGene.empty:
break
# Get axis minimum and maximum over all isoforms. Also get current chromosome
xAxisMax = currentGene['chromEnd'].max()
xAxisMin = currentGene['chromStart'].min()
chrom = currentGene['chrom'].iloc[0]
strand = currentGene['strand'].iloc[0]
figData.update({'strand': strand})
color_dict = colors # Color per mutant
figData.update({'covColors' : color_dict})
# Filter out needed datasets
rnaDataSets = sorted(list(cfg.coverageData.keys()))
displayed_rnaDataSet = rnaDataSets
# for rm in sorted(rnaParamList):
# for set in rnaDataSets:
# if rm == set.split('_')[0]:
# displayed_rnaDataSet.append(set)
# Dicts for lists of axis values
xVals = {}
yVals = {}
maxYVal = 0 # Used to scale y-axes later
maxYVals = {}
eventDict = {} # stores dataframes with relevant splice event data
iterTime = 0
evSel = 0
covSel = 0
for ds in sorted(displayed_rnaDataSet): # Select relevant coverage files from Index
covStart = time.time()
spliceSlice = coverageDataSelection(ds, xAxisMin, xAxisMax, chrom)
covEnd = time.time()
covSel += covEnd-covStart
# Pre-init y-value list
yVal = [0] * (len(range(xAxisMin, xAxisMax)))
organism = ds.split("_")[0] # Prefix of the curret data frame, first filter
spliceEvents = pandas.DataFrame() # will hold splice event data for the current data set
evStart = time.time()
if any(organism in s for s in cfg.spliceEventNames[1]): # Check if there are splice events for the current prefix
for d in sorted(cfg.spliceEventDFs.keys()):
if ds in d: # Check for remaining filename, to match the correct files
# Criteria to filter relevant lines from current dataframe
bcrit11 = cfg.spliceEventDFs[d]['chrom'] == chrom
bcrit21 = cfg.spliceEventDFs[d]['chromStart'] >= xAxisMin
bcrit22 = cfg.spliceEventDFs[d]['chromStart'] <= xAxisMax
bcrit31 = cfg.spliceEventDFs[d]['chromEnd'] >= xAxisMin
bcrit32 = cfg.spliceEventDFs[d]['chromEnd'] <= xAxisMax
spliceEvents = cfg.spliceEventDFs[d].loc[bcrit11 & ((bcrit21 & bcrit22) | (bcrit31 & bcrit32))]
# Use itertuples to iterate over rows, since itertuples is supposed to be faster
evEnd = time.time()
evSel += evEnd-evStart
iterStart = time.time()
for row in spliceSlice.itertuples():
# Increment all values covered by the current row, will overshoot when row crosses border of gene, thus try except
for j in range(row.chromStart, row.chromEnd):
if (j - xAxisMin) >=0: # Ensure we don't fall into negative list indices
try:
yVal[j - xAxisMin] += row.count
except IndexError:
pass
iterEnd = time.time()
iterTime += iterEnd-iterStart
# Store reference to value list in dict
yVals[ds] = yVal
# Safe event dataframe to be used in the next function
eventDict[ds] = spliceEvents
# Create x-axis values
xVal = list(range(xAxisMin, xAxisMax))
xVals[ds] = xVal
# Find maximum y-axis value for axis scaling
maxYVals.update({ds: max(yVal)})
if max(yVal) > maxYVal: maxYVal = max(yVal)
figData.update({'maxY' : maxYVal})
figData.update({'maxYList' : maxYVals})
# Create RNA-seq traces from data
rnaSeqPlotData = createRNAPlots(xVals, yVals, eventDict, displayed_rnaDataSet,
color_dict, displayMode, eventColorsFinal)
traces = rnaSeqPlotData[0]
eventMaxHeights = rnaSeqPlotData[1]
axisTitles = rnaSeqPlotData[2]
figData.update({'rnaTraces' : traces})
figData.update({'maxHeights' : eventMaxHeights})
figData.update({'axisTitles' : axisTitles})
overlappingGenes = []
for i in cfg.geneAnnotations: # Select data for gene models from all annotation files
bcrit11 = i['chrom'] == chrom
bcrit21 = i['chromStart'] >= xAxisMin
bcrit22 = i['chromStart'] <= xAxisMax
bcrit31 = i['chromEnd'] >= xAxisMin
bcrit32 = i['chromEnd'] <= xAxisMax
bcrit41 = i['chromStart'] <= xAxisMin
bcrit42 = i['chromEnd'] >= xAxisMax
preDF = i.loc[bcrit11 & ((bcrit21 & bcrit22) | (bcrit31 & bcrit32) | (bcrit41 & bcrit42))]
result = preDF[~preDF['geneID'].str.contains(geneName)]
overlappingGenes.append(result)
overlaps = pandas.concat(overlappingGenes)
isoformList = | pandas.concat([currentGene, overlaps]) | pandas.concat |
### tse electoral crime html parser
# developed by:
# <NAME>
# <EMAIL>
# import standard libraries
import os, sys, re, csv
import pandas as pd
# import third-party libraries
import tse
# define clear function
clear = lambda: os.system('clear')
# load list of files to parse, and create identifier for all files
files = os.listdir('html')
files = sorted(files)[1:]
files = ['html/' + file for file in files]
# define arguments for pandas methods
kwarg = {'ignore_index': True}
kwargs = {'index': False, 'sep': ',', 'quoting': csv.QUOTE_NONNUMERIC}
### parse summary
# define function to parse case summary
def case_summary(file):
case = tse.parser(file).parse_summary()
case.update({'candidateID': file[5:-5]})
return case
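# Quick check of the candidateID slicing above (the path is an assumed example):
# 'html/' and '.html' are both five characters, so file[5:-5] keeps only the id.
assert 'html/12345.html'[5:-5] == '12345'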
# map over list of cases and extract summary information
summary = list(map(case_summary, files))
# build dataset
summaries = pd.concat([ | pd.DataFrame.from_dict(s) | pandas.DataFrame.from_dict |
import pandas as pd
from models.fout import Fout
from models.leerling import Leerling
from models.options import Options
from models.somtoday import Somtoday
from models.tbgi import Tbgi
from models.telling_status import TellingStatus
class Vergelijk:
VERGELIJKING = {'tbgi': {'van': 'TBGI', 'met': 'Somtoday'},
'somtoday': {'van': 'Somtoday', 'met': 'TBGI'}}
def __init__(self, somtoday: Somtoday, tbgi: Tbgi, options: Options):
self.options: Options = options
self.soort: str = options.soort
self.somtoday: pd.DataFrame = somtoday.somtoday
self.tbgi: pd.DataFrame = tbgi.tbgi
self.tbgi_fouten: [Fout] = []
self.somtoday_fouten: [Fout] = []
self.fouten: {str: [Fout]} = {'tbgi': [], 'somtoday': []}
self.telling: {str: {str: int}} = {'tbgi': {}, 'somtoday': {}}
def check_tbgi(self):
self.options.message("#### TBGI vergelijken met Somtoday")
fouten = self.fouten['tbgi']
for _, tbgi_row in self.tbgi.iterrows():
self.telling['tbgi'][tbgi_row['categorie']] = self.telling['tbgi'].get(tbgi_row['categorie'], 0) + 1
# zoek de leerling in somtoday op BSN of OWN
sll: pd.DataFrame
sll = self.somtoday[self.somtoday.BSN == tbgi_row['BSN']]
if sll.empty:
sll = self.somtoday[self.somtoday.OWN == tbgi_row['OWN']]
if sll.empty:
fouten.append(Fout(fout=Fout.ONBEKENDE_LEERLING,
bron=Fout.TBGI,
tbgi=tbgi_row))
continue
elif 1 < len(sll):
fout = Fout(fout=Fout.MEERVOUDIGE_LEERLING,
bron=Fout.TBGI,
tbgi=tbgi_row)
for _, ll_row in sll.iterrows():
fout.add_leerling(Leerling(leerling=ll_row))
fouten.append(fout)
continue
# we weten zeker dat er maar 1 rij in het dataframe zit
ll: Leerling = Leerling()
for _, ll_row in sll.iterrows():
ll.init_series(leerling=ll_row)
if not TellingStatus.is_gelijk(ll.categorie, tbgi_row['categorie']):
fouten.append(Fout(fout=Fout.LEERLING_NIET_GELIJK,
bron=Fout.TBGI,
leerling=ll,
tbgi=tbgi_row))
def check_somtoday(self):
self.options.message("#### Somtoday vergelijken met TBGI")
fouten = self.fouten['somtoday']
for _, somtoday_row in self.somtoday.iterrows():
sll = Leerling(somtoday_row)
self.telling['somtoday'][sll.categorie] = self.telling['somtoday'].get(sll.categorie, 0) + 1
tll: pd.DataFrame or None = None
if not | pd.isna(sll.BSN) | pandas.isna |
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from urllib.parse import urljoin
from string import ascii_lowercase
from sqlite3 import connect
from rich.progress import track
import numpy as np
import pandas as pd
from .base import Datasets, FETCH_URLS
class ContinuousCategoricalDatasets(Datasets):
"""Class to download, transform and save datasets with both continuous
and categorical features."""
@staticmethod
def _modify_columns(data, categorical_features):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1), categorical_features
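    @staticmethod
    def _demo_modify_columns():
        # Hedged sketch with a toy frame (assumed values, not a real dataset): feature
        # columns are renamed to 0..n-1 and the target column is re-appended, which is
        # what _modify_columns does to every fetched dataset.
        data = pd.DataFrame({"a": [1, 2], "b": [3, 4], "target": ["x", "y"]})
        X, y = data.drop(columns="target"), data.target
        X.columns = range(len(X.columns))
        out = pd.concat([X, y], axis=1)
        assert list(out.columns) == [0, 1, "target"]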
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data, categorical_features = self._modify_columns(*fetch_data())
self.content_.append((name, data, categorical_features))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
def fetch_adult(self):
"""Download and transform the Adult Data Set.
https://archive.ics.uci.edu/ml/datasets/Adult
"""
data = pd.read_csv(FETCH_URLS["adult"], header=None, na_values=" ?").dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3, 5, 6, 7, 8, 9, 13]
return data, categorical_features
def fetch_abalone(self):
"""Download and transform the Abalone Data Set.
https://archive.ics.uci.edu/ml/datasets/Abalone
"""
data = pd.read_csv(FETCH_URLS["abalone"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0]
return data, categorical_features
def fetch_acute(self):
"""Download and transform the Acute Inflammations Data Set.
https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
"""
data = pd.read_csv(
FETCH_URLS["acute"], header=None, sep="\t", decimal=",", encoding="UTF-16"
)
data["target"] = data[6].str[0] + data[7].str[0]
data.drop(columns=[6, 7], inplace=True)
categorical_features = list(range(1, 6))
return data, categorical_features
def fetch_annealing(self):
"""Download and transform the Annealing Data Set.
https://archive.ics.uci.edu/ml/datasets/Annealing
"""
data = pd.read_csv(FETCH_URLS["annealing"], header=None, na_values="?")
# some features are dropped; they have too many missing values
missing_feats = (data.isnull().sum(0) / data.shape[0]) < 0.1
data = data.iloc[:, missing_feats.values]
data[2].fillna(data[2].mode().squeeze(), inplace=True)
data = data.T.reset_index(drop=True).T
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 1, 5, 9]
return data, categorical_features
def fetch_census(self):
"""Download and transform the Census-Income (KDD) Data Set.
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
"""
data = pd.read_csv(FETCH_URLS["census"], header=None)
categorical_features = (
list(range(1, 5))
+ list(range(6, 16))
+ list(range(19, 29))
+ list(range(30, 38))
+ [39]
)
# some features are dropped; they have too many missing values
cols_ids = [1, 6, 9, 13, 14, 20, 21, 29, 31, 37]
categorical_features = np.argwhere(
np.delete(
data.rename(columns={k: f"nom_{k}" for k in categorical_features})
.columns.astype("str")
.str.startswith("nom_"),
cols_ids,
)
).squeeze()
data = data.drop(columns=cols_ids).T.reset_index(drop=True).T
# some rows are dropped; they have rare missing values
data = data.iloc[
data.applymap(lambda x: x != " Not in universe").all(1).values, :
]
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
return data, categorical_features
def fetch_contraceptive(self):
"""Download and transform the Contraceptive Method Choice Data Set.
https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice
"""
data = pd.read_csv(FETCH_URLS["contraceptive"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [4, 5, 6, 8]
return data, categorical_features
def fetch_covertype(self):
"""Download and transform the Covertype Data Set.
https://archive.ics.uci.edu/ml/datasets/Covertype
"""
data = pd.read_csv(FETCH_URLS["covertype"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
wilderness_area = pd.Series(
np.argmax(data.iloc[:, 10:14].values, axis=1), name=10
)
soil_type = pd.Series(np.argmax(data.iloc[:, 14:54].values, axis=1), name=11)
data = (
data.drop(columns=list(range(10, 54)))
.join(wilderness_area)
.join(soil_type)[list(range(0, 12)) + ["target"]]
)
categorical_features = [10, 11]
return data, categorical_features
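    @staticmethod
    def _demo_one_hot_collapse():
        # Hedged sketch (assumed toy array): np.argmax along axis=1 collapses each
        # one-hot row into a single categorical code, as done above for the
        # wilderness_area and soil_type column blocks.
        one_hot = np.array([[1, 0, 0, 0],
                            [0, 0, 1, 0]])
        assert np.argmax(one_hot, axis=1).tolist() == [0, 2]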
def fetch_credit_approval(self):
"""Download and transform the Credit Approval Data Set.
https://archive.ics.uci.edu/ml/datasets/Credit+Approval
"""
data = pd.read_csv(
FETCH_URLS["credit_approval"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 3, 4, 5, 6, 8, 9, 11, 12]
return data, categorical_features
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
https://archive.ics.uci.edu/ml/datasets/Dermatology
"""
data = pd.read_csv(
FETCH_URLS["dermatology"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = list(range(data.shape[1] - 1))
categorical_features.remove(33)
return data, categorical_features
def fetch_echocardiogram(self):
"""Download and transform the Echocardiogram Data Set.
https://archive.ics.uci.edu/ml/datasets/Echocardiogram
"""
data = pd.read_csv(
FETCH_URLS["echocardiogram"],
header=None,
error_bad_lines=False,
warn_bad_lines=False,
na_values="?",
)
data.drop(columns=[10, 11], inplace=True)
data.dropna(inplace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3]
return data, categorical_features
def fetch_flags(self):
"""Download and transform the Flags Data Set.
https://archive.ics.uci.edu/ml/datasets/Flags
"""
data = pd.read_csv(FETCH_URLS["flags"], header=None)
target = data[6].rename("target")
data = data.drop(columns=[0, 6]).T.reset_index(drop=True).T.join(target)
categorical_features = [
0,
1,
4,
8,
9,
10,
11,
12,
13,
14,
15,
21,
22,
23,
24,
25,
26,
27,
]
return data, categorical_features
def fetch_heart_disease(self):
"""Download and transform the Heart Disease Data Set.
https://archive.ics.uci.edu/ml/datasets/Heart+Disease
"""
data = (
pd.concat(
[
pd.read_csv(url, header=None, na_values="?")
for url in FETCH_URLS["heart_disease"]
],
ignore_index=True,
)
.drop(columns=[10, 11, 12])
.dropna()
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8]
return data, categorical_features
def fetch_hepatitis(self):
"""Download and transform the Hepatitis Data Set.
https://archive.ics.uci.edu/ml/datasets/Hepatitis
"""
data = (
pd.read_csv(FETCH_URLS["hepatitis"], header=None, na_values="?")
.drop(columns=[15, 18])
.dropna()
)
target = data[0].rename("target")
data = data.drop(columns=[0]).T.reset_index(drop=True).T.join(target)
categorical_features = list(range(1, 13)) + [16]
return data, categorical_features
def fetch_german_credit(self):
"""Download and transform the German Credit Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
data = pd.read_csv(FETCH_URLS["german_credit"], header=None, sep=" ")
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = (
np.argwhere(data.iloc[0, :-1].apply(lambda x: str(x)[0] == "A").values)
.squeeze()
.tolist()
)
return data, categorical_features
def fetch_heart(self):
"""Download and transform the Heart Data Set.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8, 10, 12]
return data, categorical_features
def fetch_thyroid(self):
"""Download and transform the Thyroid Disease Data Set.
Label 0 corresponds to no disease found.
Label 1 corresponds to one or multiple diseases found.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = (
pd.read_csv(FETCH_URLS["thyroid"], header=None, na_values="?")
.drop(columns=27)
.dropna()
.T.reset_index(drop=True)
.T
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
data["target"] = (
data["target"].apply(lambda x: x.split("[")[0]) != "-"
).astype(int)
categorical_features = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
18,
20,
22,
24,
26,
27,
]
return data, categorical_features
class MultiClassDatasets(Datasets):
"""Class to download, transform and save multi-class datasets."""
def fetch_first_order_theorem(self):
"""Download and transform the First Order Theorem Data Set.
https://www.openml.org/d/1475
"""
data = pd.read_csv(FETCH_URLS["first_order_theorem"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_gas_drift(self):
"""Download and transform the Gas Drift Data Set.
https://www.openml.org/d/1476
"""
data = pd.read_csv(FETCH_URLS["gas_drift"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_autouniv_au7(self):
"""Download and transform the AutoUniv au7 Data Set
https://www.openml.org/d/1552
"""
data = pd.read_csv(FETCH_URLS["autouniv_au7"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_autouniv_au4(self):
"""Download and transform the AutoUniv au4 Data Set
https://www.openml.org/d/1548
"""
data = pd.read_csv(FETCH_URLS["autouniv_au4"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_mice_protein(self):
"""Download and transform the Mice Protein Data Set
https://www.openml.org/d/40966
"""
data = | pd.read_csv(FETCH_URLS["mice_protein"]) | pandas.read_csv |
# @package mktDataAnalysis
# mktDataAnalysis class in charge of creating the needed indicators using the market data
# @author <NAME>
import sys
sys.path.insert(0, r'')
import json
import os
from tradingBot.src.utils.exceptions import BadKwargs, SymbolNotSupported
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
pd.options.mode.chained_assignment = None
class mktDataAnalysis():
## mktDataAnalysis
# @class mktDataAnalysis
paths = {"mainPath": "tradingBot/dataBase/{}/{}", "subPaths": [
{"id": "indicators", "subPath": "/indicators"},
{"id": "intervals", "subPath": "/intervals"}
]}
def __init__(self, coin=None, pair=None, coinBotObj=None):
self.coinBotObj = coinBotObj
self.dBIntervals = coinBotObj.tmfrmVar
mainPath = self.paths['mainPath'].format(coin, pair)
self.indicPath = mainPath + self.paths['subPaths'][0]["subPath"]
self.DBPath = mainPath + self.paths['subPaths'][1]["subPath"]
self.rptIndic = {}
self.indicIntervals = []
self.getIndInterval = []
self.coin = coin
self.pair = pair
#This section will be deleted in future
if not os.path.exists(self.indicPath):
os.makedirs(self.indicPath)
#TODO ACCESS THE ACTUALIZED DB FROM CB
#for nameDB in self.dBIntervals:
# setattr(self, nameDB, getattr(self.coinBotObj, nameDB))
#TODO IF WE DELETE ALL INDICATORS, WHY DO WE OPEN THEM HERE?
#self.openInd()
#self.delAllIndicator()
def newIndicator(self, indicator=None, period=None, interval=None):
# @fn newIndicator
# @brief Adds a new indicator
# @param indicator The name of the indicator. Send string (e.g., "WMA")
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @return False if the indicator has already been created.
if not isinstance(period, int):
return False
interval = self._getIntvl(timeframe=interval)
id = str(period) + indicator + interval
for indicFiles in self.getIndInterval:
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
if line["id"] == id:
self.rptIndic[id] = self.rptIndic[id] + 1 if id in self.rptIndic else 1
return False
"""
if indicator == "RSI" and indicator == \
line['indicator'] and interval == line['interval']:
return False
"""
if not self.checkIntDB(interval=interval):
return False
newInd = {
"indicator": indicator,
"interval": interval,
"period": period,
"id": id,
"start": 0,
"end": 0,
"data": []
}
newInd['data'] = self.actlIndData(indicator=indicator, period=period, interval=interval,\
start=None, end=None, int_unix=None)
if interval not in self.indicIntervals:
self.indicIntervals.append(interval)
if not newInd['data']:
return False
newInd['start'] = newInd['data'][0]['timestamp']
newInd['end'] = newInd['data'][-1]['timestamp']
indic = getattr(self, "indic_" + interval)
indic["indicators"].append(newInd)
setattr(self, "indic_" + interval, indic)
def delIndicator(self, id=None):
# @fn delIndicator
# @brief Deletes one indicator
# @param id ID of the indicator. Send string (e.g., "80SMA1h")
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
if line["id"] == id:
if id in self.rptIndic:
self.rptIndic[id] = self.rptIndic[id] - 1
if self.rptIndic[id] >= 0:
newInd["indicators"].append(line)
else:
self.rptIndic[id] = -1
if not line["id"] == id:
newInd["indicators"].append(line)
setattr(self, indicFiles["indicator_int"], newInd)
def delAllIndicator(self):
# @fn delAllIndicator
# @brief Deletes all indicators currently stored.
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
setattr(self, indicFiles["indicator_int"], newInd)
def actlIndicators(self):
# @fn actlIndicators
# @brief Updates all indicators across all intervals
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
info = {
"indicator": line['indicator'],
"interval": line['interval'],
"period": line['period'],
"id": line['id'],
"start": line['data'][0]['timestamp'],
"end": line['data'][-1]['timestamp'],
"data": line['data']
}
int_unix = info['data'][1]['timestamp'] - info['data'][0]['timestamp']
newData = self.actlIndData(indicator=info['indicator'], period=info['period'],\
interval=info['interval'], start=info['start'], end=info['end'], int_unix=int_unix)
if newData[0]['timestamp'] == info['end']:
info['data'][-1] = newData[0]
else:
del info['data'][0:len(newData)]
info['data'] += newData
info['start'] = info['data'][0]['timestamp']
info['end'] = info['data'][-1]['timestamp']
newInd["indicators"].append(info)
setattr(self, indicFiles["indicator_int"], newInd)
def actlIndData(self, indicator=None, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn actlIndData
# @brief Dispatches the indicator to its respective update function.
# @param indicator The name of the indicator. Send string (e.g., "WMA")
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return data Updated data as a list of JSON-like dicts.
if "EMA" == indicator:
data = self.indEMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "RSI" == indicator:
data = self.indRSI(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "SMA" == indicator:
data = self.indSMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "WMA" == indicator:
data = self.indWMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "ATR" == indicator:
data = self.indATR(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "BB" == indicator:
data = self.indBB(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
else:
return False
return data
def viewIndicators(self):
# @fn viewIndicators
# @brief Prints a summary of all registered indicators.
# @return None; the indicator table is printed to the command line.
indica = {"indicators": []}
for indicFiles in self.getIndInterval:
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
newInd = {
"indicator": line['indicator'],
"interval": line['interval'],
"period": line['period'],
"id": line['id'],
}
indica["indicators"].append(newInd)
data = pd.DataFrame.from_dict(indica['indicators'], orient='columns')
data = data.sort_values(by=['interval', 'indicator', 'period'])
data = data.reindex(columns=['interval', 'indicator', 'period', 'id'])
print(data.to_string(index=False))
def indRSI(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indRSI
# @brief Computes/updates the RSI (Relative Strength Index) for the given period and interval.
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Updated data as a list of JSON-like dicts.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief Computes RSI values for the supplied candles.
# @param data DataFrame of candles with 'timestamp' and 'close' columns.
# @param kLines Slice index; only records from this index onward are returned.
# @return actData List of {'timestamp', 'value'} dicts.
delta = data['close'].diff(1)
delta.dropna(inplace=True)
positive = delta.copy()
negative = delta.copy()
positive[positive < 0] = 0
negative[negative > 0] = 0
average_gain = positive.rolling(window=period).mean()
average_loss = abs(negative.rolling(window=period).mean())
relative_strength = average_gain / average_loss
rsi = 100.0 - (100.0 / (1.0 + relative_strength))
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = rsi
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
data = getattr(self.coinBotObj, interval)
data = pd.DataFrame.from_dict(data, orient='columns')
data["close"] = pd.to_numeric(data["close"])
startDB = data.iloc[0]['timestamp']
endDB = data.iloc[-1]['timestamp']
if int_unix is None or end > data.iloc[-1]['timestamp']:
actData = calcData(data=data, kLines=0)
elif end == endDB:
actData = calcData(data=data[-(period + 1):], kLines=-1)
else:
opData, kLines = self.checkLen(period=period, end=end, endDB=endDB, int_unix=int_unix)
actData = calcData(data=data[opData:], kLines=kLines)
return actData
def indEMA(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indEMA
# @brief Computes/updates the EMA (Exponential Moving Average) for the given period and interval.
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Updated data as a list of JSON-like dicts.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief Computes EMA values for the supplied candles.
# @param data DataFrame of candles with 'timestamp' and 'close' columns.
# @param kLines Slice index; only records from this index onward are returned.
# @return actData List of {'timestamp', 'value'} dicts.
ema = data['close'].ewm(span=period, adjust=False).mean()
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = ema
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
data = getattr(self.coinBotObj, interval)
print("My data intvl {} for latest candle: {}".format(interval, data[-1]['timestamp']))
data = pd.DataFrame.from_dict(data, orient='columns')
startDB = data.iloc[0]['timestamp']
endDB = data.iloc[-1]['timestamp']
if int_unix is None or end > data.iloc[-1]['timestamp']:
actData = calcData(data=data, kLines=0)
elif end == endDB:
actData = calcData(data=data[-(period + 1):], kLines=-1)
else:
opData, kLines = self.checkLen(period=period, end=end, endDB=endDB, int_unix=int_unix)
actData = calcData(data=data[opData:], kLines=kLines)
print("Interval Value {} and period {}".format(actData[-1], period))
return actData
def indSMA(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indSMA
# @brief Computes/updates the SMA (Simple Moving Average) for the given period and interval.
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Updated data as a list of JSON-like dicts.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief Computes SMA values for the supplied candles.
# @param data DataFrame of candles with 'timestamp' and 'close' columns.
# @param kLines Slice index; only records from this index onward are returned.
# @return actData List of {'timestamp', 'value'} dicts.
sma = data['close'].rolling(window=period).mean()
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = sma
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
data = getattr(self.coinBotObj, interval)
data = pd.DataFrame.from_dict(data, orient='columns')
startDB = data.iloc[0]['timestamp']
endDB = data.iloc[-1]['timestamp']
if int_unix is None or end > data.iloc[-1]['timestamp']:
actData = calcData(data=data, kLines=0)
elif end == endDB:
actData = calcData(data=data[-(period + 1):], kLines=-1)
else:
opData, kLines = self.checkLen(period=period, end=end, endDB=endDB, int_unix=int_unix)
actData = calcData(data=data[opData:], kLines=kLines)
return actData
def indWMA(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indWMA
# @brief Computes/updates the WMA (Weighted Moving Average) for the given period and interval.
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Updated data as a list of JSON-like dicts.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief Computes WMA values for the supplied candles using the weights built below.
# @param data DataFrame of candles with 'timestamp' and 'close' columns.
# @param kLines Slice index; only records from this index onward are returned.
# @return actData List of {'timestamp', 'value'} dicts.
wma = data['close'].rolling(window=period).apply(wma_calc(weights), raw=True)
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = wma
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
def wma_calc(w):
# @fn wma_calc
# @brief Builds a closure that computes a weighted average with the given weights.
# @param w List of weights; the most recent candle in each window is weighted most heavily.
# @return g Function applied to each rolling window.
def g(x):
# @fn g
# @brief Weighted mean of one rolling window.
# @param x Window of close prices.
# @return sum(w * x) / sum(w)
return sum(w * x) / sum(w)
return g
weights = list(reversed([(period - n) * period for n in range(period)]))
data = getattr(self.coinBotObj, interval)
data = pd.DataFrame.from_dict(data, orient='columns')
startDB = data.iloc[0]['timestamp']
endDB = data.iloc[-1]['timestamp']
if int_unix is None or end > data.iloc[-1]['timestamp']:
actData = calcData(data=data, kLines=0)
elif end == endDB:
actData = calcData(data=data[-(period + 1):], kLines=-1)
else:
opData, kLines = self.checkLen(period=period, end=end, endDB=endDB, int_unix=int_unix)
actData = calcData(data=data[opData:], kLines=kLines)
return actData
def indBB(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indBB
# @brief Computes/updates the Bollinger Bands for the given period and interval.
# @param period Period over which the indicator is computed. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Updated data as a list of JSON-like dicts.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief Computes Bollinger Band values for the supplied candles.
# @param data DataFrame of candles with 'timestamp' and 'close' columns.
# @param kLines Slice index; only records from this index onward are returned.
# @return actData List of {'timestamp', 'value'} dicts.
actData = | pd.DataFrame() | pandas.DataFrame |
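# Illustrative sketch: the calcData helper inside indRSI above implements the textbook RSI,
# averaging positive and negative close-to-close deltas over `period` candles and combining them
# as 100 - 100 / (1 + RS). The same computation on synthetic closes (values invented;
# Series.clip is equivalent to the copy-and-mask approach used in the method):
import numpy as np
import pandas as pd

_period = 14
_closes = pd.Series(100 + np.cumsum(np.random.randn(100)))   # synthetic close prices
_delta = _closes.diff(1).dropna()
_gains = _delta.clip(lower=0)                                # positive moves only
_losses = _delta.clip(upper=0).abs()                         # negative moves, as positive numbers
_rs = _gains.rolling(window=_period).mean() / _losses.rolling(window=_period).mean()
_rsi = 100.0 - (100.0 / (1.0 + _rs))
print(_rsi.dropna().tail())                                  # RSI values, bounded in [0, 100]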
import sys
from sqlalchemy import create_engine
import pandas as pd
def load_data(messages_filepath, categories_filepath):
'''
This function loads the messages and categories datasets into pandas DataFrames,
and combines them into a new DataFrame
INPUT:
messages_filepath: filepath of 'messages.csv'
categories_filepath: filepath of 'categories.csv'
OUTPUT:
df: A merged dataset using the common id
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on = 'id')
return df
def clean_data(df):
'''
This function splits the [categories] column into separate category columns,
converts the category values to the numbers 0 or 1,
and replaces the [categories] column in df with the new category columns
INPUT:
df: the merged dataframe
OUTPUT:
df: a cleaned dataframe that is ready for Machine Learning Pipeline
'''
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(pat=';', expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0]
# use this row to extract a list of new column names for categories.
category_colnames = row.apply(lambda x: x[:-2])
# rename the columns of `categories`
categories.columns = category_colnames
# convert category values to numbers 0 or 1
for column in categories:
categories[column] = categories[column].apply(lambda x: str(x)[-1])
categories[column] = | pd.to_numeric(categories[column]) | pandas.to_numeric |
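# Illustrative sketch: the split / rename / 0-1 conversion performed by clean_data above, applied
# to an invented two-row 'categories' string (the real file carries 36 categories):
import pandas as pd

_toy = pd.DataFrame({"categories": ["related-1;request-0;offer-0", "related-0;request-1;offer-0"]})
_categories = _toy["categories"].str.split(pat=";", expand=True)    # one column per category
_categories.columns = _categories.iloc[0].apply(lambda x: x[:-2])   # drop the trailing "-0"/"-1"
for _column in _categories:
    _categories[_column] = pd.to_numeric(_categories[_column].apply(lambda x: str(x)[-1]))
print(_categories)  # two rows of 0/1 flags under the columns related / request / offer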
import networkx as nx
import numpy as np
from hiveplot import HivePlot
import pandas as pd
import matplotlib.pyplot as plt
# load data
pd_data = pd.read_csv('./kg_final_with_temporal_data_and_validated_inconsistencies.txt', sep='\t')
pd_data = pd_data[['Subject', 'Predicate', 'Object']]
# filter positives only
neg_predicates = [
'confers no resistance to antibiotic',
'not upregulated by antibiotic',
'no represses',
'no activates',]
def _check_match(x, predicates):
flag = False
for predicate in predicates:
if predicate in x:
flag = True
return flag
# remove the negatives
pd_data = pd_data[~pd_data['Predicate'].apply(lambda x: _check_match(x, neg_predicates))]
# select data by relation type
pd_cras = pd_data[pd_data['Predicate'].str.contains('resistance to antibiotic')]
pd_ubas = pd_data[pd_data['Predicate'].str.contains('upregulated by antibiotic')]
pd_represses = pd_data[pd_data['Predicate'].str.contains('represses')]
pd_activates = pd_data[pd_data['Predicate'].str.contains('activates')]
pd_has = pd_data[pd_data['Predicate'].str.contains('has')]
pd_iii = pd_data[pd_data['Predicate'].str.contains('is involved in')]
pd_ipo = pd_data[pd_data['Predicate'].str.contains('is part of')]
pd_tb = pd_data[pd_data['Predicate'].str.contains('targeted by')]
# get genes
genes = []
genes.extend(pd_cras['Subject'].to_numpy().tolist())
genes.extend(pd_ubas['Subject'].to_numpy().tolist())
genes.extend(pd_represses['Subject'].to_numpy().tolist())
genes.extend(pd_represses['Object'].to_numpy().tolist())
genes.extend(pd_activates['Subject'].to_numpy().tolist())
genes.extend(pd_activates['Object'].to_numpy().tolist())
genes.extend(pd_has['Subject'].to_numpy().tolist())
genes.extend(pd_iii['Subject'].to_numpy().tolist())
genes.extend(pd_ipo['Subject'].to_numpy().tolist())
genes.extend(pd_tb['Subject'].to_numpy().tolist())
genes = list(set(genes))
pd_genes = pd.DataFrame(genes, columns=['Label'])
pd_genes['Category'] = 'gene'
print('gene:', pd_genes.shape)
# get antibiotics
antibiotics = []
antibiotics.extend(pd_cras['Object'].to_numpy().tolist())
antibiotics.extend(pd_ubas['Object'].to_numpy().tolist())
antibiotics.extend(pd_tb['Object'].to_numpy().tolist())
antibiotics = list(set(antibiotics))
pd_antibiotics = pd.DataFrame(antibiotics, columns=['Label'])
pd_antibiotics['Category'] = 'antibiotic'
print('antibiotic:', pd_antibiotics.shape)
# get molecular_function
molecular_functions = pd_has['Object'].to_numpy().tolist()
molecular_functions = list(set(molecular_functions))
pd_molecular_functions = | pd.DataFrame(molecular_functions, columns=['Label']) | pandas.DataFrame |
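# Illustrative sketch: _check_match above is plain substring matching, so the negative-predicate
# filter keeps a triple only when none of the neg_predicates strings occur in its Predicate.
# The toy triples below are invented:
import pandas as pd

_toy = pd.DataFrame({
    "Subject": ["geneA", "geneB"],
    "Predicate": ["confers resistance to antibiotic", "confers no resistance to antibiotic"],
    "Object": ["drugX", "drugY"],
})
_neg = ["confers no resistance to antibiotic"]
_keep = ~_toy["Predicate"].apply(lambda p: any(n in p for n in _neg))
print(_toy[_keep])  # only the positive geneA / drugX triple survives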
import pandas as pd
tmtids = pd.read_csv('tmtids.csv')
tmtvac = pd.read_csv('tmt_speedvac_group.csv')
hphdate = | pd.read_csv('hphdate.csv') | pandas.read_csv |
import itertools
import time
import glob as gb
import librosa
import matplotlib.pyplot as plt
import librosa.display
import pickle
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
import os
import soundfile as sf
import sys
import warnings
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import tensorflow.keras as keras
from sklearn.svm import LinearSVC
from tensorflow.keras.layers import Input
from tensorflow.keras.regularizers import l2, l1_l2
import seaborn as sns
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import classification_report
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import statistics
from sklearn import tree
from sklearn.dummy import DummyClassifier
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
import random
from numpy import inf
import audioread
import librosa.segment
import numpy as np
import data_utils as du
import data_utils_input as dus
from data_utils_input import normalize_image, padding_MLS, padding_SSLM, borders
from keras import backend as k
from shutil import copyfile
import fnmatch
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from ast import literal_eval
from sklearn.feature_selection import RFE
from skimage.transform import resize
from tensorflow.python.ops.init_ops_v2 import glorot_uniform
import lightgbm as lgb
from treegrad import TGDClassifier
from sklearn.preprocessing import MultiLabelBinarizer
import logging
# import tensorflow_decision_forests as tfdf # linux only
from tensorflow.keras.layers.experimental import RandomFourierFeatures
from XBNet.training_utils import training, predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
import autokeras as ak
from djinn import djinn
import hyperas
from hyperopt import Trials, STATUS_OK, tpe
from hyperas.distributions import choice, uniform
from os import listdir, walk, getcwd, sep
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import math
from scipy import signal
import tensorflow.keras.layers as kl
import tensorflow.keras.applications as ka
import tensorflow.keras.optimizers as ko
import tensorflow.keras.models as km
import skimage.measure
import scipy
from scipy.spatial import distance
from tensorflow.keras.layers import Flatten, Dropout, Activation, BatchNormalization, Dense
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from tensorflow.keras.regularizers import l1
from keras.utils import np_utils
from pydub import AudioSegment
from tensorflow.keras.models import load_model
from sklearn.metrics import roc_curve, roc_auc_score, auc
import datetime
import glob
import re
import pyaudio
import wave
import torch
from matplotlib.pyplot import specgram
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import multilabel_confusion_matrix
tf.get_logger().setLevel(logging.ERROR)
k.set_image_data_format('channels_last')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if not sys.warnoptions:
warnings.simplefilter("ignore") # ignore warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# region Directories
MASTER_DIR = 'D:/Google Drive/Resources/Dev Stuff/Python/Machine Learning/Master Thesis/'
MASTER_INPUT_DIR = 'F:/Master Thesis Input/'
MASTER_LABELPATH = os.path.join(MASTER_INPUT_DIR, 'Labels/')
WEIGHT_DIR = os.path.join(MASTER_DIR, 'Weights/')
MIDI_Data_Dir = np.array(gb.glob(os.path.join(MASTER_DIR, 'Data/MIDIs/*')))
FULL_DIR = os.path.join(MASTER_INPUT_DIR, 'Full/')
FULL_MIDI_DIR = os.path.join(FULL_DIR, 'MIDI/')
FULL_LABELPATH = os.path.join(MASTER_LABELPATH, 'Full/')
# endregion
"""=================================================================================================================="""
# region DEPRECATED
# Deprecated
Train_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Train/*'))) # os.path.join(MASTER_DIR, 'Data/Train/*'
Test_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Test/*'))) # os.path.join(MASTER_DIR, 'Data/Test/*')))
Validate_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Validate/*'))) # os.path.join(MASTER_DIR,'Data/Val
MLS_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/MLS/')
SSLMCOS_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMCOS/')
SSLMEUC_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMEUC/')
SSLMCRM_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMCRM/')
TRAIN_DIR = os.path.join(MASTER_INPUT_DIR, 'Train/')
TEST_DIR = os.path.join(MASTER_INPUT_DIR, 'Test/')
VAL_DIR = os.path.join(MASTER_INPUT_DIR, 'Validate/')
TRAIN_LABELPATH = os.path.join(MASTER_LABELPATH, 'Train/')
TEST_LABELPATH = os.path.join(MASTER_LABELPATH, 'Test/')
VAL_LABELPATH = os.path.join(MASTER_LABELPATH, 'Validate/')
# Deprecated
def validate_directories():
print("Validating Training Directory...")
dus.validate_folder_contents(TRAIN_LABELPATH, os.path.join(TRAIN_DIR, 'MIDI/'), os.path.join(TRAIN_DIR, 'MLS/'),
os.path.join(TRAIN_DIR, 'SSLM_CRM_COS/'), os.path.join(TRAIN_DIR, 'SSLM_CRM_EUC/'),
os.path.join(TRAIN_DIR, 'SSLM_MFCC_COS/'), os.path.join(TRAIN_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
print("Validating Validation Directory...")
dus.validate_folder_contents(VAL_LABELPATH, os.path.join(VAL_DIR, 'MIDI/'), os.path.join(VAL_DIR, 'MLS/'),
os.path.join(VAL_DIR, 'SSLM_CRM_COS/'), os.path.join(VAL_DIR, 'SSLM_CRM_EUC/'),
os.path.join(VAL_DIR, 'SSLM_MFCC_COS/'), os.path.join(VAL_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
print("Validating Testing Directory...")
dus.validate_folder_contents(TEST_LABELPATH, os.path.join(TEST_DIR, 'MIDI/'), os.path.join(TEST_DIR, 'MLS/'),
os.path.join(TEST_DIR, 'SSLM_CRM_COS/'), os.path.join(TEST_DIR, 'SSLM_CRM_EUC/'),
os.path.join(TEST_DIR, 'SSLM_MFCC_COS/'), os.path.join(TEST_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
# Deprecated
def get_class_weights(labels, one_hot=False):
if one_hot is False:
n_classes = max(labels) + 1
else:
n_classes = len(labels[0])
class_counts = [0 for _ in range(int(n_classes))]
if one_hot is False:
for label in labels:
class_counts[label] += 1
else:
for label in labels:
class_counts[np.where(label == 1)[0][0]] += 1
return {i: (1. / class_counts[i]) * float(len(labels)) / float(n_classes) for i in range(int(n_classes))}
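# Quick sanity check (sketch): get_class_weights above is standard inverse-frequency balancing,
# weight_c = N / (n_classes * count_c). Labels below are invented; assumes the definition above.
_toy_labels = [0, 0, 0, 1, 1, 2]              # 3x class 0, 2x class 1, 1x class 2
print(get_class_weights(_toy_labels))         # approx {0: 0.67, 1: 1.0, 2: 2.0}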
# Deprecated
def buildValidationSet():
cnt = 1
numtrainfiles = len(fnmatch.filter(os.listdir(os.path.join(TRAIN_DIR, "MLS/")), '*.npy'))
for file in os.listdir(os.path.join(TRAIN_DIR, "MLS/")):
numvalfiles = len(fnmatch.filter(os.listdir(os.path.join(VAL_DIR, "MLS/")), '*.npy'))
if numvalfiles >= numtrainfiles * 0.2:
print(f"Validation set >= 20% of training set: {numvalfiles}/{numtrainfiles}")
break
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
formfolder = ""  # Start searching for the form folder whose label file matches this piece
for root, dirs, files in os.walk(os.path.join(MASTER_DIR, 'Labels/')):
flag = False
for tfile in files:
if tfile.split('/')[-1].split('.')[0] == name:
formfolder = os.path.join(root, file).split('/')[-1].split('\\')[0]
flag = True
if flag:
break
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), formfolder) + '/' + os.path.basename(name) + '.txt'
num_lines = sum(1 for _ in open(path))
if num_lines <= 2:
print("File has not been labeled with ground truth yet. Skipping...")
cnt += 1
continue
else:
src1 = os.path.join(TRAIN_DIR, "MLS/") + '/' + filename
src2 = os.path.join(TRAIN_DIR, "SSLM_CRM_COS/") + '/' + filename
src3 = os.path.join(TRAIN_DIR, "SSLM_CRM_EUC/") + '/' + filename
src4 = os.path.join(TRAIN_DIR, "SSLM_MFCC_COS/") + '/' + filename
src5 = os.path.join(TRAIN_DIR, "SSLM_MFCC_EUC/") + '/' + filename
dst1 = os.path.join(VAL_DIR, "MLS/") + '/' + filename
dst2 = os.path.join(VAL_DIR, "SSLM_CRM_COS/") + '/' + filename
dst3 = os.path.join(VAL_DIR, "SSLM_CRM_EUC/") + '/' + filename
dst4 = os.path.join(VAL_DIR, "SSLM_MFCC_COS/") + '/' + filename
dst5 = os.path.join(VAL_DIR, "SSLM_MFCC_EUC/") + '/' + filename
if os.path.exists(dst1) and os.path.exists(dst2) and os.path.exists(dst3) and os.path.exists(dst4) \
and os.path.exists(dst5):
print("File has already been prepared for training material. Skipping...")
cnt += 1
continue
else:
copyfile(src1, dst1)
copyfile(src2, dst2)
copyfile(src3, dst3)
copyfile(src4, dst4)
copyfile(src5, dst5)
cnt += 1
pass
# Deprecated
def findBestShape(mls_train, sslm_train):
dim1_mls = [i.shape[0] for i in mls_train.getImages()]
dim2_mls = [i.shape[1] for i in mls_train.getImages()]
print(dim1_mls)
print(dim2_mls)
dim1_sslm = [i.shape[0] for i in sslm_train.getImages()]
dim2_sslm = [i.shape[1] for i in sslm_train.getImages()]
print(dim1_sslm)
print(dim2_sslm)
dim1_mean = min(statistics.mean(dim1_mls), statistics.mean(dim2_sslm))
dim2_mean = min(statistics.mean(dim1_mls), statistics.mean(dim2_sslm))
dim1_median = min(statistics.median(dim1_mls), statistics.median(dim2_sslm))
dim2_median = min(statistics.median(dim1_mls), statistics.median(dim2_sslm))
dim1_mode = min(statistics.mode(dim1_mls), statistics.mode(dim2_sslm))
dim2_mode = min(statistics.mode(dim1_mls), statistics.mode(dim2_sslm))
print(f"Dimension 0:\nMean: {dim1_mean}\t\tMedian: {dim1_median}\t\tMode: {dim1_mode}")
print(f"Dimension 1:\nMean: {dim2_mean}\t\tMedian: {dim2_median}\t\tMode: {dim2_mode}")
# Deprecated WORKING FUSE MODEL
def old_formnn_fuse(output_channels=32, lrval=0.00001, numclasses=12):
cnn1_mel = formnn_mls(output_channels, lrval=lrval)
cnn1_sslm = formnn_sslm(output_channels, lrval=lrval)
combined = layers.concatenate([cnn1_mel.output, cnn1_sslm.output], axis=2)
cnn2_in = formnn_pipeline(combined, output_channels, lrval=lrval, numclasses=numclasses)
cnn2_in = layers.Dense(numclasses, activation='sigmoid')(cnn2_in)
opt = keras.optimizers.Adam(lr=lrval)
model = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy'])
model.summary() # Try categorical_crossentropy, metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
return model
# Deprecated WORKING PIPELINE MODEL
def old_formnn_pipeline(combined, output_channels=32, lrval=0.0001):
z = layers.ZeroPadding2D(padding=((1, 1), (6, 6)))(combined)
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5), strides=(1, 1),
padding='same', dilation_rate=(1, 3))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.5)(z)
# z = layers.Reshape(target_shape=(-1, 1, output_channels * 152))(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), strides=(1, 1), padding='same')(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.5)(z)
z = layers.Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding='same')(z)
z = layers.GlobalMaxPooling2D()(z)
return z
# Deprecated MLS MODEL
def cnn_mls(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=output_channels,
kernel_size=(5, 7), strides=(1, 1),
padding='same', # ((5 - 1) // 2, (7 - 1) // 2),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(200, 1150, 4) # (1,)
))
model.add(layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')) # (1, 1)))
# opt = keras.optimizers.Adam(lr=lrval)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# Deprecated SSLM MODEL
def cnn_sslm(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=output_channels,
kernel_size=(5, 7), strides=(1, 1),
padding='same', # ((5 - 1) // 2, (7 - 1) // 2),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(200, 1150, 4) # (3,)
))
model.add(layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')) # (1, 1)))
# opt = keras.optimizers.Adam(lr=lrval)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# Deprecated PIPELINE MODEL
def cnn2(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=(output_channels * 2),
kernel_size=(3, 5), strides=(1, 1),
padding='same', # ((3 - 1) // 2, (5 - 1) * 3 // 2),
dilation_rate=(1, 3),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(40, 1150, 8)
))
model.add(layers.SpatialDropout2D(rate=0.5))
model.add(
layers.Conv2D(output_channels * 152, 128, (1, 1), activation=layers.LeakyReLU(alpha=lrval), padding='same'))
# *72 for the 6-pool variant, *152 for the 2-pool3 variant
model.add(layers.SpatialDropout2D(rate=0.5))
model.add(layers.Conv2D(128, 1, (1, 1), padding='same')) # , padding='same'))
# x = np.reshape(x, -1, x.shape[1] * x.shape[2], 1, x.shape[3]) # reshape model?
# model = keras.layers.Reshape((-1, model.shape))(model)
# Feature maps are joined with the column dimension (frequency)
# opt = keras.optimizers.Adam(lr=lrval) # learning rate
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# model.summary()
return model
# Deprecated
def fuse_model(output_channels, lrval=0.0001):
cnn1_mel = cnn_mls(output_channels, lrval=lrval)
cnn1_sslm = cnn_sslm(output_channels, lrval=lrval)
combined = keras.layers.concatenate([cnn1_mel.output, cnn1_sslm.output])
cnn2_in = cnn2(output_channels, lrval=lrval)(combined)
opt = keras.optimizers.Adam(lr=lrval) # learning rate
model = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
model.get_layer(name='sequential_2').summary()
if not os.path.isfile(os.path.join(MASTER_DIR, 'Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True)
# if not os.path.isfile(os.path.join(MASTER_DIR, 'Model_Diagram_Inner.png')):
# plot_model(model.get_layer(name='sequential_2'), to_file=os.path.join(MASTER_DIR, 'Model_Diagram_Inner.png'),
# show_shapes=True, show_layer_names=True, expand_nested=True)
return model
# Probably deprecated
def prepare_train_data():
"""
Retrieve analysis of the following audio data for each training file:
- Log-scaled Mel Spectrogram (MLS)
- Self-Similarity Lag Matrix (Mel-Frequency Cepstral Coefficients/MFCCs - Cosine Distance, SSLMCOS)
- Self-Similarity Lag Matrix (MFCCs - Euclidian Distance, SSLMEUC)
- Self-Similarity Matrix (Chromas, SSLMCRM)
Checks to ensure that each file has been fully analyzed/labeled with ground truth
and not yet prepared for training material.
"""
cnt = 1
for folder in MIDI_Data_Dir:
for file in os.listdir(folder):
foldername = folder.split('\\')[-1]
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), foldername) + '/' + os.path.basename(name) + '.txt'
num_lines = sum(1 for _ in open(path))
if num_lines <= 2:
print("File has not been labeled with ground truth yet. Skipping...")
cnt += 1
continue
# elif os.path.basename(name) != "INSERT_DEBUG_NAME_HERE": # Debug output of specified file
else:
png1 = os.path.join(MASTER_DIR, 'Images/Train/') + "MLS/" + os.path.basename(name) + 'mls.png'
png2 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMCOS/" + os.path.basename(name) + 'cos.png'
png3 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMEUC/" + os.path.basename(name) + 'euc.png'
png4 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMCRM/" + os.path.basename(name) + 'crm.png'
if os.path.exists(png1) and os.path.exists(png2) and os.path.exists(png3) and os.path.exists(png4):
print("File has already been prepared for training material. Skipping...")
cnt += 1
continue
fullfilename = folder + '/' + filename
du.create_mls_sslm(fullfilename, name, foldername)
du.peak_picking(fullfilename, name, foldername)
cnt += 1
# Deprecated
def old_prepare_train_data():
"""
Retrieve analysis of the following audio data for each training file:
- Log-scaled Mel Spectrogram (MLS)
- Self-Similarity Lag Matrix (Mel-Frequency Cepstral Coefficients/MFCCs - Cosine Distance, SSLMCOS)
- Self-Similarity Lag Matrix (MFCCs - Euclidian Distance, SSLMEUC)
- Self-Similarity Matrix (Chromas, SSLMCRM)
"""
cnt = 1
for file in Train_Data_Dir:
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
du.create_mls_sslm(filename, name)
du.create_mls_sslm2(filename, name)
cnt += 1
# Deprecated
def old_prepare_model_training_input():
"""
Read in the input data for the model and return: images [MLS, SSLMCOS, EUC, and CRM], labels (seconds), labels (phrases)
"""
mls_images = np.asarray(du.ReadImagesFromFolder(MLS_Data_Dir), dtype=np.float32)
sslmcos_images = np.asarray(du.ReadImagesFromFolder(SSLMCOS_Data_Dir), dtype=np.float32)
sslmeuc_images = np.asarray(du.ReadImagesFromFolder(SSLMEUC_Data_Dir), dtype=np.float32)
sslmcrm_images = du.ReadImagesFromFolder(SSLMCRM_Data_Dir)
lbls_seconds, lbls_phrases = du.ReadLabelSecondsPhrasesFromFolder()
# print(lbls_seconds)
# print([i for i, x in enumerate(lbls_seconds) if len(x) != 560])
# lbls_seconds = np.array(lbls_seconds).flatten()
# lbls_seconds = [item for sublist in lbls_seconds for item in sublist]
# for i in range(len(lbls_seconds)):
# lbls_seconds[i] = np.asarray(lbls_seconds[i]).flatten()
lbls_seconds = padMatrix(lbls_seconds) # matrix must not be jagged in order to convert to ndarray of float32
# print(lbls_seconds)
lbls_seconds = np.asarray(lbls_seconds, dtype=np.float32)
mdl_images = [mls_images, sslmcos_images, sslmeuc_images, sslmcrm_images]
return mdl_images, lbls_seconds, lbls_phrases
# Probably deprecated
def padMatrix(a):
b = []
width = max(len(r) for r in a)
for i in range(len(a)):
if len(a[i]) != width:
x = np.pad(a[i], (width - len(a[i]), 0), 'constant', constant_values=0)
else:
x = a[i]
b.append(x)
return b
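# Quick illustration (sketch): padMatrix above left-pads each row with zeros up to the longest row
# so the jagged label lists can be converted to a float32 ndarray. The jagged input is invented;
# assumes numpy as np from the imports above.
_jagged = [[1, 2, 3], [4, 5], [6]]
print(np.asarray(padMatrix(_jagged), dtype=np.float32))
# [[1. 2. 3.]
#  [0. 4. 5.]
#  [0. 0. 6.]]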
# Probably deprecated
def debugInput(mimg, lbls, lblp):
# model_images = [0 => mls, 1 => sslmcos, 2 => sslmeuc, 3 => sslmcrm]
print("Model images:", mimg)
print("Model images length:", len(mimg))
for i in range(len(mimg)):
print("M_Imgs[" + str(i) + "] length:", len(mimg[i]))
print("Label seconds:", lbls)
print("Label phrases:", lblp)
print("Image shape:", mimg[0][0].shape) # returns (height, width, channels) := (216, 1162, 4)
# Deprecated
def old_trainModel():
model_images, labels_seconds, labels_phrases = old_prepare_model_training_input()
# debugInput(model_images, labels_seconds, labels_phrases)
# FIT MODEL AND USE CHECKPOINT TO SAVE BEST MODEL
trmodel = fuse_model(4) # (32) CNN Layer 1 Output Characteristic Maps
checkpoint = ModelCheckpoint("best_initial_model.hdf5", monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
model_history = trmodel.fit((np.array([model_images[0]], dtype=np.float32),
np.array([model_images[1], model_images[2], model_images[3]], dtype=np.float32)),
# np.asarray([tf.stack(model_images[1:2]), model_images[3]],
# (np.array([model_images[1], model_images[2]], dtype=np.float32),
# np.array(model_images[3])),
np.array(labels_seconds, dtype=np.float32),
batch_size=32, epochs=2000,
validation_data=(labels_seconds,),
callbacks=[checkpoint])
print(model_history)
# PLOT MODEL HISTORY OF ACCURACY AND LOSS OVER EPOCHS
plt.plot(model_history.history['accuracy'])
plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
# pd.DataFrame(model_history.history).plot() # figsize=(8, 5)
# plt.show()
# summarize history for loss
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_loss.png')
plt.show()
# Probably deprecated
def combine_generator(gen1, gen2):
while True:
yield next(gen1), next(gen2)
# endregion
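# Minimal illustration (sketch): combine_generator above just zips two endless generators into
# (batch_a, batch_b) tuples for a multi-input fit. The toy generators below are invented.
def _gen_numbers():
    i = 0
    while True:
        yield i
        i += 1

def _gen_letters():
    while True:
        for ch in "abc":
            yield ch

_pairs = combine_generator(_gen_numbers(), _gen_letters())
print(next(_pairs), next(_pairs), next(_pairs))  # (0, 'a') (1, 'b') (2, 'c')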
# region OldModelDefinition
# MIDI MODEL -- Try switching activation to ELU instead of RELU. Mimic visual/aural analysis using ensemble method
def formnn_midi(output_channels=32, numclasses=12):
inputC = layers.Input(shape=(None, 1))
w = layers.Conv1D(output_channels * 2, kernel_size=10, activation='relu', input_shape=(None, 1))(inputC)
w = layers.Conv1D(output_channels * 4, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01))(w)
w = layers.MaxPooling1D(pool_size=6)(w)
w = layers.Dropout(0.4)(w)
w = layers.Conv1D(output_channels * 4, kernel_size=10, activation='relu')(w)
w = layers.MaxPooling1D(pool_size=6)(w)
w = layers.Dropout(0.4)(w)
w = layers.GlobalMaxPooling1D()(w)
w = layers.Dense(output_channels * 8, activation='relu')(w)
w = layers.Dropout(0.4)(w)
w = layers.Dense(numclasses)(w)
w = layers.Softmax()(w)
w = keras.models.Model(inputs=inputC, outputs=w)
return w
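# Smoke test (sketch): with the default 32 output channels the MIDI branch above ends in a 12-way
# softmax, so a batch of dummy 1-D sequences maps to shape (batch, 12). Sequence length 500 is
# arbitrary (just long enough to survive the two kernel-10 convolutions and pool-6 layers);
# assumes the TF/Keras versions used elsewhere in this file.
_midi_branch = formnn_midi(output_channels=32, numclasses=12)
_dummy_midi = np.random.rand(4, 500, 1).astype(np.float32)   # four fake MIDI-derived vectors
print(_midi_branch.predict(_dummy_midi).shape)               # expected: (4, 12)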
def formnn_mls2(output_channels=32):
inputA = layers.Input(batch_input_shape=(None, None, None, 1))
x = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(inputA)
x = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(x)
x = keras.models.Model(inputs=inputA, outputs=x)
return x
def formnn_sslm2(output_channels=32):
inputB = layers.Input(batch_input_shape=(None, None, None, 1)) # (None, None, None, 4)
y = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(inputB)
y = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(y)
y = layers.AveragePooling2D(pool_size=(1, 4))(y)
y = keras.models.Model(inputs=inputB, outputs=y)
return y
def formnn_pipeline2(combined, output_channels=32, numclasses=12):
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5),
padding='same', dilation_rate=(1, 3), kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01), activation='relu')(combined)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(z)
z = layers.MaxPooling2D(pool_size=3)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(z)
z = layers.MaxPooling2D(pool_size=3)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
z = layers.GlobalMaxPooling2D()(z)
# z = layers.Dense(output_channels * 8, activation='relu')(z)
# z = layers.Dropout(rate=0.3)(z)
z = layers.Dense(numclasses)(z)
z = layers.Softmax()(z)
return z
"""=======================ORIGINAL MODEL======================="""
# MLS MODEL
def formnn_mls(output_channels=32, lrval=0.0001):
inputA = layers.Input(batch_input_shape=(None, None, None, 1))
x = layers.ZeroPadding2D(padding=((2, 2), (3, 3)))(inputA)
x = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(x)
x = layers.LeakyReLU(alpha=lrval)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(x)
x = keras.models.Model(inputs=inputA, outputs=x)
return x
# SSLM MODEL
def formnn_sslm(output_channels=32, lrval=0.0001):
inputB = layers.Input(batch_input_shape=(None, None, None, 1)) # (None, None, None, 4)
y = layers.ZeroPadding2D(padding=((2, 2), (3, 3)))(inputB)
y = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(y)
y = layers.LeakyReLU(alpha=lrval)(y)
y = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(y)
y = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(y)
y = layers.AveragePooling2D(pool_size=(1, 4))(y)
y = keras.models.Model(inputs=inputB, outputs=y)
return y
# PIPELINE MODEL
def formnn_pipeline(combined, output_channels=32, lrval=0.0001, numclasses=12):
z = layers.ZeroPadding2D(padding=((1, 1), (6, 6)))(combined)
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5), strides=(1, 1),
padding='same', dilation_rate=(1, 3), kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
# z = layers.Reshape(target_shape=(-1, 1, output_channels * 152))(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
# z = layers.SpatialDropout2D(rate=0.5)(z)
z = layers.Conv2D(filters=output_channels * 8, kernel_size=(1, 1), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.GlobalAveragePooling2D()(z)
# z = layers.Flatten()(z)
z = layers.Dense(numclasses)(z)
z = layers.Softmax()(z)
# Softmax -> Most likely class where sum(probabilities) = 1, Sigmoid -> Multiple likely classes, sum != 1
return z
def formnn_fuse(output_channels=32, lrval=0.0001, numclasses=12):
cnn1_mel = formnn_mls(output_channels, lrval=lrval)
cnn1_sslm = formnn_sslm(output_channels, lrval=lrval)
combined = layers.concatenate([cnn1_mel.output, cnn1_sslm.output], axis=2)
cnn2_in = formnn_pipeline(combined, output_channels, lrval=lrval, numclasses=numclasses)
# opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
imgmodel = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
midmodel = formnn_midi(output_channels, numclasses=numclasses)
averageOut = layers.Average()([imgmodel.output, midmodel.output])
model = keras.models.Model(inputs=[imgmodel.input[0], imgmodel.input[1], midmodel.input], outputs=averageOut)
model.compile(loss=['categorical_crossentropy'], optimizer=opt, metrics=['accuracy'])
# model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy'])
model.summary() # Try categorical_crossentropy, metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
return model
def old_trainFormModel():
batch_size = 10
# region MODEL_DIRECTORIES
mls_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'MLS/'), label_path=TRAIN_LABELPATH, # end=90,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_CRM_COS/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_CRM_EUC/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_MFCC_COS/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_MFCC_EUC/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_train = dus.BuildMIDIloader(os.path.join(TRAIN_DIR, 'MIDI/'), label_path=TRAIN_LABELPATH,
batch_size=batch_size)
mls_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'MLS/'), label_path=VAL_LABELPATH,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_CRM_COS/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_CRM_EUC/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_MFCC_COS/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_MFCC_EUC/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_val = dus.BuildMIDIloader(os.path.join(VAL_DIR, 'MIDI/'), label_path=VAL_LABELPATH, batch_size=batch_size)
mls_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'MLS/'), label_path=TEST_LABELPATH,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_CRM_COS/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_CRM_EUC/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_MFCC_COS/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_MFCC_EUC/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_test = dus.BuildMIDIloader(os.path.join(TEST_DIR, 'MIDI/'), label_path=TEST_LABELPATH, batch_size=batch_size)
# endregion
# findBestShape(mls_train, sslm_cmcos_train)
train_datagen = multi_input_generator(mls_train, sslm_cmcos_train, sslm_cmeuc_train, sslm_mfcos_train,
sslm_mfeuc_train, midi_train)
valid_datagen = multi_input_generator(mls_val,
sslm_cmcos_val, sslm_cmeuc_val, sslm_mfcos_val, sslm_mfeuc_val, midi_val)
test_datagen = multi_input_generator(mls_test,
sslm_cmcos_test, sslm_cmeuc_test, sslm_mfcos_test, sslm_mfeuc_test, midi_test)
steps_per_epoch = len(list(mls_train)) // batch_size
steps_per_valid = len(list(mls_val)) // batch_size
label_encoder = LabelEncoder()
label_encoder.classes_ = np.load(os.path.join(MASTER_DIR, 'form_classes.npy'))
if mls_train.getNumClasses() != mls_val.getNumClasses() or mls_train.getNumClasses() != mls_test.getNumClasses():
print(f"Train and validation or testing datasets have differing numbers of classes: "
f"{mls_train.getNumClasses()} vs. {mls_val.getNumClasses()} vs. {mls_test.getNumClasses()}")
# classweights = get_class_weights(mls_train.getLabels().numpy().squeeze(axis=-1), one_hot=True)
"""
# Show class weights as bar graph
barx, bary = zip(*sorted(classweights.items()))
plt.figure(figsize=(12, 8))
plt.bar(label_encoder.inverse_transform(barx), bary, color='green')
for i in range(len(barx)):
plt.text(i, bary[i]//2, round(bary[i], 3), ha='center', color='white')
plt.title('Train Class Weights')
plt.ylabel('Weight')
plt.xlabel('Class')
plt.savefig('Initial_Model_Class_Weights.png')
plt.show()
"""
model = formnn_fuse(output_channels=32, lrval=0.00005, numclasses=mls_train.getNumClasses()) # Try 'val_loss'?
# model.load_weights('best_initial_model.hdf5')
early_stopping = EarlyStopping(patience=5, verbose=5, mode="auto")
checkpoint = ModelCheckpoint(os.path.join(MASTER_DIR, 'best_formNN_model.hdf5'), monitor='val_accuracy', verbose=0,
save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
model_history = model.fit(train_datagen, epochs=100, verbose=1, validation_data=valid_datagen, shuffle=False,
callbacks=[checkpoint, early_stopping], batch_size=batch_size, # class_weight=classweight
steps_per_epoch=steps_per_epoch, validation_steps=steps_per_valid)
print("Training complete!\n")
# region LossAccuracyGraphs
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_Loss.png')
plt.show()
plt.plot(model_history.history['accuracy'])
plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
pd.DataFrame(model_history.history).plot()
plt.show()
# endregion
predictions = model.predict_generator(valid_datagen, steps=1, verbose=1, workers=0)
print(predictions)
print("Prediction complete!")
inverted = label_encoder.inverse_transform([np.argmax(predictions[0, :])])
print("Predicted: ", end="")
print(inverted, end=""),
print("\tActual: ", end="")
print(label_encoder.inverse_transform([np.argmax(mls_val.getFormLabel(mls_val.getCurrentIndex()-1))]))
print("Name: " + mls_val.getSong(mls_val.getCurrentIndex()-1))
print("\nEvaluating...")
score = model.evaluate_generator(test_datagen, steps=len(list(mls_test)), verbose=1)
print("Evaluation complete!\nScore:")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
# region EvaluationGraphs
predictions = model.predict(test_datagen, steps=len(list(mls_test)), verbose=1)
predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
actual = mls_test.getLabels().numpy().argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]],
columns=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('Initial_Model_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('Initial_Model_Classification_Report.png')
plt.show()
# endregion
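# Illustrative sketch of the evaluation recipe above (confusion matrix + classification report),
# stripped of the generator plumbing. The labels below are invented form-class names.
_y_true = ["sonata", "rondo", "sonata", "ternary", "rondo", "sonata"]
_y_pred = ["sonata", "sonata", "sonata", "ternary", "rondo", "rondo"]
_cm = confusion_matrix(_y_true, _y_pred, labels=["sonata", "rondo", "ternary"])
_cm = pd.DataFrame(_cm, index=["sonata", "rondo", "ternary"], columns=["sonata", "rondo", "ternary"])
sns.heatmap(_cm, annot=True, fmt='d', cmap='Blues', linewidths=1, linecolor='white')
plt.title('Toy Confusion Matrix')
plt.show()
print(classification_report(_y_true, _y_pred))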
def formnn_cnn_mod(input_dim_1, filters=64, lrval=0.0001, numclasses=12):
model = tf.keras.Sequential()
model.add(layers.Conv1D(filters, kernel_size=10, activation='relu', input_shape=(input_dim_1, 1)))
model.add(layers.Dropout(0.4)) # ?
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=6))
# model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Flatten())
model.add(layers.Dense(filters*4, activation='relu'))
# model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Dense(numclasses, activation='softmax'))  # softmax output over the form classes
opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
# opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
def formnn_cnn_old(input_dim_1, filters=64, lrval=0.0001, numclasses=12):
model = tf.keras.Sequential()
model.add(layers.Conv1D(filters, kernel_size=10, activation='relu', input_shape=(input_dim_1, 1)))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.Dropout(0.4))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.Dropout(0.4))
model.add(layers.Flatten())
model.add(layers.Dense(filters*4, activation='relu'))
model.add(layers.Dropout(0.4))
model.add(layers.Dense(numclasses, activation='softmax'))  # softmax output over the form classes
# opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# endregion
# region OldWorkingModelDefinition
def formnn_cnn(input_dim_1, filters=8, lrval=0.0001, numclasses=12, kernelsize=3, l1reg=0.01, l2reg=0.01, dropout=0.6):
np.random.seed(9)
X_input = Input(shape=(input_dim_1, 1))
X = layers.Conv1D(filters, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X_input)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Conv1D(filters * 2, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Conv1D(filters * 4, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Flatten()(X)
# X = layers.Conv1D(filters * 8, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
# bias_regularizer=l2(0.5))(X)
X = layers.Dense(filters * 8, kernel_initializer=glorot_uniform(seed=9), # 256
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=-1)(X)
X = layers.Activation('relu')(X)
# X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
# X = layers.Flatten()(X)
X = layers.Dense(numclasses, activation='sigmoid', kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
# opt = keras.optimizers.Adam(lr=lrval)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model = keras.models.Model(inputs=X_input, outputs=X, name='FormModel')
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def oldWorkingtrainFormModel():
# region DataPreProcessing
df = pd.read_excel(os.path.join(MASTER_DIR, 'Data/full_augmented_dataset.xlsx'))
# df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx'))
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
# """
df = df.drop(columns=['sslm_chroma_cos_mean', 'sslm_chroma_cos_var', 'sslm_chroma_euc_mean', 'sslm_chroma_euc_var',
'sslm_mfcc_cos_mean', 'sslm_mfcc_cos_var', 'sslm_mfcc_euc_mean', 'sslm_mfcc_euc_var'])
# """
df.drop(columns=['spectral_bandwidth_var', 'spectral_centroid_var', 'spectral_flatness_var', 'spectral_rolloff_var',
'zero_crossing_var', 'fourier_tempo_mean', 'fourier_tempo_var'], inplace=True) # Remove useless
# nonlist = df[['duration', 'spectral_contrast_var']]
nonlist = df[['duration']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'spectral_contrast_var', 'formtype'],
inplace=True)
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var', 'mel_mean', 'mel_var', 'chroma_stft_mean', 'chroma_stft_var']]
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var']]
df = df[['ssm_log_mel_mean']] # best decision tree accuracy
print("Fixing broken array cells as needed...")
    def fix_broken_arr(strx):
        if '[' in strx:
            if ']' in strx:
                return strx
            else:
                return strx + ']'
        return strx  # cells without an array string are returned unchanged rather than None
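    # Illustrative behaviour (hypothetical cell values): fix_broken_arr("[0.1, 0.2") returns "[0.1, 0.2]",
    # while an already-complete cell such as "[0.1, 0.2]" is returned unchanged.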
for col in df.columns:
df[col] = df[col].apply(lambda x: fix_broken_arr(x))
# print("Headers:", pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1).columns)
# Headers: Index(['piece_name', 'composer', 'filename', 'duration', 'ssm_log_mel_mean', 'formtype'], dtype='object')
print("Done processing cells, building training set...")
# d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()).add_prefix(col) for col in df.columns]
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
df = pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1) # print(df)
train, test = train_test_split(df, test_size=0.169, random_state=0, stratify=df['formtype']) # test_s=.169 gave 50%
# df.to_csv(os.path.join(MASTER_DIR, 'full_modified_dataset.csv'))
X_train = train.iloc[:, 3:-1]
# X_train_names = train.iloc[:, 0:3]
y_train = train.iloc[:, -1]
print("Train shape:", X_train.shape)
X_test = test.iloc[:, 3:-1]
# X_test_names = test.iloc[:, 0:3]
y_test = test.iloc[:, -1]
print("Test shape:", X_test.shape)
# Normalize Data
"""
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train) # Good for decision tree
X_test = min_max_scaler.fit_transform(X_test)
"""
# X_train = preprocessing.scale(X_train)
# X_test = preprocessing.scale(X_test)
# """
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std # Good for decision tree
X_test = (X_test - mean) / std
# """
print("Normalized Train shape:", X_train.shape)
print("Normalized Test shape:", X_test.shape)
# Convert to arrays for keras
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
label_encoder = LabelEncoder()
old_y_train = y_train
# old_y_test = y_test
int_y_train = label_encoder.fit_transform(y_train)
print(int_y_train.shape)
# int_y_train = int_y_train.reshape(len(int_y_train), 1)
# int_y_test = label_encoder.fit_transform(y_test)
# int_y_test = int_y_test.reshape(len(int_y_test), 1)
    y_train = to_categorical(label_encoder.fit_transform(y_train))
    y_test = to_categorical(label_encoder.transform(y_test))  # reuse the training encoding rather than refitting on test labels
print(y_train.shape, y_test.shape)
print(label_encoder.classes_, "\n")
""" BASE MODEL """
# DummyClassifier makes predictions while ignoring input features
dummy_clf = DummyClassifier(strategy="stratified")
dummy_clf.fit(X_train, y_train)
DummyClassifier(strategy='stratified')
dummy_clf.predict(X_test)
print("Dummy classifier accuracy:", dummy_clf.score(X_test, y_test))
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
clf.predict(X_test)
print("Decision tree accuracy:", clf.score(X_test, y_test))
""" FEATURE TUNING """
selector = SelectKBest(f_classif, k=15) # 1000 if using RFE
Z_train = selector.fit_transform(X_train, old_y_train)
skb_values = selector.get_support()
Z_test = X_test[:, skb_values]
np.save(os.path.join(MASTER_DIR, "selectkbest_indices.npy"), skb_values)
print(Z_train.shape)
print(Z_test.shape)
"""
plt.title('Feature Importance')
plt.ylabel('Score')
plt.xlabel('Feature')
plt.plot(selector.scores_)
plt.savefig('Initial_Feature_Importance.png')
plt.show()
"""
print("Indices of top 10 features:", (-selector.scores_).argsort()[:10])
""" KBEST MODEL """
clf = tree.DecisionTreeClassifier()
clf = clf.fit(Z_train, y_train)
clf.predict(Z_test)
# treedepth = clf.tree_.max_depth
skb_score = clf.score(Z_test, y_test)
print("K-Best Decision tree accuracy:", skb_score) # Highest score: 84.3% accuracy
# """
# Accuracy 0.211, stick with SKB? Gives good loss though
clf = LinearSVC(C=0.01, penalty="l1", dual=False)
clf.fit(X_train, old_y_train)
rfe_selector = RFE(clf, 15, verbose=5)
rfe_selector = rfe_selector.fit(Z_train, old_y_train)
# rfe_selector = rfe_selector.fit(X_train, old_y_train)
rfe_values = rfe_selector.get_support()
# np.save(os.path.join(MASTER_DIR, "rfebest_indices.npy"), rfe_values)
print("Indices of RFE important features:", np.where(rfe_values)[0])
W_train = Z_train[:, rfe_values]
W_test = Z_test[:, rfe_values]
# "" " RFE MODEL " ""
clf = tree.DecisionTreeClassifier()
clf = clf.fit(W_train, y_train)
clf.predict(W_test)
rfe_score = clf.score(W_test, y_test)
print("RFE Decision tree accuracy:", rfe_score) # Highest score: 83.7% accuracy, typically better than SKB
"""
plt.figure(figsize=(30, 20)) # set plot size (denoted in inches)
tree.plot_tree(clf, fontsize=10)
plt.show()
plt.savefig('tree_high_dpi', dpi=100)
"""
# """
# endregion
# Reshape to 3D tensor for keras
if skb_score > rfe_score:
X_train = Z_train[:, :, np.newaxis]
X_test = Z_test[:, :, np.newaxis]
# X1_train = Z_train
# X1_test = Z_test
else:
X_train = W_train[:, :, np.newaxis]
X_test = W_test[:, :, np.newaxis]
X1_train = W_train
X1_test = W_test
treedepth = clf.tree_.max_depth
# print(treedepth)
    # X_train = X_train[:, :, np.newaxis]  # already expanded to 3D in the branch above
    # X_test = X_test[:, :, np.newaxis]
"""
# Autokeras Model - 32% accuracy
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=10)
model_history = clf.fit(W_train, y_train, epochs=100)
predicted_y = clf.predict(W_test)
print(predicted_y)
print(clf.evaluate(W_test, y_test))
model = clf.export_model()
model.summary()
# model.save('best_auto_model.h5', save_format='tf')
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_CNN_AutoModel_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_CNN_AutoModel_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
"""
"""
# Deep CNN Decision Tree - 50% accuracy
feature_extractor = Sequential()
feature_extractor.add(layers.Conv1D(16, 3, padding='valid', activation='relu', input_shape=(X_train.shape[1], 1),
strides=1, kernel_regularizer=l1_l2(l1=0.01, l2=0.01)))
feature_extractor.add(layers.MaxPooling1D(2))
feature_extractor.add(layers.Dropout(0.6))
feature_extractor.add(layers.BatchNormalization())
feature_extractor.add(layers.Conv1D(32, 3, padding='valid', activation='relu',
kernel_regularizer=l1_l2(l1=0.01, l2=0.01), strides=1))
# New layers for prediction outside of feature extraction model
x = feature_extractor.output
x = layers.MaxPooling1D(4)(x)
x = layers.Dropout(0.6)(x)
x = layers.BatchNormalization()(x)
x = layers.Flatten()(x)
prediction_layer = layers.Dense(len(label_encoder.classes_), activation='softmax')(x)
# New model combining both layer sets
lrval = 0.1
# opt = keras.optimizers.Adam(lr=lrval)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
cnn_model = keras.models.Model(inputs=feature_extractor.input, outputs=prediction_layer)
cnn_model.compile(optimizer=opt, loss='categorical_crossentropy')
for i in range(10):
cnn_model.fit(X_train, y_train, verbose=1)
# Predict only the output of the feature extraction model
X_ext = feature_extractor.predict(X_train)
dtc = tree.DecisionTreeClassifier() # criterion='entropy'
nsamples, nx, ny = X_ext.shape
X_ext = X_ext.reshape((nsamples, nx * ny))
# Train the decision tree on the extracted features
dtc.fit(X_ext, y_train)
# Evaluate decision tree
X_ext = feature_extractor.predict(X_test)
nsamples, nx, ny = X_ext.shape
X_ext = X_ext.reshape((nsamples, nx * ny))
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep CNN Decision tree accuracy:", dtc_score)
# """
"""
# Deep SVM-NN - 23% accuracy
model = keras.Sequential([
keras.Input(shape=(X_train.shape[1],)),
RandomFourierFeatures(output_dim=4096, scale=10.0, kernel_initializer="gaussian"),
layers.Dense(units=len(label_encoder.classes_)),
])
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.hinge,
metrics=[keras.metrics.CategoricalAccuracy(name="acc")],
)
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test), verbose=1)
"""
"""
# Deep ANN Decision Tree - 53% accuracy
model = Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(len(label_encoder.classes_), activation='softmax'))
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test), verbose=1)
model.fit(X_train, y_train, epochs=10000)
score, acc = model.evaluate(X_test, y_test, verbose=1) # ~26-35% accuracy
feature_vectors_model = keras.models.Model(model.input, model.get_layer('dense_3').output)
X_ext = feature_vectors_model.predict(X_train)
dtc = tree.DecisionTreeClassifier()
dtc.fit(X_ext, y_train)
X_ext = feature_vectors_model.predict(X_test)
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep ANN Decision Tree accuracy:", dtc_score)
"""
"""
# Deep Jointly-Informed Neural Network (DJINN) - 45% accuracy
modelname = "class_djinn_test"
ntrees = 1 # number of trees = number of neural nets in ensemble
maxdepth = 18 # 4 or 20-25; max depth of tree -- optimize this for each data set
dropout_keep = 1.0 # dropout typically set to 1 for non-Bayesian models
model = djinn.DJINN_Classifier(ntrees, maxdepth, dropout_keep)
optimal = model.get_hyperparameters(X1_train, y_train, random_state=1)
batchsize = optimal['batch_size']
learnrate = optimal['learn_rate']
epochs = optimal['epochs']
model.train(X1_train, int_y_train, epochs=epochs, learn_rate=learnrate, batch_size=batchsize,
display_step=1, save_files=True, file_name=modelname,
save_model=True, model_name=modelname, random_state=1)
m = model.predict(X1_test)
acc = accuracy_score(int_y_test, m.flatten())
print('DJINN Accuracy: ', acc)
model.close_model()
"""
"""
# XGBoosted Neural Network - 24% accuracy
model = XBNETClassifier(X1_train, int_y_train, num_layers=2)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
m, acc, lo, val_ac, val_lo = run_XBNET(X1_train, X1_test, int_y_train, int_y_test, model,
criterion, optimizer, batch_size=32, epochs=100)
print(predict(m, X1_test))
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(acc, label='XBNET Training Accuracy')
plt.plot(val_ac, label='XBNET Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(lo, label='XBNET Training Loss')
plt.plot(val_lo, label='XBNET Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
"""
"""
# TreeGrad Deep Neural Decision Forest - 83% accuracy
model = TGDClassifier(num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
autograd_config={'refit_splits': True})
model.fit(X1_train, int_y_train)
acc = accuracy_score(int_y_test, model.predict(X1_test))
print('TreeGrad Deep Neural Decision Forest accuracy: ', acc)
predictions = model.predict(X1_test)
# predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
# actual = y_test.argmax(axis=1)
actual = int_y_test.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_], columns=[i for i in label_encoder.classes_])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('TreeGrad_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('TreeGrad_Classification_Report.png')
plt.show()
with open('treegrad_model_save.pkl', 'wb') as f:
pickle.dump(model, f)
with open('treegrad_model_save.pkl', 'rb') as f:
model2 = pickle.load(f)
acc = accuracy_score(int_y_test, model2.predict(X1_test))
print('TreeGrad Deep Neural Decision Forest accuracy from save: ', acc)
"""
# """
model = formnn_cnn(X_train.shape[1], filters=32, lrval=0.003, numclasses=len(label_encoder.classes_),
kernelsize=10, l1reg=0.000001, l2reg=0.000001, dropout=0.6)
model.summary()
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_CNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_CNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
history_loss = []
history_val_loss = []
history_accuracy = []
history_val_accuracy = []
num_epochs = 0
"""
# Try predict
model.load_weights('best_form_model_50p.hdf5')
result = model.predict(X_test)
percent_correct = 0
pred_table = pd.DataFrame(columns=["Piece", "Predicted", "Actual"])
X_test_names = np.array(X_test_names)
for i in range(len(result)):
resultlbl = label_encoder.inverse_transform([np.argmax(result[i, :])])
actuallbl = label_encoder.inverse_transform([np.argmax(y_test[i, :])])
pred_table.loc[i] = ([X_test_names[i][2], resultlbl, actuallbl])
percent_correct += 1 if resultlbl == actuallbl else 0
print(pred_table.to_string(index=False))
print("Accuracy: " + str(float(percent_correct/len(result))*100) + "%")
return
"""
# model.load_weights('best_form_model_44p.hdf5')
model.load_weights('best_form_new_model40p.hdf5')
# while True:
for i in range(0, 3000):
# early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=5, mode="auto")
checkpoint = ModelCheckpoint("best_form_new_model.hdf5", monitor='val_accuracy', verbose=0,
save_best_only=False, mode='max', save_freq='epoch', save_weights_only=True)
model_history = model.fit(X_train, y_train, batch_size=32, epochs=1, validation_data=(X_test, y_test),
callbacks=[checkpoint]) # , early_stopping epochs=2000 loss hits 0.7
history_loss.append(model_history.history['loss'])
history_val_loss.append(model_history.history['val_loss'])
history_accuracy.append(model_history.history['accuracy'])
history_val_accuracy.append(model_history.history['val_accuracy'])
num_epochs += 1
print("Epochs completed:", num_epochs)
print("\nEvaluating...")
score = model.evaluate(X_test, y_test, verbose=1)
print("Evaluation complete!\n__________Score__________")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
feature_vectors_model = keras.models.Model(model.input, model.get_layer('dense').output)
X_ext = feature_vectors_model.predict(X_train)
dtc = tree.DecisionTreeClassifier()
"""
    # More trees perform worse: rfc0 ~28%, everything else 12-15%
rfc0 = RandomForestClassifier(n_estimators=1)
rfc1 = RandomForestClassifier(n_estimators=10)
rfc2 = RandomForestClassifier(n_estimators=100)
rfc3 = RandomForestClassifier(n_estimators=1000)
rfc4 = RandomForestClassifier(n_estimators=int(np.sqrt(X_train.shape[1])))
rfc5 = RandomForestClassifier(n_estimators=int(X_train.shape[1]/2))
"""
dtc.fit(X_ext, y_train)
X_ext = feature_vectors_model.predict(X_test)
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep CNN Decision Tree 2 accuracy:", dtc_score) # ^ 26%, 29%
# if score[1] >= 0.51:
# region EvaluationGraphs
plt.plot(history_loss) # plt.plot(model_history.history['loss'])
plt.plot(history_val_loss) # plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_Loss.png')
plt.show()
plt.plot(history_accuracy) # plt.plot(model_history.history['accuracy'])
plt.plot(history_val_accuracy) # plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
# pd.DataFrame(model_history.history).plot()
# plt.show()
predictions = model.predict(X_test, verbose=1)
predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
actual = y_test.argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_], columns=[i for i in label_encoder.classes_])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('Initial_Model_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('Initial_Model_Classification_Report.png')
plt.show()
# break
# elif num_epochs >= 50:
# model.load_weights('best_form_model_44p.hdf5')
# num_epochs = 0
# continue
# endregion
# """
pass
def old_preparePredictionData(filepath, savetoexcel=False):
print("Preparing MLS")
mls = dus.util_main_helper(feature="mls", filepath=filepath, predict=True)
print("Preparing SSLM-MFCC-COS")
sslm_mfcc_cos = dus.util_main_helper(feature="mfcc", filepath=filepath, mode="cos", predict=True)
print("Preparing SSLM-MFCC-EUC")
sslm_mfcc_euc = dus.util_main_helper(feature="mfcc", filepath=filepath, mode="euc", predict=True)
print("Preparing SSLM-CRM-COS")
sslm_crm_cos = dus.util_main_helper(feature="chroma", filepath=filepath, mode="cos", predict=True)
print("Preparing SSLM-CRM-EUC")
sslm_crm_euc = dus.util_main_helper(feature="chroma", filepath=filepath, mode="euc", predict=True)
midimages = [mls, sslm_mfcc_cos, sslm_mfcc_euc, sslm_crm_cos, sslm_crm_euc]
cur_data = []
for image in midimages:
if image.ndim == 1:
raise ValueError("Erroneous Image Shape:", image.shape, image.ndim)
else:
image1 = np.mean(image, axis=0)
image2 = np.var(image, axis=0)
image = np.array([image1, image2])
cur_data.append(image)
print("Preparing audio feature data")
dfmid = dus.get_midi_dataframe(building_df=True)
dfmid = dus.get_audio_features(dfmid, 0, filepath, building_df=True)
dfmid = dfmid.fillna(0)
dfmid = np.array(dfmid)
sngdur = 0
with audioread.audio_open(filepath) as f:
sngdur += f.duration
np.set_string_function(
lambda x: repr(x).replace('(', '').replace(')', '').replace('array', '').replace(" ", ' '), repr=False)
np.set_printoptions(threshold=inf)
print("Building feature table")
df = get_column_dataframe()
c_flname = os.path.basename(filepath.split('/')[-1].split('.')[0])
c_sngdur = sngdur
c_slmmls = cur_data[0]
c_scmcos = cur_data[1]
c_scmeuc = cur_data[2]
c_smfcos = cur_data[3]
c_smfeuc = cur_data[4]
c_midinf = dfmid[0]
df.loc[0] = ["TBD", "TBD", c_flname, c_sngdur, c_slmmls[0], c_slmmls[1], c_scmcos[0], c_scmcos[1],
c_scmeuc[0], c_scmeuc[1], c_smfcos[0], c_smfcos[1], c_smfeuc[0], c_smfeuc[1],
c_midinf[2], c_midinf[3], c_midinf[4], c_midinf[5], c_midinf[6], c_midinf[7],
c_midinf[8], c_midinf[9], c_midinf[10], c_midinf[11], c_midinf[12], c_midinf[13],
c_midinf[14], c_midinf[15], c_midinf[0], c_midinf[1], c_midinf[16], c_midinf[17],
c_midinf[18], c_midinf[19], c_midinf[20], c_midinf[21], c_midinf[22], c_midinf[23],
c_midinf[24], c_midinf[25], c_midinf[26], c_midinf[27], c_midinf[28], c_midinf[29], "TBD"]
for col in df.columns:
df[col] = df[col].apply(lambda x: str(x)
.replace(", dtype=float32", "").replace("],", "]")
.replace("dtype=float32", "").replace("...,", ""))
if savetoexcel:
df.to_excel(os.path.join(MASTER_DIR, c_flname + '.xlsx'), index=False)
return df
def old_predictForm():
midpath = input("Enter path to folder or audio file: ")
df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx')) # 15,330
df = pd.DataFrame(df.loc[[0, 153]]).reset_index()
df2 = pd.DataFrame()
if not os.path.exists(midpath):
raise FileNotFoundError("Path not found or does not exist.")
else:
if os.path.isfile(midpath):
# df2 = pd.read_excel(os.path.join(MASTER_DIR, 'brahms_opus117_1.xlsx'))
df2 = old_preparePredictionData(midpath, savetoexcel=False)
elif os.path.isdir(midpath):
if midpath[-1] != "\\" or midpath[-1] != "/":
if "\\" in midpath:
midpath = midpath + "\\"
else:
midpath = midpath + "/"
cnt = 0
audio_extensions = ["3gp", "aa", "aac", "aax", "act", "aiff", "alac", "amr", "ape", "au", "awb", "dct",
"dss", "dvf", "flac", "gsm", "iklax", "ivs", "m4a", "m4b", "m4p", "mmf", "mp3", "mpc",
"msv", "nmf", "ogg", "oga", "mogg", "opus", "ra", "rm", "raw", "rf64", "sln", "tta",
"voc", "vox", "wav", "wma", "wv", "webm", "8svx", "cda", "mid", "midi", "MID" "mp4"]
for (mid_dirpath, mid_dirnames, mid_filenames) in os.walk(midpath):
for f in mid_filenames:
if f.endswith(tuple(audio_extensions)):
print("Reading file #" + str(cnt + 1))
mid_path = mid_dirpath + f
df2t = old_preparePredictionData(mid_path, savetoexcel=False)
df2 = pd.concat([df2, df2t], ignore_index=True).reset_index(drop=True)
cnt += 1
else:
raise FileNotFoundError("Path resulted in error.")
# Reshape test data to match training set
np.set_string_function(
lambda x: repr(x).replace('(', '').replace(')', '').replace('array', '').replace(" ", ' '), repr=False)
np.set_printoptions(threshold=inf)
for i in range(df2.shape[0]):
for col_name, data in df2.items():
if "[" in str(data[i]) and "]" in str(data[i]):
compdata = df.iloc[1][col_name]
if "[" in compdata and "]" in compdata:
if 'dtype=complex64' in compdata or 'dtype=complex64' in str(data[i]):
continue # Ignore since complex values aren't used in model
arr_1 = np.array(literal_eval(compdata))
# print("Evaluating:", str(data[i]))
arr_2 = np.array(literal_eval(str(data[i]).strip()))
arr_2 = np.resize(arr_2, arr_1.shape)
df2.at[i, col_name] = arr_2
# df = df2
df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx')) # 15,330
train_rows = df.shape[0]
df = pd.concat([df, df2], ignore_index=True).reset_index(drop=True)
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
df.drop(columns=['spectral_bandwidth_var', 'spectral_centroid_var', 'spectral_flatness_var', 'spectral_rolloff_var',
'zero_crossing_var', 'fourier_tempo_mean', 'fourier_tempo_var'], inplace=True)
nonlist = df[['duration', 'spectral_contrast_var']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'spectral_contrast_var', 'formtype'],
inplace=True)
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()).add_prefix(col) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
    df = pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1)
import pytz
from pandas import to_datetime
from smrf.framework.model_framework import SMRF
from smrf.tests.smrf_test_case import SMRFTestCase
class TestModelFramework(SMRFTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.smrf = SMRF(cls.config_file)
def test_start_date(self):
self.assertEqual(
self.smrf.start_date,
to_datetime(self.smrf.config['time']['start_date'], utc=True)
)
def test_end_date(self):
self.assertEqual(
self.smrf.end_date,
to_datetime(self.smrf.config['time']['end_date'], utc=True)
)
def test_time_zone(self):
self.assertEqual(self.smrf.time_zone, pytz.UTC)
def test_date_time(self):
self.assertEqual(
self.smrf.date_time[0],
to_datetime('1998-01-14 15:00:00', utc=True)
)
self.assertEqual(
self.smrf.date_time[-1],
to_datetime('1998-01-14 19:00:00', utc=True)
)
self.assertEqual(
self.smrf.date_time[0].tzname(),
str(pytz.UTC)
)
self.assertEqual(
type(self.smrf.date_time),
list
)
def test_assert_time_steps(self):
self.assertEqual(self.smrf.time_steps, 5)
class TestModelFrameworkMST(SMRFTestCase):
"""
Test timezone handling for MST.
"""
TIMEZONE = pytz.timezone('MST')
@classmethod
def setUpClass(cls):
super().setUpClass()
base_config = cls.base_config_copy()
base_config.cfg['time']['time_zone'] = 'MST'
cls.smrf = SMRF(base_config)
def test_timezone_error(self):
base_config = self.base_config
base_config.cfg['time']['time_zone'] = 'mst'
with self.assertRaises(Exception):
SMRF(base_config)
def test_start_date(self):
self.assertEqual(
self.smrf.start_date,
to_datetime(self.smrf.config['time']['start_date']).tz_localize(
self.TIMEZONE
)
)
def test_end_date(self):
self.assertEqual(
self.smrf.end_date,
to_datetime(self.smrf.config['time']['end_date']).tz_localize(
self.TIMEZONE
)
)
def test_time_zone(self):
self.assertEqual(self.smrf.time_zone, self.TIMEZONE)
def test_date_time(self):
self.assertEqual(
self.smrf.date_time[0],
to_datetime('1998-01-14 15:00:00').tz_localize(self.TIMEZONE)
)
self.assertEqual(
self.smrf.date_time[-1],
            to_datetime('1998-01-14 19:00:00').tz_localize(self.TIMEZONE)
        )
'''
Reads in literature metallicities and makes new Fe/H basis
'''
import pickle
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from astroquery.simbad import Simbad
from . import *
class LitFehRaw():
'''
Read in Fe/H values from the literature, before making any transformations
'''
def __init__(self):
# map the raw data to object
# source_dir=config_red["data_dirs"]["DIR_LIT_HIGH_RES_FEH"]):
source_dir = "/Users/bandari/Documents/git.repos/rrlfe/src/high_res_feh/"
# stand-in that consists of our program star names
self.df_our_program_stars = pd.read_csv(source_dir + "our_program_stars_names_only.csv")
# Fe/H from Layden+ 1994; this may serve as the common basis for RRabs
self.df_layden_feh = pd.read_csv(source_dir + "layden_1994_abundances.dat")
# RES: "rather low"
# Fe/H Clementini+ 1995
self.df_clementini_feh = pd.read_csv(source_dir + "clementini_1995_abundances.dat")
# Fe/H Fernley+ 1996
self.df_fernley96_feh = pd.read_csv(source_dir + "fernley_1996_abundances.dat")
# RES: 60,000, FeI & FeII, 5900-8100 A
# Fe/H from Fernley+ 1997
self.df_fernley97_feh = pd.read_csv(source_dir + "fernley_1997_abundances.dat")
# RES: 60,000, two FeII lines, 5900-8100 A
# log(eps) from Lambert+ 1996
self.df_lambert_logeps = pd.read_csv(source_dir + "lambert_1996_abundances.dat")
# RES: ~23,000, FeII + photometric models, 3600-9000 A
# Fe/H from Wallerstein and Huang 2010, arXiv 1004.2017
self.df_wallerstein_feh = pd.read_csv(source_dir + "wallerstein_huang_2010_abundances.dat")
# RES: ~30,000, FeII
# Fe/H from Chadid+ 2017 ApJ 835.2:187 (FeI and II lines)
self.df_chadid_feh = pd.read_csv(source_dir + "chadid_2017_abundances.dat")
# RES: 38000, FeI & FeII, 3400-9900 A
# Fe/H from Liu+ 2013 Res Ast Astroph 13:1307
self.df_liu_feh = pd.read_csv(source_dir + "liu_2013_abundances.dat")
# RES: ~60,000, FeI (& FeII?), 5100-6400 A
# Fe/H from Nemec+ 2013
self.df_nemec_feh = pd.read_csv(source_dir + "nemec_2013_abundances.dat")
# RES: ~65,000 or 36,000, FeI & FeII, 5150-5200 A
# Fe/H from Solano+ 1997
self.df_solano_feh = pd.read_csv(source_dir + "solano_1997_abundances.dat")
# RES: 22,000 & 19,000, strong FeI lines, 4160-4390 & 4070-4490 A
# Fe/H from Pancino+ 2015 MNRAS 447:2404
self.df_pancino_feh = pd.read_csv(source_dir + "pancino_2015_abundances.dat")
# RES: >30,000, FeI (weighted average), 4000-8500 A
# Fe/H from Sneden+ 2017
self.df_sneden_feh = pd.read_csv(source_dir + "sneden_2017_abundances.dat", delimiter="|")
# RES: ~27,000 (at 5000 A), FeI & FeII, 3400-9000 A
# Fe/H from Crestani+ 2021
self.df_crestani_feh = pd.read_csv(source_dir + "crestani_2021_abundances.dat",
delimiter=",")
# Fe/H from Kemper+ 1982; this might serve as the common basis for RRcs
self.df_kemper_feh = pd.read_csv(source_dir + "kemper_1982_abundances.dat")
# Fe/H from Govea+ 2014
## ## note: Govea+ has abundances for each phase value, and this
## ## includes NLTE phases; how to get single Fe/H?
self.df_govea_feh = pd.read_csv(source_dir + "govea_2014_abundances.dat")
def map_names(df_pass):
# find common ASAS names
import ipdb; ipdb.set_trace()
# treat each lit source individually to get single Fe/H and error
# loop over rows, parse as necessary
for row_num in range(0,len(df_pass)):
name_initial = df_pass["name"]
def matchmaker(basis_table_pass, input_table_pass):
'''
    Find which stars are common to two input tables and return their Fe/H values side by side
INPUTS:
input_table: table I'm interested in checking for overlapping stars
(pandas dataframe with col ["name_match"]: star name; col ["feh_single"]: Fe/H)
basis_table: table with the names for which I am looking for repeats in the other table
(pandas dataframe with col ["name_match"]: star name; col ["feh_single"]: Fe/H)
    OUTPUTS:
    pandas dataframe (inner merge on "name_match") containing
    1. the overlapping star names
    2. Fe/H values from the basis_table (columns suffixed "_basis")
    3. Fe/H values from the input_table (columns suffixed "_lit")
'''
basis_table = basis_table_pass.copy(deep=True)
input_table = input_table_pass.copy(deep=True)
# make all strings lowercase, to make case insensitive match
basis_table['name_match'] = basis_table['name_match'].str.lower()
input_table['name_match'] = input_table['name_match'].str.lower()
merged_table = basis_table.merge(input_table, how="inner", on="name_match", suffixes=("_basis", "_lit"))
return merged_table
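# Intended call pattern (dataframe names are illustrative; both frames are expected to
# carry "name_match" and "feh_single" columns before being passed in):
#   merged = matchmaker(basis_table_pass=df_layden, input_table_pass=df_chadid)
#   residuals = merged["feh_single_lit"] - merged["feh_single_basis"]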
def find_offsets(match_pass):
'''
Finds the offsets that need to be added in to overlap datasets as per Crestani+ 2017 Fig. 6
INPUTS:
match_pass: the dataframe holding matched Fe/Hs
OUTPUTS:
y_offset_2_lit: the constant offset that needs to be added back in
'''
# find offset between (lit vs. Layden) residuals and Chadid+ 2017 at Fe/H=-1.25 (see their Fig. 6)
chadid_y_125 = -0.10583621694962 # from Chadid line at Fe/H=-1.25
feh_basis_loc = -1.25 # corresponding x- value (Fe/H in the basis dataset)
m_lit, b_lit = np.polyfit(match_pass["feh_single_basis"],np.subtract(match_pass["feh_single_lit"],match_pass["feh_single_basis"]),1)
# find offset between residuals and zero
y_offset_lit = m_lit*feh_basis_loc + b_lit
# offset between the offsets ("offset_2"); this is what needs to be added in
# to bring it in line with lit
y_offset_2_lit = chadid_y_125 - y_offset_lit
return y_offset_2_lit
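# Worked example with made-up numbers: if the residual fit over the matched stars gives
# m_lit = 0.08 and b_lit = 0.02, then y_offset_lit = 0.08 * (-1.25) + 0.02 = -0.08, and the
# returned correction is y_offset_2_lit = -0.10583621694962 - (-0.08) ~= -0.026.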
def main():
# read in raw
raws = LitFehRaw()
# make transformations to get single Fe/H value
# convert outputs in astropy.Table format to DataFrames
df_our_stars = pd.DataFrame(raws.df_our_program_stars)
df_govea = pd.DataFrame(raws.df_govea_feh) #avg
df_layden = pd.DataFrame(raws.df_layden_feh) # simple
df_clementini = pd.DataFrame(raws.df_clementini_feh) # logeps, then avg
df_fernley96 = pd.DataFrame(raws.df_fernley96_feh) # simple, add error
df_fernley97 = pd.DataFrame(raws.df_fernley97_feh) # simple
df_lambert = pd.DataFrame(raws.df_lambert_logeps) # logeps, then avg
df_wallerstein = pd.DataFrame(raws.df_wallerstein_feh) # simple, add error
df_chadid = pd.DataFrame(raws.df_chadid_feh) # avg
df_liu = pd.DataFrame(raws.df_liu_feh) # simple, add error (at different phases)
df_nemec = pd.DataFrame(raws.df_nemec_feh) # simple
df_solano = pd.DataFrame(raws.df_solano_feh) # simple, add error
df_pancino = pd.DataFrame(raws.df_pancino_feh) # simple
df_sneden = pd.DataFrame(raws.df_sneden_feh) # avg (diff phases); note sigma is one of last rows
df_crestani = pd.DataFrame(raws.df_crestani_feh) # avg
    df_kemper = pd.DataFrame(raws.df_kemper_feh)
import pandas as pd
from django.core.files import File
from fixtures_functions import *
from main.functions import extraer_cuentas, crear_cuentas, valida_cuenta
class TestExtraerCuenta:
def test_loading_good_file(self):
f = open('main/tests/data/cuentas.xlsx', 'rb')
df = extraer_cuentas(File(f))
assert isinstance(df, pd.DataFrame)
for col in ['num', 'nombre', 'etiqueta']:
assert col in df.columns
assert len(df) == 13
for num in ['100', '101', '1101', '1110', '1200', '1201', '1210', '300', '312', '324', '15', '2001', '2000']:
assert num in list(df.num)
# test on an excel with wrong format, and a non-excel file
@pytest.mark.parametrize('filename', ['empty_file.xlsx', 'logo.svg'])
def test_loading_bad_file(self, filename):
f = open('main/tests/data/'+filename, 'rb')
df = extraer_cuentas(File(f))
assert isinstance(df, pd.DataFrame)
for col in ['num', 'nombre', 'etiqueta']:
assert col in df.columns
assert len(df) == 0
def test_file_with_errors(self):
f = open('main/tests/data/cuentas_with_errors.xlsx', 'rb')
df = extraer_cuentas(File(f))
for col in ['num', 'nombre', 'etiqueta']:
assert col in df.columns
assert len(df) == 13
class TestCrearCuentas:
@pytest.mark.parametrize('sobreescribir', [False, True])
def test_crear_cuentas_good_data(self, populate_database_cuentas, sobreescribir):
populate_database_cuentas
        # the first two accounts already exist, so they should not be created
cuentas_dict = {
'num': [ f'10{n}' for n in range(6) ],
'nombre': [ f'Cuenta{n+1}' for n in range(6) ],
'etiqueta': ['balance']*6
}
cuentas_df = pd.DataFrame(cuentas_dict)
creadas, errors = crear_cuentas(cuentas_df, sobreescribir)
creadas_nombres = [ c.nombre for c in creadas ]
creadas_errors = [ c.nombre for c in errors ]
# check cuentas exist in returned list
for nombre in ['Cuenta1', 'Cuenta2']:
if sobreescribir:
assert nombre in creadas_nombres
assert nombre not in creadas_errors
else:
assert nombre not in creadas_nombres
assert nombre in creadas_errors
assert errors[0].error == 'Cuenta ya existente'
assert errors[1].error == 'Cuenta ya existente'
for nombre in ['Cuenta3', 'Cuenta4', 'Cuenta5', 'Cuenta6']:
assert nombre in creadas_nombres
# check cuentas exist in database
cuentas_db = Cuenta.objects.all()
for cuenta in creadas:
assert cuenta in cuentas_db
def test_crear_cuentas_wrong_etiqueta(self, populate_database_etiquetas):
populate_database_etiquetas
cuentas_dict = {
'num': ['100', '101', '102', '103', '104', '105'],
'nombre': ['Caja1', 'Caja2', 'Caja3', 'Caja4', 'Caja5', 'Caja6'],
'etiqueta': ['balance']*3 + ['wrong']*3
}
cuentas_df = pd.DataFrame(cuentas_dict)
creadas, _ = crear_cuentas(cuentas_df, False)
lista_etiquetas = list()
for cuenta in creadas:
etiquetas = cuenta.etiqueta.all()
for et in etiquetas:
lista_etiquetas.append(et.id)
assert 'balance' in lista_etiquetas
assert 'wrong' not in lista_etiquetas
def test_crear_cuentas_with_errors(self, populate_database_etiquetas):
populate_database_etiquetas
cuentas_dict = {
'num': ['100', '100', '102', '103', '104', '105'],
'nombre': ['Caja1', 'Caja2', '', 'Caja4', 'Caja5', 'Caja6'],
'etiqueta': ['balance']*5 + ['']
}
cuentas_df = pd.DataFrame(cuentas_dict)
creadas, error = crear_cuentas(cuentas_df, False)
assert len(creadas) == 5
assert len(error) == 1
assert error[0].num == '102'
cuentas_db = Cuenta.objects.filter(num='102')
assert len(cuentas_db) == 0
class TestValidaCuenta:
@pytest.mark.parametrize('num, nombre, msg', [
('100', 'Caja1', 'ok'),
('101', '', 'Cuenta en blanco no permitida'),
])
def test_valida_cuenta(self, num, nombre, msg):
cuentas_dict = {
'num': num,
'nombre': nombre,
'etiqueta': ['balance'],
}
        cuentas_df = pd.DataFrame(cuentas_dict)
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
from typing import List, Tuple
SPACES = ' ' * 10
def load_page(df: pd.DataFrame,
player_list: List[str]) -> None:
""" In this section you can compare two players against each other based on their respective performances.
Please note that the Head to Head section is meant for games that were played with 2 players against each other.
Sections:
* The Winner
* Stats per Game
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of played board game matches.
player_list : list of str
List of players that participated in the board games
"""
player_one, player_two = prepare_layout(player_list)
two_player_matches, matches_df = check_if_two_player_matches_exist(df, player_one, player_two)
if two_player_matches:
sidebar_frequency_graph(matches_df)
extract_winner(df, player_one, player_two)
stats_per_game(matches_df, player_one, player_two)
else:
st.header("🏳️ Error")
st.write("No two player matches were played with **{}** and **{}**. "
"Please select different players".format(player_one, player_two))
def prepare_layout(player_list: List[str]) -> Tuple[str, str]:
""" Create the layout for the page including general selection options
Parameters:
-----------
player_list : list of str
List of players that participated in the board games
"""
# Choose players
st.title("🎲 Head to Head")
st.write("In this section you can compare two players against each other based on their"
"respective performances. Please note that the *Head to Head* section is meant for "
"games that were played with 2 players against each other. ")
st.sidebar.subheader("Please select two players")
player_one = st.sidebar.selectbox("Select player one", player_list, index=0)
player_two = st.sidebar.selectbox("Select player two", player_list, index=1)
return player_one, player_two
def check_if_two_player_matches_exist(df: pd.DataFrame,
player_one: str,
player_two: str) -> Tuple[bool, pd.DataFrame]:
""" Checks if player_one and player_two have played against each other in two player games
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of played board game matches.
player_one : str
One of the players in the game
player_two : str
One of the players in the game
Returns:
--------
boolean
True if there are matches played between player_one and player_two
False otherwise
matches_df: pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
"""
matches_df = df.loc[(df[player_one + "_played"] == 1) &
(df[player_two + "_played"] == 1) &
(df["Nr_players"] == 2), :]
if (len(matches_df) == 0) | (player_one == player_two):
return False, matches_df
else:
return True, matches_df
def sidebar_frequency_graph(matches_df: pd.DataFrame) -> None:
""" Extracts and visualizes the frequency of games
Parameters:
-----------
matches_df: pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
"""
to_plot = matches_df.sort_values("Date").set_index("Date").resample("3D").count().reset_index()
chart = alt.Chart(to_plot).mark_area(
color='goldenrod',
opacity=1
).encode(
x='Date',
y=alt.Y('Players', title='Number of Games'),
).properties(background='transparent')
if len(to_plot) > 0:
st.sidebar.altair_chart(chart)
def extract_winner(df: pd.DataFrame,
player_one: str,
player_two: str) -> None:
""" Extract the winner of the two players
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of played board game matches.
player_one : str
One of the players in the game
player_two : str
One of the players in the game
"""
# Extract common games
games = df.loc[(df[player_one + "_played"] == 1) &
(df[player_two + "_played"] == 1) &
(df["Nr_players"] == 2), :]
player_one_won = len(games[games[player_one + "_winner"] == 1])
player_two_won = len(games[games[player_two + "_winner"] == 1])
to_plot = pd.DataFrame([[player_one_won, player_one],
[player_two_won, player_two]], columns=['Results', 'Player'])
if player_one_won != player_two_won:
if player_one_won > player_two_won:
percentage = round(player_one_won / len(games) * 100, 2)
winner = player_one
else:
percentage = round(player_two_won / len(games) * 100, 2)
winner = player_two
st.header("**♟** The Winner - {}**♟**".format(winner))
st.write("The winner is decided simply by the amount of games won one by either player.")
st.write("{}🔹 Out of {} games, {} games were won by **{}** "
"whereas {} games were won by **{}**".format(SPACES, len(games), player_one_won, player_one,
player_two_won, player_two))
st.write("{}🔹 In other words, {}% of games were won by **{}** who is the clear winner!".format(SPACES,
percentage,
winner))
else:
winner = player_one + " and " + player_two
st.header("**♟** The Winners - {}**♟**".format(winner))
st.write("The winner is decided simply by the amount of games won one by either player.")
st.write("{}🔹 Out of {} games, {} games were won by **{}** "
"whereas {} games were won by **{}**".format(SPACES, len(games), player_one_won, player_one,
player_two_won, player_two))
st.write("{}🔹 In other words, it is a **tie**!".format(SPACES))
bars = alt.Chart(to_plot).mark_bar().encode(
x='Results:Q',
y='Player:O',
color='Player:O'
)
text = bars.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='Results:Q'
)
st.write(bars + text)
def stats_per_game(matches_df: pd.DataFrame,
player_one: str,
player_two: str) -> None:
""" Show statistics per game
Parameters:
-----------
matches_df : pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
player_one : str
One of the players in the game
player_two : str
One of the players in the game
"""
st.header("**♟** Stats per Game **♟**")
st.write("Please select a game below to see the statistics for both players.")
game_selection_df = game_selection(matches_df)
scores_over_time(player_one, player_two, game_selection_df)
general_stats_game(player_one, player_two, game_selection_df)
def game_selection(matches_df: pd.DataFrame) -> pd.DataFrame:
""" Select game and filter data based on the game
Parameters:
-----------
matches_df: pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
Returns:
--------
game_selection_df : pandas.core.frame.DataFrame
Filtered data based on the selected game
"""
games = list(matches_df.Game.unique())
games.sort()
game = st.selectbox("Select a game", games)
game_selection_df = matches_df.loc[(matches_df.Game == game), :]
return game_selection_df
def scores_over_time(player_one: str,
player_two: str,
game_selection_df: pd.DataFrame) -> None:
""" Visualize scores over time for a specific game for two players
Parameters:
-----------
player_one : str
One of the players in the game
player_two : str
One of the players in the game
game_selection_df : pandas.core.frame.DataFrame
Filtered data based on the selected game
"""
player_one_vals = list(game_selection_df[player_one + '_score'].values)
player_two_vals = list(game_selection_df[player_two + '_score'].values)
vals = player_one_vals + player_two_vals
player_indices = [player_one if i < len(player_one_vals) else player_two for i, _ in enumerate(vals)]
indices = list(np.arange(len(vals) / 2))
indices = indices + indices
to_plot = pd.DataFrame(np.array([indices, vals, player_indices]).T, columns=['Indices', 'Scores', 'Players'])
to_plot.Indices = to_plot.Indices.astype(float)
to_plot.Scores = to_plot.Scores.astype(float)
st.write("Here you can see how games have progressed since the beginning. There is purposefully"
" no time displayed as that might clutter the visualization. All scores on the left hand side"
" were the first matches and scores on the right are the last.")
colors = ['#2196F3', '#FF5722']
chart = alt.Chart(to_plot,
title="Scores over time").mark_line().encode(
alt.X('Indices', axis=None, scale=alt.Scale(domain=(0, max(to_plot.Indices)))),
y='Scores:Q',
color=alt.Color('Players', scale=alt.Scale(range=colors))
).configure_axis(
grid=False
).configure_view(
strokeOpacity=0
)
st.altair_chart(chart)
def general_stats_game(player_one: str,
player_two: str,
game_selection_df: pd.DataFrame) -> None:
""" Show general statistics of a specific game for two players
Parameters:
-----------
player_one : str
One of the players in the game
player_two : str
One of the players in the game
game_selection_df : pandas.core.frame.DataFrame
Filtered data based on the selected game
"""
    result = pd.DataFrame(columns=['Player', 'Avg', 'Min', 'Max', 'Number'])
from __future__ import print_function, division, absolute_import
import logging
import os
import warnings
from collections import defaultdict
import h5py
import numpy as np
import pandas as pd
from kglib.cross_correlation import Analyze_CCF
from .HelperFunctions import roundodd, mad, integral
from kglib.cross_correlation import CCF_Systematics
from kglib import fitters
home = os.environ['HOME']
def create_group(current, name, attrs, overwrite):
"""
Create and HDF5 group from the current level.
Parameters:
===========
- current: h5py Group or File object
The current level
- name: string
The name of the new group
- attrs: dictionary
attributes to assign to the group
- overwrite: boolean
If True and the name is already a group in the current level,
overwrite it. Otherwise, just return that group.
Returns:
An h5py Group object.
"""
if name in current:
if not overwrite:
return current[name]
# Update the attributes
for k in attrs:
current[name].attrs[k] = attrs[k]
return current[name]
group = current.create_group(name)
for k in attrs:
group.attrs[k] = attrs[k]
return group
def create_dataset(group, name, attrs, data, overwrite, **kwargs):
"""
Create and HDF5 dataset from the current level.
Parameters:
===========
- group: h5py Group or File object
The current level. The dataset will be made in
this "folder".
- name: string
The name of the new dataset
- attrs: dictionary
attributes to assign to the dataset
- overwrite: boolean
If True and the name is already a dataset in the current level,
overwrite it. Otherwise, just return that dataset.
- **kwargs: Any additional keyword arguments to pass to group.create_dataset.
Returns:
An h5py dataset object.
"""
if name in group:
new_ds = group[name]
if not overwrite:
return new_ds
new_ds.resize(data.shape)
new_ds[:] = data
# Update the attributes
for k in attrs:
new_ds.attrs[k] = attrs[k]
return new_ds
new_ds = group.create_dataset(data=data, name=name, **kwargs)
for k in attrs:
new_ds.attrs[k] = attrs[k]
return new_ds
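# Minimal sketch of how create_group and create_dataset compose when writing one level
# of an HDF5 hierarchy; the file, group, and attribute names here are illustrative only.
def _example_write_level(output_fname='combined_example.hdf5'):
    with h5py.File(output_fname, 'w') as out:
        star = create_group(out, 'HIP1234', {'RA': 123.4}, overwrite=True)
        create_dataset(star, 'ccf', {'T': 5000}, data=np.zeros((2, 10)),
                       overwrite=True, chunks=True, maxshape=(2, None))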
def combine_hdf5_synthetic(file_list, output_file, overwrite=True):
"""
Combine several hdf5 files into one.
The structure is assumed to be that of the synthetic binary search
Parameters:
===========
- file_list: iterable
A list containing the filenames of the hdf5
files to combine. Each file must have the very
specific structure of a synthetic binary star
search.
- output_file: string
The name of the file to output with the combined data
- overwrite: boolean
If True, it overwrites any duplicated datasets in the output.
The last hdf5 file in the file_list will not be overwritten.
"""
if output_file in file_list:
raise ValueError('Output file cannot be one of the input files!')
with h5py.File(output_file, 'w') as output:
# Loop over the files in file_list
for fname in file_list:
with h5py.File(fname, 'r') as f:
logging.debug('\n\nFile {}'.format(fname))
# Primary star
for p_name, primary in f.iteritems():
logging.debug('Primary {}'.format(p_name))
p = create_group(output, p_name, primary.attrs, overwrite)
# Secondary star
for s_name, secondary in primary.iteritems():
if 'bright' in s_name:
logging.warn('Ignoring entry {}!'.format(s_name))
continue
logging.debug('\tSecondary {}'.format(s_name))
s = create_group(p, s_name, secondary.attrs, overwrite)
# Add mode
for mode, mode_group in secondary.iteritems():
m = create_group(s, mode, mode_group.attrs, overwrite)
# Loop over datasets
for ds_name, ds in mode_group.iteritems():
# Make a more informative dataset name
ds_name = 'T{}_logg{}_metal{:+.1f}_vsini{}'.format(ds.attrs['T'],
ds.attrs['logg'],
ds.attrs['[Fe/H]'],
ds.attrs['vsini'])
# Dataset attributes should not be big things like arrays.
if 'velocity' in ds.attrs:
data = np.array((ds.attrs['velocity'], ds.value))
else:
data = ds.value
# Make attributes dictionary
attrs = {k: ds.attrs[k] for k in ds.attrs}
new_ds = create_dataset(m, ds_name, attrs, data, overwrite,
chunks=True, maxshape=(2, None))
f.flush()
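# Example call (file names are hypothetical):
#   combine_hdf5_synthetic(['ccf_chunk1.hdf5', 'ccf_chunk2.hdf5'], 'CCF_synthetic_combined.hdf5')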
def combine_hdf5_real(file_list, output_file, overwrite=True):
"""
Combine several hdf5 files into one.
The structure is assumed to be that of the normal binary search
Parameters:
===========
- file_list: iterable
A list containing the filenames of the hdf5
files to combine. Each file must have the very
specific structure of my binary star
search.
- output_file: string
The name of the file to output with the combined data
- overwrite: boolean
If True, it overwrites any duplicated datasets in the output.
The last hdf5 file in the file_list will not be overwritten.
"""
if output_file in file_list:
raise ValueError('Output file cannot be one of the input files!')
with h5py.File(output_file, 'w') as output:
# Loop over the files in file_list
for fname in file_list:
with h5py.File(fname, 'r') as f:
logging.debug('\n\nFile {}'.format(fname))
# Star name
for star_name, star in f.iteritems():
logging.debug('Star {}'.format(star_name))
s = create_group(output, star_name, star.attrs, overwrite)
# Date
for date_str, date in star.iteritems():
logging.debug('\tDate {}'.format(date_str))
d = create_group(s, date_str, date.attrs, overwrite)
# Loop over datasets
for ds_name, ds in date.iteritems():
# Make a more informative dataset name
ds_name = 'T{}_logg{}_metal{:+.1f}_vsini{}_mode-{}'.format(ds.attrs['T'],
ds.attrs['logg'],
ds.attrs['[Fe/H]'],
ds.attrs['vsini'],
ds.attrs['addmode'])
# Dataset attributes should not be big things like arrays.
if 'velocity' in ds.attrs:
data = np.array((ds.attrs['velocity'], ds.value))
else:
data = ds.value
# Make attributes dictionary
attrs = {k: ds.attrs[k] for k in ds.attrs}
new_ds = create_dataset(d, ds_name, attrs, data, overwrite,
chunks=True, maxshape=(2, None))
f.flush()
def combine_hdf5_sensitivity(file_list, output_file='tmp.fits', overwrite=True):
"""
Combine several hdf5 files into one.
The structure is assumed to be that of the sensitivity analysis
Parameters:
===========
- file_list: iterable
A list containing the filenames of the hdf5
files to combine. Each file must have the very
specific structure of my sensitivity analysis.
- output_file: string
The name of the file to output with the combined data
- overwrite: boolean
If True, it overwrites any duplicated datasets in the output.
The last hdf5 file in the file_list will not be overwritten.
"""
if output_file in file_list:
raise ValueError('Output file cannot be one of the input files!')
with h5py.File(output_file, 'w') as output:
# Loop over the files in file_list
for fname in file_list:
with h5py.File(fname, 'r') as f:
logging.debug('\n\nFile {}'.format(fname))
# Star name
for star_name, star in f.iteritems():
logging.debug('Star {}'.format(star_name))
s = create_group(output, star_name, star.attrs, overwrite)
# Date
for date_str, date in star.iteritems():
logging.debug('\tDate {}'.format(date_str))
d = create_group(s, date_str, date.attrs, overwrite)
# Temperature
for T_string, Teff in date.iteritems():
logging.debug('\t\tT = {}'.format(T_string))
T = create_group(d, T_string, Teff.attrs, overwrite)
# Loop over datasets
for ds_name, ds in Teff.iteritems():
logging.debug('\t\t\t{}'.format(ds.name))
# Make a more informative dataset name
ds_name = 'logg{}_metal{:+.1f}_vsini{}_rv{:+.0f}_mode-{}'.format(ds.attrs['logg'],
ds.attrs['[Fe/H]'],
ds.attrs['vsini'],
ds.attrs['rv'],
ds.attrs['addmode'])
# Dataset attributes should not be big things like arrays.
if 'velocity' in ds.attrs:
data = np.array((ds.attrs['velocity'], ds.value))
else:
data = ds.value
# Make attributes dictionary
attrs = {k: ds.attrs[k] for k in ds.attrs}
new_ds = create_dataset(T, ds_name, attrs, data, overwrite,
chunks=True, maxshape=(2, None))
f.flush()
class Full_CCF_Interface(object):
"""
Interface to all of my cross-correlation functions in one class!
"""
def __init__(self, cache=False, update_cache=True, **cache_kwargs):
"""
Parameters:
===========
- cache: boolean
Should we use/create a cache of the HDF5 datasets?
Speeds things up significantly, but takes more memory.
- update_cache: boolean
SHould we update the cache? Only used if cache = True
- cache_kwargs: Any additional keyword arguments to pass to
self._make_cache. Especially useful is
cache_fname, giving a CSV file with the information.
"""
# Instance variables to hold the ccf interfaces
self._ccf_files = {'TS23': '{}/School/Research/McDonaldData/Cross_correlations/CCF.hdf5'.format(home),
'HET': '{}/School/Research/HET_data/Cross_correlations/CCF.hdf5'.format(home),
'CHIRON': '{}/School/Research/CHIRON_data/Cross_correlations/CCF.hdf5'.format(home),
'IGRINS': '{}/School/Research/IGRINS_data/Cross_correlations/CCF.hdf5'.format(home)}
self._interfaces = {inst: Analyze_CCF.CCF_Interface(self._ccf_files[inst]) for inst in self._ccf_files.keys()}
# Variables for correcting measured --> actual temperatures
self._caldir = {'TS23': '{}/School/Research/McDonaldData/SyntheticData/'.format(home),
'HET': '{}/School/Research/HET_data/SyntheticData/'.format(home),
'CHIRON': '{}/School/Research/CHIRON_data/SyntheticData/'.format(home),
'IGRINS': '{}/School/Research/IGRINS_data/SyntheticData/'.format(home)}
self._fitters = {'TS23': fitters.Bayesian_LS,
'HET': fitters.Bayesian_LS,
'CHIRON': fitters.Bayesian_LS,
'IGRINS': fitters.Bayesian_LS}
self._flatchain_format = '{directory}{instrument}_{addmode}_flatchain.npy'
self._flatlnprob_format = '{directory}{instrument}_{addmode}_flatlnprob.npy'
self._uncertainty_scale = '{directory}{instrument}_{addmode}uncertainty_scalefactor.txt'
# Make a couple data caches to speed things up
self._chainCache = {}
self._predictionCache = {}
self._cache = None
if cache:
self._make_cache(update_cache=update_cache, **cache_kwargs)
return
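    # Typical use (illustrative sketch; the cache CSV filename is an assumption):
    #   interface = Full_CCF_Interface(cache=True, cache_fname='ccf_summary.csv')
    #   stars = interface.list_stars(print2screen=True)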
def list_stars(self, print2screen=False):
"""
List all of the stars in all of the CCF interfaces
Parameters:
===========
- print2screen: bool
Should we print the stars and dates to screen?
Returns:
=========
- star_list: list
A list of every star in the file, sorted by name.
"""
stars = []
for inst in self._interfaces.keys():
if print2screen:
print('Stars observed with {}: \n============================\n\n'.format(inst))
stars.extend(self._interfaces[inst].list_stars(print2screen=print2screen))
        return list(pd.unique(stars))
import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def training_trend(ouput_file="ftraining_acc.png"):
print('loading all training trends ...')
# sns.set(style="ticks")
# output_folder = ''
output_folder = 'mgf_list_v3/'
train_log_0 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun/log_file_caption_2dir.tab', sep='\t')
train_log_17 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun_0-17/deepnovo.train.model_0-17/log_file_caption_2dir.tab', sep='\t')
train_log_57 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun_0-57/deepnovo.train.model_0-57/log_file_caption_2dir.tab', sep='\t')
train_log_97 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun_0-97/deepnovo.train.model_0-97/log_file_caption_2dir.tab', sep='\t')
train_log_157 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun_0-158/deepnovo.train.model_0-158/log_file_caption_2dir.tab', sep='\t')
# train_log_232 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun_232_27/deepnovo.train.model_232_27/log_file_caption_2dir.tab', sep='\t')
train_log_232 = pd.read_csv('/Volumes/MSSHARE/Joonyong/DeepNovoRun_235_v3_27/log_file_caption_2dir.tab', sep='\t')
train_log_232t = pd.read_csv('/Volumes/MSSHARE/Joonyong/PnnlRun3_235_v4_0001_ep_30/log_file_caption_2dir.tab', sep='\t')
# train_logs = [train_log_0, train_log_17, train_log_57, train_log_97, train_log_157, train_log_232, train_log_232t]
# train_models = ["Pre-trained", "Train-300k", "Train-1m", "Train-2m", "Train-3m", "Train-4m", "Train-4m+tuned"]
train_logs = [train_log_17, train_log_57, train_log_97, train_log_157, train_log_232, train_log_232t]
train_models = ["Train-300k", "Train-1m", "Train-2m", "Train-3m", "Train-4m", "Train-4m+tuned"]
# train_logs = [train_log_17, train_log_97, train_log_232, train_log_232t]
# train_models = ["Train-300k", "Train-2m", "Train-4m", "Train-4m+tuned"]
for i, tl in enumerate(train_logs):
tl['model'] = train_models[i]
    train_logs_all = pd.concat(train_logs)
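    # Hedged sketch (assumption, not taken from the source): one way the
    # concatenated logs could be plotted, if the log files expose 'step' and
    # 'perplexity' columns (hypothetical names):
    #     sns.lineplot(data=train_logs_all, x='step', y='perplexity', hue='model')
    #     plt.savefig(output_folder + output_file)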
import cloudpickle
import heapq as hq
import logging
import numpy
import multiprocessing
import os
import pickle
import pandas as pd
import py_entitymatching as mg
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching as em
import sys
import time
from array import array
from collections import namedtuple
from joblib import Parallel, delayed
from operator import attrgetter
from py_entitymatching.debugblocker.debugblocker_cython import \
debugblocker_cython, debugblocker_config_cython, debugblocker_topk_cython, debugblocker_merge_topk_cython
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
SELECTED_FIELDS_UPPER_BOUND = 8
def debug_blocker(candidate_set, ltable, rtable, output_size=200,
attr_corres=None, verbose=True, n_jobs=1, n_configs=1):
"""
This function debugs the blocker output and reports a list of potential
matches that are discarded by a blocker (or a blocker sequence).
Specifically, this function takes in the two input tables for
matching and the candidate set returned by a blocker (or a blocker
sequence), and produces a list of tuple pairs which are rejected by the
blocker but with high potential of being true matches.
Args:
candidate_set (DataFrame): The candidate set generated by
applying the blocker on the ltable and rtable.
ltable,rtable (DataFrame): The input DataFrames that are used to
generate the blocker output.
output_size (int): The number of tuple pairs that will be
returned (defaults to 200).
attr_corres (list): A list of attribute correspondence tuples.
When ltable and rtable have different schemas, or the same
schema but different words describing the attributes, the
user needs to manually specify the attribute correspondence.
Each element in this list should be a tuple of strings
which are the corresponding attributes in ltable and rtable.
The default value is None, and if the user doesn't specify
this list, a built-in function for finding the
attribute correspondence list will be called. But we highly
recommend the users manually specify the attribute
correspondences, unless the schemas of ltable and rtable are
identical (defaults to None).
verbose (boolean): A flag to indicate whether the debug information
            should be logged (defaults to True).
n_jobs (int): The number of parallel jobs to be used for computation
(defaults to 1). If -1 all CPUs are used. If 0 or 1,
no parallel computation is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used (where n_cpus are the total number of CPUs in the
machine).Thus, for n_jobs = -2, all CPUs but one are used.
If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
computation is used (i.e., equivalent to the default).
n_configs (int): The maximum number of configs to be used for
calculating the topk list(defaults to 1). If -1, the config
number is set as the number of cpu. If -2, all configs are used.
if n_configs is less than the maximum number of generated configs,
then n_configs will be used. Otherwise, all the generated configs
will be used.
Returns:
A pandas DataFrame with 'output_size' number of rows. Each row in the
DataFrame is a tuple pair which has potential of being a true
match, but is rejected by the blocker (meaning that the tuple
pair is in the Cartesian product of ltable and rtable subtracted
by the candidate set). The fields in the returned DataFrame are
from ltable and rtable, which are useful for determining similar
tuple pairs.
Raises:
AssertionError: If `ltable`, `rtable` or `candset` is not of type
pandas DataFrame.
AssertionError: If `ltable` or `rtable` is empty (size of 0).
AssertionError: If the output `size` parameter is less than or equal
to 0.
AssertionError: If the attribute correspondence (`attr_corres`) list is
not in the correct format (a list of tuples).
AssertionError: If the attribute correspondence (`attr_corres`)
cannot be built correctly.
Examples:
>>> import py_entitymatching as em
>>> ob = em.OverlapBlocker()
>>> C = ob.block_tables(A, B, l_overlap_attr='title', r_overlap_attr='title', overlap_size=3)
>>> corres = [('ID','ssn'), ('name', 'ename'), ('address', 'location'),('zipcode', 'zipcode')]
>>> D = em.debug_blocker(C, A, B, attr_corres=corres)
>>> import py_entitymatching as em
>>> ob = em.OverlapBlocker()
>>> C = ob.block_tables(A, B, l_overlap_attr='name', r_overlap_attr='name', overlap_size=3)
>>> D = em.debug_blocker(C, A, B, output_size=150)
"""
# Check input types.
_validate_types(ltable, rtable, candidate_set, output_size,
attr_corres, verbose)
# Basic checks.
# Check table size.
if len(ltable) == 0:
raise AssertionError('Error: ltable is empty!')
if len(rtable) == 0:
raise AssertionError('Error: rtable is empty!')
# Check the value of output size.
if output_size <= 0:
raise AssertionError('The input parameter: \'pred_list_size\''
' is less than or equal to 0. Nothing needs'
' to be done!')
# get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)
# validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)
    # Check the user input field corres list (if it exists) and get the raw
    # version of our internal corres list.
_check_input_field_correspondence_list(ltable, rtable, attr_corres)
corres_list = _get_field_correspondence_list(ltable, rtable,
l_key, r_key, attr_corres)
# Build the (col_name: col_index) dict to speed up locating a field in
# the schema.
ltable_col_dict = _build_col_name_index_dict(ltable)
rtable_col_dict = _build_col_name_index_dict(rtable)
# Filter correspondence list to remove numeric types. We only consider
# string types for document concatenation.
_filter_corres_list(ltable, rtable, l_key, r_key,
ltable_col_dict, rtable_col_dict, corres_list)
# Get field filtered new table.
ltable_filtered, rtable_filtered = _get_filtered_table(
ltable, rtable, corres_list)
# Select a subset of fields with high scores
feature_list = _select_features(ltable_filtered, rtable_filtered, l_key, r_key)
if len(feature_list) == 0:
raise AssertionError('\nError: the selected field list is empty,'
' nothing could be done! Please check if all'
' table fields are numeric types.')
# Map the record key value to its index in the table
lrecord_id_to_index_map = _build_id_to_index_map(ltable_filtered, l_key)
rrecord_id_to_index_map = _build_id_to_index_map(rtable_filtered, r_key)
# Build the tokenized record list delimited by a white space on the
# selected fields.
lrecord_list = _get_tokenized_table(ltable_filtered, l_key, feature_list)
rrecord_list = _get_tokenized_table(rtable_filtered, r_key, feature_list)
# Build the token order according to token's frequency. To run a
# prefix filtering based similarity join algorithm, we first need
# the global token order.
order_dict, token_index_dict = _build_global_token_order(
lrecord_list, rrecord_list)
# Sort the token in each record by the global order.
_replace_token_with_numeric_index(lrecord_list, order_dict)
_replace_token_with_numeric_index(rrecord_list, order_dict)
_sort_record_tokens_by_global_order(lrecord_list)
_sort_record_tokens_by_global_order(rrecord_list)
lrecord_token_list, lrecord_index_list =\
_split_record_token_and_index(lrecord_list)
rrecord_token_list, rrecord_index_list =\
_split_record_token_and_index(rrecord_list)
del lrecord_list
del rrecord_list
# Reformat the candidate set from a dataframe to a list of record index
# tuple pair.
new_formatted_candidate_set = _index_candidate_set(
candidate_set, lrecord_id_to_index_map, rrecord_id_to_index_map, verbose)
ltable_field_length_list = _calc_table_field_length(lrecord_index_list, len(feature_list))
rtable_field_length_list = _calc_table_field_length(rrecord_index_list, len(feature_list))
ltable_field_token_sum = _calc_table_field_token_sum(ltable_field_length_list, len(feature_list))
rtable_field_token_sum = _calc_table_field_token_sum(rtable_field_length_list, len(feature_list))
rec_list = debugblocker_cython_parallel(lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list,
ltable_field_token_sum, rtable_field_token_sum,
new_formatted_candidate_set, len(feature_list),
output_size, n_jobs, n_configs)
ret_dataframe = _assemble_topk_table(rec_list[0:output_size], ltable_filtered, rtable_filtered, l_key, r_key)
return ret_dataframe
def debugblocker_topk_cython_wrapper(config, lrecord_token_list,
rrecord_token_list, lrecord_index_list, rrecord_index_list, py_cand_set,
py_output_size):
# deserialize data
lrecord_token_list = pickle.loads(lrecord_token_list)
rrecord_token_list = pickle.loads(rrecord_token_list)
lrecord_index_list = pickle.loads(lrecord_index_list)
rrecord_index_list = pickle.loads(rrecord_index_list)
return debugblocker_topk_cython(config, lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list, py_cand_set,
py_output_size)
def debugblocker_cython_parallel(lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list,
ltable_field_token_sum, rtable_field_token_sum, py_cand_set,
py_num_fields, py_output_size, n_jobs, n_configs):
    # pickle the list of lists to accelerate multi-process transfer
lrecord_token_list = pickle.dumps(lrecord_token_list)
rrecord_token_list = pickle.dumps(rrecord_token_list)
lrecord_index_list = pickle.dumps(lrecord_index_list)
rrecord_index_list = pickle.dumps(rrecord_index_list)
# generate config lists
py_config_lists = debugblocker_config_cython(ltable_field_token_sum, rtable_field_token_sum,
py_num_fields, len(lrecord_token_list), len(rrecord_token_list))
n_configs = _get_config_num(n_jobs, n_configs, len(py_config_lists))
    # compute topk in parallel based on the config lists
rec_lists = Parallel(n_jobs=n_jobs)(delayed(debugblocker_topk_cython_wrapper)
(py_config_lists[i], lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list, py_cand_set,
py_output_size) for i in range(n_configs))
py_rec_list = debugblocker_merge_topk_cython(rec_lists)
return py_rec_list
# get the number of configs according to the input value of n_configs
def _get_config_num(n_jobs, n_configs, n_total_configs):
if n_jobs == 0 or n_configs == 0 or n_configs < -2 :
raise ValueError('n_jobs != 0 && n_configs != 0 && n_configs >= -2')
n_cpus = multiprocessing.cpu_count()
if n_configs == -2 :
n_configs = n_total_configs
elif n_configs == -1 :
# set n_configs as the number of the cpu cores
if n_jobs < 0 :
n_configs = n_cpus + 1 + n_jobs
else:
n_configs = n_jobs
if n_configs < n_total_configs:
return n_configs
else:
return n_total_configs
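# Hedged sanity-check sketch (illustrative values, not from the source):
#     _get_config_num(n_jobs=4, n_configs=-2, n_total_configs=20)  # -> 20 (all configs)
#     _get_config_num(n_jobs=4, n_configs=-1, n_total_configs=20)  # -> 4  (one per job)
#     _get_config_num(n_jobs=4, n_configs=50, n_total_configs=20)  # -> 20 (clamped to total)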
# Validate the types of input parameters.
def _validate_types(ltable, rtable, candidate_set, output_size,
attr_corres, verbose):
validate_object_type(ltable, pd.DataFrame, 'Input left table')
validate_object_type(rtable, pd.DataFrame, 'Input right table')
validate_object_type(candidate_set, pd.DataFrame, 'Input candidate set')
validate_object_type(output_size, int, 'Output size')
if attr_corres is not None:
if not isinstance(attr_corres, list):
logging.error('Input attribute correspondence is not of'
' type list')
raise AssertionError('Input attribute correspondence is'
' not of type list')
for pair in attr_corres:
if not isinstance(pair, tuple):
logging.error('Pair in attribute correspondence list is not'
' of type tuple')
raise AssertionError('Pair in attribute correspondence list'
' is not of type tuple')
if not isinstance(verbose, bool):
logger.error('Parameter verbose is not of type bool')
raise AssertionError('Parameter verbose is not of type bool')
def _calc_table_field_length(record_index_list, num_field):
table_field_length_list = []
for i in range(len(record_index_list)):
field_array = []
for j in range(num_field):
field_array.append(0)
field_array = array('I', field_array)
for j in range(len(record_index_list[i])):
if (record_index_list[i][j] >= num_field):
raise AssertionError('index should less than num_field')
field_array[record_index_list[i][j]] += 1
table_field_length_list.append(field_array)
return table_field_length_list
def _calc_table_field_token_sum(table_field_length_list, num_field):
table_field_token_sum = []
for i in range(num_field):
table_field_token_sum.append(0)
for i in range(len(table_field_length_list)):
for j in range(len(table_field_length_list[i])):
table_field_token_sum[j] += table_field_length_list[i][j]
return table_field_token_sum
def _check_input_field_correspondence_list(ltable, rtable, field_corres_list):
if field_corres_list is None or len(field_corres_list) == 0:
return
true_ltable_fields = list(ltable.columns)
true_rtable_fields = list(rtable.columns)
for pair in field_corres_list:
if type(pair) != tuple or len(pair) != 2:
            raise AssertionError('Error in checking user input field'
                                 ' correspondence: pair \'%s\' is not in the'
                                 ' tuple format!' % (pair))
given_ltable_fields = [field[0] for field in field_corres_list]
given_rtable_fields = [field[1] for field in field_corres_list]
for given_field in given_ltable_fields:
if given_field not in true_ltable_fields:
raise AssertionError('Error in checking user input field'
' correspondence: the field \'%s\' is'
' not in the ltable!' % (given_field))
for given_field in given_rtable_fields:
if given_field not in true_rtable_fields:
raise AssertionError('Error in checking user input field'
' correspondence:'
' the field \'%s\' is not in the'
' rtable!' % (given_field))
return
def _get_field_correspondence_list(ltable, rtable, lkey, rkey, attr_corres):
corres_list = []
if attr_corres is None or len(attr_corres) == 0:
corres_list = mg.get_attr_corres(ltable, rtable)['corres']
if len(corres_list) == 0:
raise AssertionError('Error: the field correspondence list'
' is empty. Please specify the field'
' correspondence!')
else:
for tu in attr_corres:
corres_list.append(tu)
key_pair = (lkey, rkey)
if key_pair not in corres_list:
corres_list.append(key_pair)
return corres_list
# Filter the correspondence list. Remove the fields in numeric types.
def _filter_corres_list(ltable, rtable, ltable_key, rtable_key,
ltable_col_dict, rtable_col_dict, corres_list):
ltable_dtypes = list(ltable.dtypes)
rtable_dtypes = list(rtable.dtypes)
for i in reversed(range(len(corres_list))):
lcol_name = corres_list[i][0]
rcol_name = corres_list[i][1]
# Filter the pair where both fields are numeric types.
if ltable_dtypes[ltable_col_dict[lcol_name]] != numpy.dtype('O') \
and rtable_dtypes[rtable_col_dict[rcol_name]] != numpy.dtype('O'):
if lcol_name != ltable_key and rcol_name != rtable_key:
corres_list.pop(i)
if len(corres_list) == 1 and corres_list[0][0] == ltable_key\
and corres_list[0][1] == rtable_key:
raise AssertionError('The field correspondence list is empty after'
' filtering: please verify your correspondence'
' list, or check if each field is of numeric'
' type!')
# Filter the original input tables according to the correspondence list.
# The filtered tables will only contain the fields in the correspondence list.
def _get_filtered_table(ltable, rtable, corres_list):
ltable_cols = [col_pair[0] for col_pair in corres_list]
rtable_cols = [col_pair[1] for col_pair in corres_list]
lfiltered_table = ltable[ltable_cols]
rfiltered_table = rtable[rtable_cols]
return lfiltered_table, rfiltered_table
def _build_col_name_index_dict(table):
col_dict = {}
col_names = list(table.columns)
for i in range(len(col_names)):
col_dict[col_names[i]] = i
return col_dict
# Select the most important fields for similarity join. The importance
# of a field is measured by the combination of field value uniqueness
# and non-emptiness.
def _select_features(ltable, rtable, lkey, rkey):
lcolumns = list(ltable.columns)
rcolumns = list(rtable.columns)
lkey_index = -1
rkey_index = -1
if len(lcolumns) != len(rcolumns):
raise AssertionError('Error: FILTERED ltable and FILTERED rtable'
' have different number of fields!')
for i in range(len(lcolumns)):
if lkey == lcolumns[i]:
lkey_index = i
if lkey_index < 0:
raise AssertionError('Error: cannot find key in the FILTERED'
' ltable schema!')
for i in range(len(rcolumns)):
if rkey == rcolumns[i]:
rkey_index = i
if rkey_index < 0:
raise AssertionError('Error: cannot find key in the FILTERED'
' rtable schema!')
lweight = _get_feature_weight(ltable)
rweight = _get_feature_weight(rtable)
if len(lweight) != len(rweight):
raise AssertionError('Error: ltable and rtable don\'t have the'
' same schema')
Rank = namedtuple('Rank', ['index', 'weight'])
rank_list = []
for i in range(len(lweight)):
rank_list.append(Rank(i, lweight[i] * rweight[i]))
if lkey_index == rkey_index:
rank_list.pop(lkey_index)
else:
# Make sure we remove the index with larger value first!!!
if lkey_index > rkey_index:
rank_list.pop(lkey_index)
rank_list.pop(rkey_index)
else:
rank_list.pop(rkey_index)
rank_list.pop(lkey_index)
rank_list = sorted(rank_list, key=attrgetter('weight'), reverse=True)
rank_index_list = []
num_selected_fields = 0
if len(rank_list) < SELECTED_FIELDS_UPPER_BOUND:
num_selected_fields = len(rank_list)
else:
num_selected_fields = SELECTED_FIELDS_UPPER_BOUND
for i in range(num_selected_fields):
rank_index_list.append(rank_list[i].index)
return rank_index_list
# Calculate the importance (weight) for each field in a table.
def _get_feature_weight(table):
num_records = len(table)
if num_records == 0:
raise AssertionError('Error: empty table!')
weight = []
for col in table.columns:
value_set = set()
non_empty_count = 0
col_values = table[col]
for value in col_values:
if not pd.isnull(value) and value != '':
value_set.add(value)
non_empty_count += 1
selectivity = 0.0
if non_empty_count != 0:
selectivity = len(value_set) * 1.0 / non_empty_count
non_empty_ratio = non_empty_count * 1.0 / num_records
        # The field weight is the combination of non-emptiness
        # and uniqueness.
weight.append(non_empty_ratio + selectivity)
return weight
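# Hedged worked example (toy values, not from the source): for a column holding
# ['a', 'a', 'b', ''] the non-empty ratio is 3/4 = 0.75 and the selectivity is
# 2 unique values / 3 non-empty = 0.67, giving a weight of roughly 1.42; a fully
# populated, all-distinct key-like column would score the maximum of 2.0.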
# Build the mapping of record key value and its index in the table.
def _build_id_to_index_map(table, table_key):
record_id_to_index = {}
id_col = list(table[table_key])
for i in range(len(id_col)):
# id_col[i] = str(id_col[i])
if id_col[i] in record_id_to_index:
raise AssertionError('record_id is already in record_id_to_index')
record_id_to_index[id_col[i]] = i
return record_id_to_index
# Tokenize a table. First tokenize each table column by a white space,
# then concatenate the columns of each record. The reason for tokenizing
# columns first is that it is more efficient than iterating over each
# dataframe tuple.
def _get_tokenized_table(table, table_key, feature_list):
record_list = []
columns = table.columns[feature_list]
tmp_table = []
for col in columns:
column_token_list = _get_tokenized_column(table[col])
tmp_table.append(column_token_list)
num_records = len(table[table_key])
for i in range(num_records):
token_list = []
index_map = {}
for j in range(len(columns)):
tmp_col_tokens = tmp_table[j][i]
for token in tmp_col_tokens:
if token != '':
if token in index_map:
token_list.append((token + '_' + str(index_map[token]), j))
index_map[token] += 1
else:
token_list.append((token, j))
index_map[token] = 1
record_list.append(token_list)
return record_list
# Tokenize each table column by white spaces.
def _get_tokenized_column(column):
column_token_list = []
for value in list(column):
tmp_value = _replace_nan_to_empty(value)
if tmp_value != '':
tmp_list = list(tmp_value.lower().split(' '))
column_token_list.append(tmp_list)
else:
column_token_list.append([''])
return column_token_list
# Check the value of each field. Replace nan with empty string
# Cast floats into integers.
def _replace_nan_to_empty(field):
if pd.isnull(field):
return ''
elif type(field) in [float, numpy.float64, int, numpy.int64]:
return str('{0:.0f}'.format(field))
else:
return str(field)
# Reformat the input candidate set. Since the input format is DataFrame,
# it's difficult for us to know if a tuple pair is in the candidate
# set or not. We will use the reformatted candidate set in the topk
# similarity join.
def _index_candidate_set(candidate_set, lrecord_id_to_index_map, rrecord_id_to_index_map, verbose):
if len(candidate_set) == 0:
return {}
new_formatted_candidate_set = {}
# Get metadata
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
cm.get_metadata_for_candset(candidate_set, logger, verbose)
# Validate metadata
cm._validate_metadata_for_candset(candidate_set, key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key,
logger, verbose)
ltable_key_data = list(candidate_set[fk_ltable])
rtable_key_data = list(candidate_set[fk_rtable])
for i in range(len(ltable_key_data)):
if ltable_key_data[i] in lrecord_id_to_index_map and \
rtable_key_data[i] in rrecord_id_to_index_map:
l_key_data = lrecord_id_to_index_map[ltable_key_data[i]]
r_key_data = rrecord_id_to_index_map[rtable_key_data[i]]
if l_key_data in new_formatted_candidate_set:
new_formatted_candidate_set[l_key_data].add(r_key_data)
else:
new_formatted_candidate_set[l_key_data] = {r_key_data}
return new_formatted_candidate_set
# Build the global order of tokens in the table by frequency.
def _build_global_token_order(lrecord_list, rrecord_list):
freq_order_dict = {}
_build_global_token_order_impl(lrecord_list, freq_order_dict)
_build_global_token_order_impl(rrecord_list, freq_order_dict)
token_list = []
for token in freq_order_dict:
token_list.append(token)
token_list = sorted(token_list, key=lambda x: (freq_order_dict[x], x))
order_dict = {}
token_index_dict = {}
for i in range(len(token_list)):
order_dict[token_list[i]] = i
token_index_dict[i] = token_list[i]
return order_dict, token_index_dict
# Implementation of building the global order of tokens in the table by frenqucy.
def _build_global_token_order_impl(record_list, order_dict):
for record in record_list:
for tup in record:
token = tup[0]
if token in order_dict:
order_dict[token] += 1
else:
order_dict[token] = 1
def _replace_token_with_numeric_index(record_list, order_dict):
for i in range(len(record_list)):
tmp_record = []
for tup in record_list[i]:
token = tup[0]
index = tup[1]
if token in order_dict:
tmp_record.append((order_dict[token], index))
record_list[i] = tmp_record
# Sort each tokenized record by the global token order.
def _sort_record_tokens_by_global_order(record_list):
for i in range(len(record_list)):
record_list[i] = sorted(record_list[i], key=lambda x: x[0])
def _split_record_token_and_index(record_list):
record_token_list = []
record_index_list = []
for i in range(len(record_list)):
token_list = []
index_list = []
for j in range(len(record_list[i])):
token_list.append(record_list[i][j][0])
index_list.append(record_list[i][j][1])
record_token_list.append(array('I', token_list))
record_index_list.append(array('I', index_list))
return record_token_list, record_index_list
# Assemble the topk heap into a dataframe.
def _assemble_topk_table(rec_list, ltable, rtable, lkey, rkey, ret_key='_id',
l_output_prefix='ltable_', r_output_prefix='rtable_'):
ret_data_col_name_list = ['_id']
ltable_col_names = list(ltable.columns)
rtable_col_names = list(rtable.columns)
lkey_index = 0
rkey_index = 0
for i in range(len(ltable_col_names)):
if ltable_col_names[i] == lkey:
lkey_index = i
for i in range(len(rtable_col_names)):
if rtable_col_names[i] == rkey:
rkey_index = i
ret_data_col_name_list.append(l_output_prefix + lkey)
ret_data_col_name_list.append(r_output_prefix + rkey)
ltable_col_names.remove(lkey)
rtable_col_names.remove(rkey)
for i in range(len(ltable_col_names)):
ret_data_col_name_list.append(l_output_prefix + ltable_col_names[i])
for i in range(len(rtable_col_names)):
ret_data_col_name_list.append(r_output_prefix + rtable_col_names[i])
ret_tuple_list = []
for i in range(len(rec_list)):
tup = rec_list[i]
lrecord = list(ltable.loc[tup[1]])
rrecord = list(rtable.loc[tup[2]])
ret_tuple = [i]
ret_tuple.append(lrecord[lkey_index])
ret_tuple.append(rrecord[rkey_index])
for j in range(len(lrecord)):
if j != lkey_index:
ret_tuple.append(lrecord[j])
for j in range(len(rrecord)):
if j != rkey_index:
ret_tuple.append(rrecord[j])
ret_tuple_list.append(ret_tuple)
    data_frame = pd.DataFrame(ret_tuple_list)
import pandas as pd
import datetime as dt
from ._db_data import DBData
class RDA(DBData):
"""A class that contains all the Rapid Diagnostic Analytics tests"""
def __init__(self):
super().__init__()
db_obj = DBData()
# assign class variables
self.df_ta = db_obj.retrieve_data('cln_shift')
self.df_pa = db_obj.retrieve_data('cln_payroll')
self.df_master = db_obj.retrieve_data('cln_emp_master')
def test_1(self):
"""Payslips: Annualised salaries - Detect indicators of employee on annualised salary & compare to \
“like for like” employees paid hourly rates."""
        # Aggregate salaried employees' net income and hours worked
        # Find a like-for-like hourly worker, multiply their hourly rate by the salaried employee's total hours and compare the difference.
# I've got employee master data in the folder.
        # Bringing in the payroll dataset & master dataset
df_pa = self.df_pa.__deepcopy__()
df_master = self.df_master.__deepcopy__()
# Creating a list of just Salary employees
salary_emp = list(df_pa['emp_id'].loc[df_pa['pay_type']=='SAL'])
# Removing duplicates from these emp_ids'
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
salary_emp = Remove(salary_emp)
# filtering the payroll data to include just salaried employees
df_sal_emp = df_pa.loc[df_pa['emp_id'].isin(salary_emp)]
# Filtering to remove all pay_types said to be excluded from the dataset
def sal_groupby(df_sal_emp):
df_sal_emp_exc = df_sal_emp.loc[df_sal_emp['mapping_inclusion'] != 'Exclude']
# Aggregating by emp_id to give total pay and total hours
agg_df_sal_emp = df_sal_emp_exc.groupby(['emp_id', 'position_name', 'level', 'employment_status', 'venue']).agg(
total_pay=pd.NamedAgg(column='period_amount', aggfunc=sum),
total_hours=pd.NamedAgg(column='hours_for_period', aggfunc=sum)).reset_index()
# Adding in the amount per hour worked
agg_df_sal_emp['amount_per_hour'] = agg_df_sal_emp['total_pay'] / agg_df_sal_emp['total_hours']
return agg_df_sal_emp
# Group by for salaried employees
agg_df_sal_emp = sal_groupby(df_pa.loc[df_pa['emp_id'].isin(salary_emp)])
# Adding a dummy key to show emp is salary
agg_df_sal_emp['is_emp_sal'] = 1
# Group by for non salaried employees
agg_df_non_sal_emp = sal_groupby(df_pa.loc[~df_pa['emp_id'].isin(salary_emp)])
# Adding a dummy key to show emp is NOT salary
agg_df_non_sal_emp['is_emp_sal'] = 0
# Aggregating together
agg_df_results = agg_df_sal_emp.append(agg_df_non_sal_emp)
# Returning converted to dict
return agg_df_results.to_dict(orient='list')
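    # Hedged follow-up sketch (assumption, not part of the original class): the
    # dict returned by test_1 can be rebuilt into a frame to put salaried and
    # hourly cohorts side by side, e.g.
    #     results = pd.DataFrame(rda.test_1())
    #     results.groupby(['position_name', 'is_emp_sal'])['amount_per_hour'].median().unstack()
    # where 'rda' is an RDA() instance (hypothetical variable name).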
def test_2(self):
"""Payslips: “Fully loaded” flat rates - Detect indicators of employee on loaded flat rates & compare to \
“like for like” employees paid hourly rates."""
        # For Rockpool we don't have this!
pass
def test_3(self):
"""Payslips: Allowance consumption - Look for “like for like” employment and assess consistency of pay element \
consumption across the population."""
# within the payroll data key we have a flag for allowances
# Sum the allowances for each employee across the entire period
        # Give the total hours worked for the period and the number of units of allowance awarded to them
        # Bringing in the payroll dataset.
df_pa = self.df_pa.__deepcopy__()
# Filtering for just the allowances
df_pa = df_pa.loc[df_pa['is_allowance'] == 'y']
# aggregating over emp_id
allowance_agg_df = df_pa.groupby(['emp_id', 'position_name', 'mapping_codes', 'mapping_description']).agg(
total_allowance_paid=pd.NamedAgg(column='period_amount', aggfunc=sum),
total_allowance_hours=pd.NamedAgg(column='hours_for_period', aggfunc=sum)).reset_index()
return allowance_agg_df.to_dict(orient='list')
def test_4(self):
"""Payslips: Inaccurate classification or inconsistent rates - Look for “like for like” employment and \
determine deviation from mode rates paid at classification."""
        # Group role, hourly rate and count of all employees across the data set,
        # e.g. if we have a cook who is being paid differently from all the others!
        # Bringing in the payroll dataset.
df_pa = self.df_pa.__deepcopy__()
        # Filtering for just the included work codes as given by the Rockpool logic
df_pa_inc = df_pa.loc[df_pa['mapping_inclusion'] != 'Exclude']
# Aggregating results.
df_pa_inc_agg = df_pa_inc.groupby(['emp_id', 'position_name']).agg(
total_pay=pd.NamedAgg(column='period_amount', aggfunc=sum),
total_hours=pd.NamedAgg(column='hours_for_period', aggfunc=sum)).reset_index()
# Adding in the amount per hour worked
df_pa_inc_agg['amount_per_hour'] = df_pa_inc_agg['total_pay'] / df_pa_inc_agg['total_hours']
return df_pa_inc_agg.to_dict(orient='list')
def test_5(self):
"""Payslips: Superannuation configuration and interpretation - Independent projection of super contributions \
and compare to actual payments. Challenge interpretations."""
# Map which payments should have super
# and then reconcile it to actual super payments
        # However, Rockpool doesn't have super in its data, so this can't be done.
pass
def test_6(self):
"""Time & attendance: Employee “casualness” - Determine the regularity of employee working patterns rate an \
employee’s likelihood to be casual/non-casual."""
# Layout: if employees are working the same rough hours on each day consistently.
weekday = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
df_ta = self.df_ta.__deepcopy__()
df_ta['shift_date'] = df_ta['shift_start'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))
# Calculating the length of the shift in minutes
df_ta['shift_len_mins'] = (df_ta['shift_end'] - df_ta['shift_start']).dt. \
total_seconds().div(60).astype(int)
# The day of the week with Monday=0, Sunday=6. I have changed to str for analysis
df_ta['day_of_week'] = df_ta['shift_start'].dt.dayofweek.astype(int).apply(lambda x: weekday[x])
# Dummy to show if shift starts in AM or PM
df_ta['am'] = df_ta['shift_start'].apply(lambda x: 'am' if x.time() < dt.time(12) else 'pm')
# creating a concat to show day and AM or PM of shift
df_ta['shift_overview'] = df_ta['day_of_week'] + '_' + df_ta['am']
        # Creating a concat to feed into remove duplicates to get rid of split shifts per date and AM or PM, e.g.
        # someone works two PM shifts
df_ta['emp_shift_date_am_pm'] = df_ta['emp_id'] + '_' +\
df_ta['shift_date'] + \
'_' + df_ta['am']
        # Taking a snapshot of df_ta to be returned before deduplication to give a calendar heat map.
# df_ta['shift_start'] = df_ta['shift_start'].apply(lambda x: x.strftime('%d/%-m/%y' '%H:%M:%S'))
# df_ta['shift_end'] = df_ta['shift_end'].apply(lambda x: x.strftime('%d/%-m/%y' '%H:%M:%S'))
cal_heat_map = df_ta[:]
return cal_heat_map.to_dict()
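    # Hedged follow-up sketch (assumption, not part of the original class): the
    # returned dict can feed a weekday/daypart heat map of worked minutes, e.g.
    #     cal = pd.DataFrame(rda.test_6())
    #     pivot = cal.pivot_table(index='day_of_week', columns='am',
    #                             values='shift_len_mins', aggfunc='sum')
    #     sns.heatmap(pivot)   # assumes seaborn is imported as sns
    # where 'rda' is an RDA() instance (hypothetical variable name).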
def test_7(self, shift_duration_hrs, min_break_duration_mins):
"""Time & attendance: Rest and meal breaks - Analyse shift patterns and timing and length of breaks across \
employee cohorts to find potentially missing entitlements."""
# If employees are taking the required break each shift
        # With tsid_start_date being the same day, the break is calculated as the time between the end of the last shift
        # and the start of the next shift.
        # Two parameters: length of shift worked, length of break.
        # Output - which employees aren't taking the breaks
df_ta = self.df_ta.__deepcopy__()
# Creating the shift_date column for anlysis
df_ta['shift_date'] = df_ta['shift_start'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))
# Sort by emp_id, shift date, shift start time
df_ta = df_ta.sort_values(by=['emp_id', 'shift_start'], ascending=True)
# Get shift start and end time for each employee on each day
shifts = df_ta.groupby(['emp_id', 'shift_date']).agg({'shift_start': 'min', 'shift_end': 'max'})
shifts.columns = ['min_shift_start', 'max_shift_end']
shifts = shifts.reset_index()
        shifts['max_shift_end'] = pd.to_datetime(shifts['max_shift_end'])
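        # Hedged sketch of how the flagged employees might be derived from here
        # (assumption; 'prev_end', 'break_mins', 'worked_long' and 'flagged' are
        # hypothetical names, not from the source):
        #     df_ta['prev_end'] = df_ta.groupby(['emp_id', 'shift_date'])['shift_end'].shift()
        #     df_ta['break_mins'] = (df_ta['shift_start'] - df_ta['prev_end']).dt.total_seconds() / 60
        #     worked_long = (df_ta['shift_end'] - df_ta['shift_start']).dt.total_seconds() / 3600 >= shift_duration_hrs
        #     flagged = df_ta[worked_long & (df_ta['break_mins'] < min_break_duration_mins)]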
#!/usr/bin/env python3
import sys
#sys.path.append(".")
#import argparse
import json
import os, errno
import pandas as pd
import numpy as np
import torch
from torch import optim
from . import utils
from .train_eval_models import training_phase, evaluation_phase, repair_phase
from . import RVAE as outlier_model
def compute_metrics(model, X, dataset_obj, args, epoch, losses_save,
logit_pi_prev, mode, mute=True):
# get epoch metrics on outlier detection for train dataset
if args.outlier_model == "VAE":
# outlier analysis
loss_ret, metric_ret = evaluation_phase(model, X, dataset_obj, args, epoch)
# repair analysis
clean_loss_ret = repair_phase(model, X, X_clean, dataset_obj, args, target_errors, mode, epoch)
else:
# outlier analysis
loss_ret, metric_ret = evaluation_phase(model, X, dataset_obj, args, epoch,
clean_comp_show=True,
logit_pi_prev=logit_pi_prev,
w_conv=True)
if args.inference_type == 'seqvae' and not mute:
print('\n')
print('\n\nAdditional Info: Avg. SEQ-VAE Total loss: {:.3f}\tAvg. SEQ-VAE loss: {:.3f}\tAvg. SEQ-VAE NLL: {:.3f}\tAvg. SEQ-VAE KLD_Z: {:.3f}\tAvg. SEQ-VAE KLD_W: {:.3f}'.format(
loss_ret['eval_total_loss_seq'], loss_ret['eval_loss_seq'], loss_ret['eval_nll_seq'], loss_ret['eval_z_kld_seq'], loss_ret['eval_w_kld_seq']))
if args.outlier_model == "RVAE" and not mute:
print('\n\n')
print('====> ' + mode + ' set: -- clean component: p_recon(x_dirty | x_dirty) -- \n \t\t Epoch: {} Avg. loss: {:.3f}\tAvg. NLL: {:.3f}\tAvg. KLD_Z: {:.3f}\tAvg. KLD_W: {:.3f}'.format(
epoch, loss_ret['eval_loss_final_clean'], loss_ret['eval_nll_final_clean'],
loss_ret['eval_z_kld_final_clean'], loss_ret['eval_w_kld_final_clean']))
if args.outlier_model == "RVAE":
return metric_ret['pi_score']
else:
return metric_ret['nll_score']
def save_to_csv(model, X_data, X_data_clean, target_errors, attributes, losses_save,
dataset_obj, folder_output, args, epoch, mode='train'):
""" This method performs all operations needed to save the data to csv """
    # Create saving folders
try:
os.makedirs(folder_output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
### Evaluate model
_, metric_ret = evaluation_phase(model, X_data, dataset_obj, args, epoch)
clean_loss_ret = repair_phase(model, X_data, X_data_clean, dataset_obj, args, target_errors, mode, epoch)
## calc cell metrics
auc_cell_nll, auc_vec_nll, avpr_cell_nll, avpr_vec_nll = utils.cell_metrics(target_errors, metric_ret['nll_score'], weights=False)
if args.outlier_model == "RVAE":
auc_cell_pi, auc_vec_pi, avpr_cell_pi, avpr_vec_pi = utils.cell_metrics(target_errors, metric_ret['pi_score'], weights=True)
else:
auc_cell_pi, auc_vec_pi, avpr_cell_pi, avpr_vec_pi = -10, np.zeros(len(attributes))*-10, -10, np.zeros(len(attributes))*-10
# store AVPR for features (cell only)
df_avpr_feat_cell = pd.DataFrame([], index=['AVPR_nll', 'AVPR_pi'], columns=attributes)
df_avpr_feat_cell.loc['AVPR_nll'] = avpr_vec_nll
df_avpr_feat_cell.loc['AVPR_pi'] = avpr_vec_pi
df_avpr_feat_cell.to_csv(folder_output + "/" + mode + "_avpr_features.csv")
# store AUC for features (cell only)
    df_auc_feat_cell = pd.DataFrame([], index=['AUC_nll', 'AUC_pi'], columns=attributes)
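    # Hedged sketch (assumption): the AUC frame would presumably be populated and
    # written out in the same way as the AVPR block above, e.g.
    #     df_auc_feat_cell.loc['AUC_nll'] = auc_vec_nll
    #     df_auc_feat_cell.loc['AUC_pi'] = auc_vec_pi
    #     df_auc_feat_cell.to_csv(folder_output + "/" + mode + "_auc_features.csv")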
import pytest
import test_aide as ta
import tests.test_data as d
import pandas as pd
import numpy as np
import tubular
from tubular.capping import CappingTransformer
class TestInit(object):
"""Tests for CappingTransformer.init()."""
def test_arguments(self):
"""Test that init has expected arguments."""
ta.functions.test_function_arguments(
func=CappingTransformer.__init__,
expected_arguments=[
"self",
"capping_values",
"quantiles",
"weights_column",
],
expected_default_values=(None, None, None),
)
@pytest.mark.parametrize(
"method_name",
[
("transform"),
("fit"),
("check_capping_values_dict"),
("weighted_quantile"),
("prepare_quantiles"),
],
)
def test_class_methods(self, method_name):
"""Test that CappingTransformer has transform method."""
x = CappingTransformer(capping_values={"a": [1, 3]})
ta.classes.test_object_method(
obj=x, expected_method=method_name, msg=method_name
)
def test_inheritance(self):
"""Test that CappingTransformer inherits from BaseTransformer."""
x = CappingTransformer(capping_values={"a": [1, 3]})
ta.classes.assert_inheritance(x, tubular.base.BaseTransformer)
def test_capping_values_quantiles_both_none_error(self):
"""Test that an exception is raised if both capping_values and quantiles are passed as None."""
with pytest.raises(
ValueError,
match="both capping_values and quantiles are None, either supply capping values in the "
"capping_values argument or supply quantiles that can be learnt in the fit method",
):
CappingTransformer(capping_values=None, quantiles=None)
def test_capping_values_quantiles_both_specified_error(self):
"""Test that an exception is raised if both capping_values and quantiles are specified."""
with pytest.raises(
ValueError,
match="both capping_values and quantiles are not None, supply one or the other",
):
CappingTransformer(
capping_values={"a": [1, 4]}, quantiles={"a": [0.2, 0.4]}
)
@pytest.mark.parametrize("out_range_value", [(-2), (1.2)])
def test_quantiles_outside_range_error(self, out_range_value):
"""Test that an exception is raised if quanties contain values outisde [0, 1] range."""
with pytest.raises(
ValueError,
match=rf"quantile values must be in the range \[0, 1\] but got {out_range_value} for key f",
):
CappingTransformer(
quantiles={"e": [0.1, 0.9], "f": [out_range_value, None]}
)
def test_super_init_called_capping_values(self, mocker):
"""Test that init calls BaseTransformer.init when capping_values are passed."""
expected_call_args = {
0: {
"args": (),
"kwargs": {"columns": ["a", "b"], "verbose": True, "copy": True},
}
}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
CappingTransformer(
capping_values={"a": [1, 3], "b": [None, -1]}, verbose=True, copy=True
)
def test_super_init_called_quantiles(self, mocker):
"""Test that init calls BaseTransformer.init when quantiles are passed."""
expected_call_args = {
0: {
"args": (),
"kwargs": {"columns": ["c", "d"], "verbose": True, "copy": True},
}
}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
CappingTransformer(
quantiles={"c": [0, 0.99], "d": [None, 0.01]}, verbose=True, copy=True
)
def test_check_capping_values_dict_called_quantiles(self, mocker):
"""Test that init calls check_capping_values_dict when quantiles are passed."""
expected_call_args = {
0: {
"args": ({"c": [0, 0.99], "d": [None, 0.01]}, "quantiles"),
"kwargs": {},
}
}
with ta.functions.assert_function_call(
mocker,
tubular.capping.CappingTransformer,
"check_capping_values_dict",
expected_call_args,
):
CappingTransformer(quantiles={"c": [0, 0.99], "d": [None, 0.01]})
def test_check_capping_values_dict_called_capping_values(self, mocker):
"""Test that init calls check_capping_values_dict when capping_values are passed."""
expected_call_args = {
0: {
"args": ({"a": [1, 3], "b": [None, -1]}, "capping_values"),
"kwargs": {},
}
}
with ta.functions.assert_function_call(
mocker,
tubular.capping.CappingTransformer,
"check_capping_values_dict",
expected_call_args,
):
CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
def test_values_passed_in_init_set_to_attribute_capping_values(self):
"""Test that the capping_values passed in init are saved in an attribute of the same name."""
capping_values_dict = {"a": [1, 3], "b": [None, -1]}
x = CappingTransformer(capping_values=capping_values_dict)
ta.classes.test_object_attributes(
obj=x,
expected_attributes={
"capping_values": capping_values_dict,
"weights_column": None,
"quantiles": None,
"_replacement_values": capping_values_dict,
},
msg="capping_values attribute for CappingTransformer set in init",
)
def test_values_passed_in_init_set_to_attribute_quantiles(self):
"""Test that the capping_values passed in init are saved in an attribute of the same name."""
quantiles_dict = {"a": [0.2, 1], "b": [None, 0.9]}
x = CappingTransformer(quantiles=quantiles_dict)
ta.classes.test_object_attributes(
obj=x,
expected_attributes={
"quantiles": quantiles_dict,
"weights_column": None,
"capping_values": {},
"_replacement_values": {},
},
msg="quantiles attribute for CappingTransformer set in init",
)
class TestCheckCappingValuesDict(object):
"""Tests for the CappingTransformer.check_capping_values_dict() method."""
def test_arguments(self):
"""Test that check_capping_values_dict has expected arguments."""
ta.functions.test_function_arguments(
func=CappingTransformer.check_capping_values_dict,
expected_arguments=["self", "capping_values_dict", "dict_name"],
expected_default_values=None,
)
def test_capping_values_not_dict_error(self):
"""Test that an exception is raised if capping_values_dict is not a dict."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
with pytest.raises(
TypeError,
match="aaa should be dict of columns and capping values",
):
x.check_capping_values_dict(
capping_values_dict=("a", [1, 3], "b", [None, -1]), dict_name="aaa"
)
def test_capping_values_non_str_key_error(self):
"""Test that an exception is raised if capping_values_dict has any non str keys."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
with pytest.raises(
TypeError,
match=r"all keys in bbb should be str, but got \<class 'int'\>",
):
x.check_capping_values_dict(
capping_values_dict={"a": [1, 3], 1: [None, -1]}, dict_name="bbb"
)
def test_capping_values_non_list_item_error(self):
"""Test that an exception is raised if capping_values_dict has any non list items."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
with pytest.raises(
TypeError,
match=r"each item in ccc should be a list, but got \<class 'tuple'\> for key b",
):
x.check_capping_values_dict(
capping_values_dict={"a": [1, 3], "b": (None, -1)}, dict_name="ccc"
)
def test_capping_values_non_length_2_list_item_error(self):
"""Test that an exception is raised if capping_values_dict has any non length 2 list items."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
with pytest.raises(
ValueError,
match="each item in ddd should be length 2, but got 1 for key b",
):
x.check_capping_values_dict(
capping_values_dict={"a": [1, 3], "b": [None]}, dict_name="ddd"
)
def test_capping_values_non_numeric_error(self):
"""Test that an exception is raised if capping_values_dict contains any non-nulls and non-numeric values."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
with pytest.raises(
TypeError,
match=r"each item in eee lists must contain numeric values or None, got \<class 'str'\> for key a",
):
x.check_capping_values_dict(
capping_values_dict={"b": [1, 3], "a": [None, "a"]}, dict_name="eee"
)
def test_lower_value_gte_upper_value_error(self):
"""Test that an exception is raised if capping_values_dict[0] >= capping_values_dict[1]."""
x = CappingTransformer(capping_values={"a": [1, 2], "b": [None, -1]})
with pytest.raises(
ValueError,
match="lower value is greater than or equal to upper value for key a",
):
x.check_capping_values_dict(
capping_values_dict={"a": [4, 3], "b": [None, -1]}, dict_name="eee"
)
@pytest.mark.parametrize("value", [(np.NaN), (np.inf), (-np.inf)])
def test_capping_value_nan_inf_error(self, value):
"""Test that an exception is raised if capping_values are np.nan or np.inf values."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, 1]})
with pytest.raises(
ValueError,
match="item in eee lists contains numpy NaN or Inf values",
):
x.check_capping_values_dict(
capping_values_dict={"b": [1, 3], "a": [None, value]}, dict_name="eee"
)
def test_capping_values_both_null_error(self):
"""Test that an exception is raised if both capping_values are null."""
x = CappingTransformer(capping_values={"a": [1, 3], "b": [None, -1]})
with pytest.raises(ValueError, match="both values are None for key a"):
x.check_capping_values_dict(
capping_values_dict={"a": [None, None], "b": [None, 1]}, dict_name="eee"
)
class TestFit(object):
"""Tests for CappingTransformer.fit()."""
def test_arguments(self):
"""Test that fit has expected arguments."""
ta.functions.test_function_arguments(
func=CappingTransformer.fit,
expected_arguments=["self", "X", "y"],
expected_default_values=(None,),
)
def test_quantiles_none_error(self):
"""Test that an exception is raised if quantiles is None when fit is run."""
with pytest.warns(
UserWarning,
match="quantiles not set so no fitting done in CappingTransformer",
):
df = d.create_df_3()
x = CappingTransformer(capping_values={"a": [2, 5], "b": [-1, 8]})
x.fit(df)
def test_super_fit_call(self, mocker):
"""Test the call to BaseTransformer.fit."""
df = d.create_df_9()
x = CappingTransformer(
quantiles={"a": [0.1, 1], "b": [0.5, None]}, weights_column="c"
)
expected_call_args = {0: {"args": (d.create_df_9(), None), "kwargs": {}}}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "fit", expected_call_args
):
x.fit(df)
def test_prepare_quantiles_call_weight(self, mocker):
"""Test the call to prepare_quantiles if weights_column is set."""
df = d.create_df_9()
x = CappingTransformer(
quantiles={"a": [0.1, 1], "b": [0.5, None]}, weights_column="c"
)
expected_call_args = {
0: {
"args": (
d.create_df_9()["a"],
[0.1, 1],
d.create_df_9()["c"],
),
"kwargs": {},
},
1: {
"args": (
d.create_df_9()["b"],
[0.5, None],
d.create_df_9()["c"],
),
"kwargs": {},
},
}
with ta.functions.assert_function_call(
mocker,
tubular.capping.CappingTransformer,
"prepare_quantiles",
expected_call_args,
):
x.fit(df)
def test_prepare_quantiles_call_no_weight(self, mocker):
"""Test the call to prepare_quantiles if weights_column is not set."""
df = d.create_df_9()
x = CappingTransformer(quantiles={"a": [0.1, 1], "b": [0.5, None]})
expected_call_args = {
0: {
"args": (d.create_df_9()["a"], [0.1, 1], None),
"kwargs": {},
},
1: {
"args": (d.create_df_9()["b"], [0.5, None], None),
"kwargs": {},
},
}
with ta.functions.assert_function_call(
mocker,
tubular.capping.CappingTransformer,
"prepare_quantiles",
expected_call_args,
):
x.fit(df)
@pytest.mark.parametrize("weights_column", [("c"), (None)])
def test_prepare_quantiles_output_set_attributes(self, mocker, weights_column):
"""Test the output of prepare_quantiles is set to capping_values and_replacement_values attributes."""
df = d.create_df_9()
x = CappingTransformer(
quantiles={"a": [0.1, 1], "b": [0.5, None]}, weights_column=weights_column
)
mocked_return_values = [["aaaa", "bbbb"], [1234, None]]
mocker.patch(
"tubular.capping.CappingTransformer.prepare_quantiles",
side_effect=mocked_return_values,
)
x.fit(df)
ta.classes.test_object_attributes(
obj=x,
expected_attributes={
"capping_values": {
"a": mocked_return_values[0],
"b": mocked_return_values[1],
},
"_replacement_values": {
"a": mocked_return_values[0],
"b": mocked_return_values[1],
},
},
msg="weighted_quantile output set to capping_values, _replacement_values attributes",
)
@pytest.mark.parametrize("weights_column", [(None), ("c")])
@pytest.mark.parametrize("quantiles", [([0.2, 0.8]), ([None, 0.5]), ([0.6, None])])
def test_quantile_combinations_handled(self, quantiles, weights_column):
"""Test that a given combination of None and non-None quantile values can be calculated successfully."""
df = d.create_df_9()
x = CappingTransformer(
quantiles={"a": quantiles}, weights_column=weights_column
)
try:
x.fit(df)
except Exception as err:
pytest.fail(
f"unexpected exception when calling fit with quantiles {quantiles} - {err}"
)
class TestPrepareQuantiles(object):
"""Tests for the CappingTransformer.prepare_quantiles method."""
def test_arguments(self):
"""Test that transform has expected arguments."""
ta.functions.test_function_arguments(
func=CappingTransformer.prepare_quantiles,
expected_arguments=["self", "values", "quantiles", "sample_weight"],
expected_default_values=(None,),
)
@pytest.mark.parametrize(
"values, quantiles, sample_weight, expected_quantiles",
[
(
d.create_df_9()["a"],
[0.1, 0.6],
d.create_df_9()["c"],
[0.1, 0.6],
),
(
d.create_df_9()["b"],
[0.1, None],
d.create_df_9()["c"],
[0.1],
),
(
d.create_df_9()["a"],
[None, 0.6],
d.create_df_9()["c"],
[0.6],
),
(d.create_df_9()["b"], [0.1, 0.6], None, [0.1, 0.6]),
(d.create_df_9()["a"], [0.1, None], None, [0.1]),
(d.create_df_9()["b"], [None, 0.6], None, [0.6]),
],
)
def test_weighted_quantile_call(
self, mocker, values, quantiles, sample_weight, expected_quantiles
):
"""Test the call to weighted_quantile, inlcuding the filtering out of None values."""
x = CappingTransformer(quantiles={"a": [0.1, 1], "b": [0.5, None]})
mocked = mocker.patch("tubular.capping.CappingTransformer.weighted_quantile")
x.prepare_quantiles(values, quantiles, sample_weight)
assert (
mocked.call_count == 1
), f"unexpected number of calls to weighted_quantile, expecting 1 but got {mocked.call_count}"
call_args = mocked.call_args_list[0]
call_pos_args = call_args[0]
call_kwargs = call_args[1]
expected_pos_args = (values, expected_quantiles, sample_weight)
assert (
call_pos_args == expected_pos_args
), f"unexpected positional args in call to weighted_quantile, expecting {expected_pos_args} but got {call_pos_args}"
assert (
call_kwargs == {}
), f"unexpected kwargs in call to weighted_quantile, expecting None but got {call_kwargs}"
@pytest.mark.parametrize(
"values, quantiles, sample_weight, expected_results",
[
(
d.create_df_9()["a"],
[0.1, 0.6],
d.create_df_9()["c"],
["aaaa"],
),
(
d.create_df_9()["b"],
[0.1, None],
d.create_df_9()["c"],
["aaaa", None],
),
(
d.create_df_9()["a"],
[None, 0.6],
d.create_df_9()["c"],
[None, "aaaa"],
),
(d.create_df_9()["b"], [0.1, 0.6], None, ["aaaa"]),
(d.create_df_9()["a"], [0.1, None], None, ["aaaa", None]),
(d.create_df_9()["b"], [None, 0.6], None, [None, "aaaa"]),
],
)
def test_output_from_weighted_quantile_returned(
self, mocker, values, quantiles, sample_weight, expected_results
):
"""Test the output from weighted_quantile is returned from the function, inlcuding None values added back in."""
x = CappingTransformer(quantiles={"a": [0.1, 1], "b": [0.5, None]})
mocker.patch(
"tubular.capping.CappingTransformer.weighted_quantile",
return_value=["aaaa"],
)
results = x.prepare_quantiles(values, quantiles, sample_weight)
assert (
results == expected_results
), f"unexpected value returned from prepare_quantiles, expecting {results} but got {expected_results}"
class TestTransform(object):
"""Tests for CappingTransformer.transform()."""
def expected_df_1():
"""Expected output from test_expected_output_min_and_max."""
df = pd.DataFrame(
{
"a": [2, 2, 3, 4, 5, 5, np.NaN],
"b": [1, 2, 3, np.NaN, 7, 7, 7],
"c": [np.NaN, 1, 2, 3, 0, 0, 0],
}
)
return df
def expected_df_2():
"""Expected output from test_expected_output_max."""
df = pd.DataFrame(
{
"a": [2, 2, 3, 4, 5, 6, 7, np.NaN],
"b": ["a", "b", "c", "d", "e", "f", "g", np.NaN],
"c": ["a", "b", "c", "d", "e", "f", "g", np.NaN],
}
)
df["c"] = df["c"].astype("category")
return df
def test_arguments(self):
"""Test that transform has expected arguments."""
ta.functions.test_function_arguments(
func=CappingTransformer.transform, expected_arguments=["self", "X"]
)
def test_check_is_fitted_call_count(self, mocker):
"""Test there are 2 calls to BaseTransformer check_is_fitted in transform."""
df = d.create_df_3()
x = CappingTransformer(capping_values={"a": [2, 5], "b": [-1, 8]})
with ta.functions.assert_function_call_count(
mocker, tubular.base.BaseTransformer, "check_is_fitted", 2
):
x.transform(df)
def test_check_is_fitted_call_1(self, mocker):
"""Test the first call to BaseTransformer check_is_fitted in transform."""
df = d.create_df_3()
x = CappingTransformer(capping_values={"a": [2, 5], "b": [-1, 8]})
expected_call_args = {
0: {"args": (["capping_values"],), "kwargs": {}},
1: {"args": (["_replacement_values"],), "kwargs": {}},
}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x.transform(df)
def test_super_transform_called(self, mocker):
"""Test that BaseTransformer.transform called."""
df = d.create_df_3()
x = CappingTransformer(capping_values={"a": [2, 5], "b": [-1, 8]})
expected_call_args = {0: {"args": (d.create_df_3(),), "kwargs": {}}}
with ta.functions.assert_function_call(
mocker,
tubular.base.BaseTransformer,
"transform",
expected_call_args,
return_value=d.create_df_3(),
):
x.transform(df)
def test_learnt_values_not_modified(self):
"""Test that the replacements from fit are not changed in transform."""
capping_values_dict = {"a": [2, 5], "b": [-1, 8]}
df = d.create_df_3()
x = CappingTransformer(capping_values_dict)
x.transform(df)
ta.classes.test_object_attributes(
obj=x,
expected_attributes={"capping_values": capping_values_dict},
msg="Attributes for CappingTransformer set in init",
)
@pytest.mark.parametrize(
"df, expected",
ta.pandas.adjusted_dataframe_params(d.create_df_3(), expected_df_1()),
)
def test_expected_output_min_and_max_combinations(self, df, expected):
"""Test that capping is applied correctly in transform."""
x = CappingTransformer(
capping_values={"a": [2, 5], "b": [None, 7], "c": [0, None]}
)
df_transformed = x.transform(df)
ta.equality.assert_frame_equal_msg(
actual=df_transformed,
expected=expected,
msg_tag="Unexpected values in CappingTransformer.transform",
)
@pytest.mark.parametrize(
"df, expected",
ta.pandas.adjusted_dataframe_params(d.create_df_4(), expected_df_2()),
)
def test_non_cap_column_left_untouched(self, df, expected):
"""Test that capping is applied only to specific columns, others remain the same."""
x = CappingTransformer(capping_values={"a": [2, 10]})
df_transformed = x.transform(df)
ta.equality.assert_frame_equal_msg(
actual=df_transformed,
expected=expected,
msg_tag="Unexpected values in CappingTransformer.transform, with columns meant to not be transformed",
)
def test_non_numeric_column_error(self):
"""Test that transform will raise an error if a column to transform is not numeric."""
df = d.create_df_5()
x = CappingTransformer(capping_values={"a": [2, 5], "b": [-1, 8], "c": [-1, 8]})
with pytest.raises(
TypeError, match=r"The following columns are not numeric in X; \['b', 'c'\]"
):
x.transform(df)
def test_quantile_not_fit_error(self):
"""Test that transform will raise an error if quantiles are specified in init but fit is not run before calling transform."""
df = d.create_df_9()
x = CappingTransformer(quantiles={"a": [0.2, 1], "b": [0, 1]})
with pytest.raises(
ValueError,
match="capping_values attribute is an empty dict - perhaps the fit method has not been run yet",
):
x.transform(df)
def test_replacement_values_dict_not_set_error(self):
"""Test that transform will raise an error if _replacement_values is an empty dict."""
df = d.create_df_9()
x = CappingTransformer(quantiles={"a": [0.2, 1], "b": [0, 1]})
# manually set attribute to get past the capping_values attribute is an empty dict exception
x.capping_values = {"a": [1, 4]}
with pytest.raises(
ValueError,
match="_replacement_values attribute is an empty dict - perhaps the fit method has not been run yet",
):
x.transform(df)
def test_attributes_unchanged_from_transform(self):
"""Test that attributes are unchanged after transform is run."""
df = d.create_df_9()
x = CappingTransformer(quantiles={"a": [0.2, 1], "b": [0, 1]})
x.fit(df)
x2 = CappingTransformer(quantiles={"a": [0.2, 1], "b": [0, 1]})
x2.fit(df)
x2.transform(df)
assert (
x.capping_values == x2.capping_values
), "capping_values attribute modified in transform"
assert (
x._replacement_values == x2._replacement_values
), "_replacement_values attribute modified in transform"
assert (
x.weights_column == x2.weights_column
), "weights_column attribute modified in transform"
assert x.quantiles == x2.quantiles, "quantiles attribute modified in transform"
class TestWeightedQuantile(object):
"""Tests for the CappingTransformer.weighted_quantile method."""
def test_arguments(self):
"""Test that transform has expected arguments."""
ta.functions.test_function_arguments(
func=CappingTransformer.weighted_quantile,
expected_arguments=["self", "values", "quantiles", "sample_weight"],
expected_default_values=(None,),
)
@pytest.mark.parametrize(
"values, sample_weight, quantiles, expected_quantiles",
[
(
[1, 2, 3],
[1, 1, 1],
[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.2, 1.5, 1.8, 2.1, 2.4, 2.7, 3.0],
),
(
[1, 2, 3],
[0, 1, 0],
[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
),
(
[1, 2, 3],
[1, 1, 0],
[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0],
),
(
[1, 2, 3, 4, 5],
[1, 1, 1, 1, 1],
[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
[1.0, 1.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0],
),
([1, 2, 3, 4, 5], [1, 0, 1, 0, 1], [0, 0.5, 1.0], [1.0, 2.0, 5.0]),
],
)
def test_expected_output(
self, values, sample_weight, quantiles, expected_quantiles
):
"""Test that weighted_quantile gives the expected outputs."""
x = CappingTransformer(capping_values={"a": [2, 10]})
        values = pd.Series(values)
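        # Hedged completion sketch (assumption - the remainder of this test is truncated
        # above): a typical finish would call the method under test and compare against
        # the parametrized expectations, e.g.
        #     actual = x.weighted_quantile(values, quantiles, sample_weight)
        #     assert np.allclose(actual, expected_quantiles)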
import pytest
from joblib import Parallel, delayed
from collections import defaultdict
import pandas as pd
import numpy as np
from typing import Dict, Iterable
import pkg_resources, os
from natsort import natsorted
from io import StringIO
# from helper.functions
import logging
from rpy2.robjects import r, pandas2ri
pandas2ri.activate()
from rpy2.robjects.robject import RObject
from rpy2.robjects.packages import importr
importr("S4Vectors")
bioc = importr("GenomicRanges")
length_of_rle = r("function (x) sum(runLength(x))")
from epic.scripts.overlaps.files_to_chromosome_coverage import (files_to_chromosome_coverage)
__author__ = "<NAME> https://github.com/endrebak/"
__license__ = "MIT"
def nucleotide_overlaps_per_file(all_files, nb_cpu):
# type: (Iterable[str], int) -> pd.DataFrame
rles = files_to_chromosome_coverage(all_files, nb_cpu)
nucleotide_overlaps = Parallel(n_jobs=nb_cpu)(delayed(_nucleotide_overlaps_per_file)(
f, rles) for f in rles)
print(nucleotide_overlaps)
    return pd.concat(nucleotide_overlaps)
# encoding: utf-8
import datetime
import numpy as np
import pandas as pd
def get_next_period_day(current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
current_dt = convert_int_to_datetime(current)
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
# offset = offsets.Day
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
# offset = offsets.MonthBegin
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
next_dt = current_dt + offset
if extra_offset:
next_dt = next_dt + extra_offset * pd.tseries.offsets.BDay()
nxt = convert_datetime_to_int(next_dt)
return nxt
def convert_int_to_datetime(dt):
"""Convert int date (%Y%m%d) to datetime.datetime object."""
if isinstance(dt, pd.Series):
dt = dt.astype(str)
elif isinstance(dt, int):
dt = str(dt)
return pd.to_datetime(dt, format="%Y%m%d")
def convert_datetime_to_int(dt):
f = lambda x: x.year * 10000 + x.month * 100 + x.day
if isinstance(dt, (datetime.datetime, datetime.date)):
dt = pd.Timestamp(dt)
res = f(dt)
elif isinstance(dt, np.datetime64):
dt = pd.Timestamp(dt)
res = f(dt)
else:
dt = pd.Series(dt)
res = dt.apply(f)
return res
def shift(date, n_weeks=0):
"""Shift date backward or forward for n weeks.
Parameters
----------
date : int or datetime
The date to be shifted.
n_weeks : int, optional
Positive for increasing date, negative for decreasing date.
Default 0 (no shift).
Returns
-------
res : int or datetime
"""
    delta = pd.Timedelta(weeks=n_weeks)
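    # Hedged completion sketch (assumption): the rest of this function is not shown
    # above; a plausible body shifts the date by `delta`, returning an int when an
    # int (%Y%m%d) was passed in and a datetime-like otherwise.
    if isinstance(date, (int, np.integer)):
        res = convert_datetime_to_int(convert_int_to_datetime(date) + delta)
    else:
        res = date + delta
    return res
# Hedged usage sketch (assumption - not part of the original module); guarded so it only
# runs when this file is executed directly.
if __name__ == "__main__":
    # Next business day after Tuesday 2019-12-31 is Wednesday 2020-01-01.
    assert get_next_period_day(20191231, "day", n=1) == 20200101
    # Moving one 'week' period forward from Friday 2019-12-27 lands on the next Monday.
    assert get_next_period_day(20191227, "week", n=1) == 20191230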
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
| Timestamp("2000-02-29", tz="US/Central") | pandas.Timestamp |
import psycopg2
import overpy
import pandas as pd
import geopandas as gpd
import numpy as np
import networkx as nx
from shapely import geometry
from itertools import product
from itertools import chain
from itertools import zip_longest
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.wkt import loads
import fiona
import requests
from fiona.transform import transform
from shapely.geometry import Polygon
ALLOWED_HIGHWAY_KEYS = ["motorway", "trunk", "primary", "secondary", "tertiary", "unclassified", "residential", "motorway_link", "trunk_link", "primary_link", "secondary_link", "tertiary_link", "living_street", "service"]
DEFAULT_FILTER_DICTIONARY = {k : v for k, v in zip_longest([], ALLOWED_HIGHWAY_KEYS, fillvalue="highway")}
class BoundingBox:
def __init__(self, bounding_box, in_epsg, out_epsg):
self.in_crs = fiona.crs.from_epsg(in_epsg)
self.out_crs = fiona.crs.from_epsg(out_epsg)
xmin, xmax, ymin, ymax = bounding_box
in_polygon = Polygon([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
self.out_polygon = self._transform_polygon(in_polygon, self.in_crs, self.out_crs)
def _transform_polygon(self, polygon, in_crs, out_crs):
xs = np.array(list(polygon.boundary.coords))[:, 0]
ys = np.array(list(polygon.boundary.coords))[:, 1]
nxs, nys = transform(in_crs, out_crs, xs, ys)
return Polygon(list(zip(nxs, nys)))
def polygon_to_epsg(self, out_epsg):
out_crs = fiona.crs.from_epsg(out_epsg)
return self._transform_polygon(self.out_polygon, self.out_crs, out_crs)
@property
def as_polygon(self):
return self.out_polygon
@property
def as_tuple(self):
bounding_box = self.out_polygon.bounds
return (bounding_box[0], bounding_box[2], bounding_box[1], bounding_box[3])
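# Hedged usage sketch (assumption - the coordinates below are illustrative, not part of
# the original module):
#     bbox = BoundingBox((10.72, 10.76, 59.92, 59.95), in_epsg=4326, out_epsg=32633)
#     bbox.as_tuple    # (xmin, xmax, ymin, ymax) re-projected to EPSG:32633
#     bbox.as_polygon  # the same extent as a shapely Polygon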
class PostGISQuery:
"""
A class that enables easily obtaining data from a PostgreSQL database (with PostGIS extension)
containing OSM data. Assumes that database uses EPSG:4326 (longitude-latiude).
Parameters:
---
database -- Name of the database
user -- Name of user with access to database
password -- Password for user
epsg -- Coordinate reference system the data should be converted to
bounding_box -- Tuple of coordinates (xmin, xmax, ymin, ymax)
nodes_table -- Name of the table where nodes are stored
ways_table -- Name of the table where ways are stored
filter_dictionary -- Dictionary that specifies which keys and values are allowed in tags. Ways that do not match any of the key-value pairs are removed
"""
def __init__(
self,
database: str,
user: str,
password: str,
epsg: str,
bounding_box: tuple,
nodes_table: str = "nodes",
ways_table: str = "ways",
filter_dictionary: dict = DEFAULT_FILTER_DICTIONARY,
):
"""See class documentation."""
self.LONLAT_CRS = fiona.crs.from_epsg(4326)
self.LONLAT_EPSG = 4326
self.con = psycopg2.connect(
database=database, user=user, password=password, host="localhost"
)
self.nodes_df = None
self.ways_df = None
self.epsg = epsg
self.crs = fiona.crs.from_epsg(self.epsg)
self.filter_dictionary = filter_dictionary
self.nodes_table = nodes_table
self.ways_table = ways_table
self.xmin, self.xmax, self.ymin, self.ymax = bounding_box
self.query_bounding_box()
self.bounding_box = BoundingBox(bounding_box, self.LONLAT_EPSG, epsg)
def get_nodes_by_id(self, ids: list):
"""Retrieve nodes by their ID."""
node_ids_string = ", ".join(map(str, ids))
node_query = f"""
SELECT *
FROM {self.nodes_table}
WHERE {self.nodes_table}.id IN ({node_ids_string});
"""
self.nodes_df = gpd.GeoDataFrame.from_postgis(
node_query, self.con, geom_col="geom", crs=self.LONLAT_CRS
)
self.nodes_df["x"] = self.nodes_df.geom.map(lambda x: x.x)
self.nodes_df["y"] = self.nodes_df.geom.map(lambda x: x.y)
self._parse_tags(self.nodes_df)
self.nodes_df = (
self.nodes_df.rename(columns={"id": "osmid", "geom": "point"})
.drop(["version", "user_id", "tstamp", "changeset_id"], axis=1)
.set_geometry("point")
)
self.nodes_df.to_crs(epsg=self.epsg, inplace=True)
self.update_node_coordinates()
def update_node_coordinates(self):
"""Update x- and y-columns to match x- and y-attributes of point-column."""
self.nodes_df["x"] = self.nodes_df.point.map(lambda x: x.x)
self.nodes_df["y"] = self.nodes_df.point.map(lambda x: x.y)
def query_bounding_box(self):
"""Get ways intersecting a polygon bounded by the bounding box.
Also gets all nodes contained by the returned ways.
"""
ways_query = f"""
SELECT *
FROM {self.ways_table}
WHERE ST_Intersects(
{self.ways_table}.linestring,
ST_MakeEnvelope({self.xmin}, {self.ymin}, {self.xmax}, {self.ymax}, 4326)
);
"""
self.ways_df = gpd.GeoDataFrame.from_postgis(
ways_query, self.con, geom_col="linestring", crs=self.LONLAT_CRS
)
self.ways_df = (
self.ways_df.rename(columns={"id": "osmid"})
.drop(["version", "user_id", "tstamp", "changeset_id", "bbox"], axis=1)
.to_crs(epsg=self.epsg)
)
self._parse_tags(self.ways_df)
self.filter_ways_by_tags()
self.get_nodes_by_id(self.node_ids)
def _parse_tags(self, gdf: gpd.GeoDataFrame):
"""Reformatting the tags-column."""
assert gdf is not None
gdf["tags"] = gdf.tags.map(self._parse_tag)
def _parse_tag(self, tag: str):
"""Reformatting a tag-string."""
parsed = list(
map(
lambda x: x.strip('"'),
chain(*map(lambda x: x.split("=>"), tag.split(", "))),
)
)
keys = parsed[::2]
values = parsed[1::2]
return {k: v for k, v in zip(keys, values)}
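    # Hedged illustration (assumption - the tag string is an illustrative example):
    # _parse_tag turns an hstore-style string such as
    #     '"highway"=>"residential", "name"=>"Main Street"'
    # into {'highway': 'residential', 'name': 'Main Street'}.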
def filter_ways_by_tags(self):
"""Remove ways that do not contain one of the key-value pairs in the filter dictionary."""
keep = self.ways_df.tags.map(
lambda x: self.filter_tags(self.filter_dictionary, x)
)
self.ways_df = self.ways_df[keep]
def filter_tags(self, filter_dictionary: dict, tags: dict):
"""Check if a tag-dictionary contains either of the key-value pairs in filter_dictionary."""
bool_set = set()
for key, values in filter_dictionary.items():
has_key = key in tags
if has_key:
has_value = len(set(tags[key]).intersection(set(values))) > 0
else:
has_value = False
bool_set.add(has_key and has_value)
return any(bool_set) or len(bool_set) == 0
def transform(self, epsg):
self.ways_df.to_crs(crs=fiona.crs.from_epsg(epsg), inplace=True)
self.nodes_df.to_crs(crs=fiona.crs.from_epsg(epsg), inplace=True)
self.nodes_df["x"] = self.nodes_df.point.map(lambda x: x.x)
self.nodes_df["y"] = self.nodes_df.point.map(lambda x: x.y)
return self
@property
def node_ids(self):
"""Get the ID of the all the nodes in the ways dataframe."""
return np.array(list(chain(*self.ways_df.nodes.values)))
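# Hedged usage sketch (assumption - database name, credentials and coordinates below are
# illustrative only, not part of the original module):
#     query = PostGISQuery(
#         database="osm",
#         user="postgres",
#         password="...",
#         epsg=32633,
#         bounding_box=(10.72, 10.76, 59.92, 59.95),  # (xmin, xmax, ymin, ymax) in lon/lat
#     )
#     ways, nodes = query.ways_df, query.nodes_df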
class OverpassAPIQuery:
"""
A class that enables easily obtaining data from the Overpass API.
Parameters:
---
epsg -- Coordinate reference system the data should be converted to
bounding_box -- Tuple of coordinates (xmin, xmax, ymin, ymax)
filter_dictionary -- Dictionary that specifies which keys and values are allowed in tags. Ways that do not match any of the key-value pairs are removed
"""
def __init__(
self,
epsg: int,
bounding_box: tuple,
nodes_table: str = "nodes",
ways_table: str = "ways",
filter_dictionary: dict = DEFAULT_FILTER_DICTIONARY,
):
"""See class documentation."""
self.LONLAT_CRS = fiona.crs.from_epsg(4326)
self.LONLAT_EPSG = 4326
self.api = overpy.Overpass()
self.nodes_df = None
self.ways_df = None
self.epsg = epsg
self.crs = fiona.crs.from_epsg(self.epsg)
self.filter_dictionary = filter_dictionary
self.xmin, self.xmax, self.ymin, self.ymax = bounding_box
self.query_bounding_box()
self.bounding_box = BoundingBox(bounding_box, self.LONLAT_EPSG, epsg)
def query_bounding_box(self):
"""Get ways without bounding box, as well as nodes within said ways."""
way_response = self.api.query(
f"""
[out:json];
way({self.ymin}, {self.xmin}, {self.ymax}, {self.xmax});
out body;
"""
)
self.ways_df = self.parse_way_response(way_response)
self.filter_ways_by_tags()
self.nodes_df = self.get_nodes_in()
self.add_missing_nodes()
lines = []
for _, row in self.ways_df.iterrows():
df = pd.DataFrame()
nodes = row["nodes"]
df["node"] = nodes
df = df.merge(self.nodes_df, left_on="node", right_on="osmid")
line = LineString([(row["x"], row["y"]) for _, row in df.iterrows()])
lines.append(line)
self.nodes_df.to_crs(epsg=self.epsg, inplace=True)
self.update_node_coordinates()
self.ways_df["linestring"] = pd.Series(lines)
self.ways_df = gpd.GeoDataFrame(
self.ways_df, geometry="linestring", crs=self.LONLAT_CRS
)
self.ways_df.to_crs(epsg=self.epsg, inplace=True)
def get_nodes_in(self):
"""Get nodes in bounding box."""
node_response = self.api.query(
f"""
[out:json];
node({self.ymin}, {self.xmin}, {self.ymax}, {self.xmax});
out body;
"""
)
return self.parse_node_response(node_response)
def get_nodes_by_id(self, ids):
"""Get nodes by their ID."""
ids_str = ",".join(map(str, ids))
nodes_response = self.api.query(
f"""
node(id:{ids_str});
out;
"""
)
return self.parse_node_response(nodes_response)
def update_node_coordinates(self):
"""Update x- and y-columns to match x- and y-attributes of point-column."""
self.nodes_df["x"] = self.nodes_df.point.map(lambda x: x.x)
self.nodes_df["y"] = self.nodes_df.point.map(lambda x: x.y)
def parse_node_response(self, response):
"""Parsing the response obtained from the Overpass API when requesting nodes."""
osmids = []
xs = []
ys = []
tag_dicts = []
geoms = []
for node in response.nodes:
osmid = node.id
osmids.append(osmid)
x = node.lon
xs.append(x)
y = node.lat
ys.append(y)
tags = node.tags
tag_dicts.append(tags)
geom = Point(x, y)
geoms.append(geom)
return gpd.GeoDataFrame(
{
"osmid": pd.Series(osmids, dtype=np.int64),
"x": pd.Series(xs, dtype=np.float64),
"y": pd.Series(ys, dtype=np.float64),
"tags": pd.Series(tag_dicts),
"point": pd.Series(geoms),
},
geometry="point",
crs=self.LONLAT_CRS,
)
def parse_way_response(self, response):
"""Parsing the response obtained from the Overpass API when requesting ways."""
osmids = []
node_lists = []
tag_dicts = []
for way in response.ways:
osmid = way.id
osmids.append(osmid)
nodes = way._node_ids
node_lists.append(nodes)
tags = way.tags
tag_dicts.append(tags)
        return pd.DataFrame({"osmid": osmids, "nodes": node_lists, "tags": tag_dicts})
import json
from datetime import date, datetime, timedelta
import pandas as pd
import numpy as np
ZONE = "ap-northeast-1c"
DATA_START_DATE = "2019-12-01"
DATA_END_DATE = "2019-12-10"
now = datetime.strptime(DATA_END_DATE, "%Y-%m-%d")
# define index
dates = pd.date_range(start=DATA_START_DATE, end=DATA_END_DATE)
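# Added note (hedged): with both `start` and `end` given and the default daily frequency,
# pd.date_range is inclusive at both ends, so `dates` holds 10 entries covering
# 2019-12-01 through 2019-12-10.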
import pandas as pd
from unittest import TestCase # or `from unittest import ...` if on Python 3.4+
import tests.helpers as th
import numpy as np
import category_encoders as encoders
class TestLeaveOneOutEncoder(TestCase):
def test_leave_one_out(self):
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
enc = encoders.LeaveOneOutEncoder(verbose=1, sigma=0.1)
enc.fit(X, y)
th.verify_numeric(enc.transform(X_t))
th.verify_numeric(enc.transform(X_t, y_t))
def test_leave_one_out_values(self):
df = pd.DataFrame({
'color': ["a", "a", "a", "b", "b", "b"],
'outcome': [1, 0, 0, 1, 0, 1]})
X = df.drop('outcome', axis=1)
y = df.drop('color', axis=1)
ce_leave = encoders.LeaveOneOutEncoder(cols=['color'])
obtained = ce_leave.fit_transform(X, y['outcome'])
self.assertEqual([0.0, 0.5, 0.5, 0.5, 1.0, 0.5], list(obtained['color']))
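        # Added note: worked arithmetic behind the expected values above - each row is
        # encoded as the mean outcome of the *other* rows in its category, e.g. the first
        # "a" row (outcome 1) gets (0 + 0) / 2 = 0.0 and the "b" row with outcome 0 gets
        # (1 + 1) / 2 = 1.0.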
def test_leave_one_out_fit_callTwiceOnDifferentData_ExpectRefit(self):
x_a = pd.DataFrame(data=['1', '2', '2', '2', '2', '2'], columns=['col_a'])
x_b = pd.DataFrame(data=['1', '1', '1', '2', '2', '2'], columns=['col_b']) # different values and name
y_dummy = [True, False, True, False, True, False]
encoder = encoders.LeaveOneOutEncoder()
encoder.fit(x_a, y_dummy)
encoder.fit(x_b, y_dummy)
mapping = encoder.mapping
self.assertEqual(1, len(mapping))
self.assertIn('col_b', mapping) # the model should have the updated mapping
expected = pd.DataFrame({'sum': [2.0, 1.0], 'count': [3, 3]}, index=['1', '2'], columns=['sum', 'count'])
np.testing.assert_equal(expected.values, mapping['col_b'].values)
def test_leave_one_out_unique(self):
X = pd.DataFrame(data=['1', '2', '2', '2', '3'], columns=['col'])
y = np.array([1, 0, 1, 0, 1])
encoder = encoders.LeaveOneOutEncoder(handle_unknown='value')
result = encoder.fit(X, y).transform(X, y)
self.assertFalse(result.isnull().any().any(), 'There should not be any missing value')
expected = pd.DataFrame(data=[y.mean(), 0.5, 0, 0.5, y.mean()], columns=['col'])
pd.testing.assert_frame_equal(expected, result)
def test_HandleMissingIsValueAndNanInTrain_ExpectAtValueSet(self):
df = pd.DataFrame({
'color': [np.nan, np.nan, np.nan, "b", "b", "b"],
'outcome': [2, 2, 0, 1, 0, 1]})
X = df.drop('outcome', axis=1)
y = df.drop('color', axis=1)
ce_leave = encoders.LeaveOneOutEncoder(cols=['color'], handle_missing='value')
obtained = ce_leave.fit_transform(X, y['outcome'])
self.assertEqual([1, 1, 2, 0.5, 1.0, 0.5], list(obtained['color']))
def test_HandleMissingIsValueAndNanInTest_ExpectMean(self):
df = pd.DataFrame({
'color': ["a", "a", "a", "b", "b", "b"],
'outcome': [1.6, 0, 0, 1, 0, 1]})
train = df.drop('outcome', axis=1)
target = df.drop('color', axis=1)
test = pd.Series([np.nan, 'b'], name='color')
test_target = pd.Series([0, 0])
ce_leave = encoders.LeaveOneOutEncoder(cols=['color'], handle_missing='value')
ce_leave.fit(train, target['outcome'])
obtained = ce_leave.transform(test, test_target)
self.assertEqual([.6, 1.0], list(obtained['color']))
def test_HandleMissingIsValueAndNanInTestAndNoTestTarget_ExpectMean(self):
df = pd.DataFrame({
'color': ["a", "a", "a", "b", "b", "b"],
'outcome': [1, 0, 0, 1, 0, 1]})
train = df.drop('outcome', axis=1)
target = df.drop('color', axis=1)
test = pd.Series([np.nan, 'b'], name='color')
ce_leave = encoders.LeaveOneOutEncoder(cols=['color'], handle_missing='value')
ce_leave.fit(train, target['outcome'])
obtained = ce_leave.transform(test)
self.assertEqual([.5, 2/3.0], list(obtained['color']))
def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self):
train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color')
target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target')
test = pd.Series(['b', 'c'], name='color')
test_target = pd.Series([0, 0])
ce_leave = encoders.LeaveOneOutEncoder(cols=['color'], handle_unknown='value')
ce_leave.fit(train, target)
obtained = ce_leave.transform(test, test_target)
self.assertEqual([1.0, .6], list(obtained['color']))
def test_leave_one_out_categorical(self):
"""
Test that if the input is a pd.Categorical the output is the same as for string columns.
"""
df = pd.DataFrame({
'color_str': ["a", "a", "a", "b", "b", "b"],
'color_num_cat': pd.Categorical([1.0, 1.0, 1.0, 2.0, 2.0, 2.0]),
'color_str_cat': | pd.Categorical(["a", "a", "a", "b", "b", "b"]) | pandas.Categorical |
import sys
import os
import warnings
import itertools
import subprocess
import numpy as np
import pandas as pd
import slack
import scipy.stats as st
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from matplotlib.gridspec import GridSpec
exec(open(os.path.abspath(os.path.join(
os.path.dirname(__file__), os.path.pardir, 'visualisation', 'light_mode.py'))).read())
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from rotvel_correlation.simstats import Simstats
warnings.filterwarnings("ignore")
pathSave = '/cosma6/data/dp004/dc-alta2/C-Eagle-analysis-work/rotvel_correlation'
def bayesian_blocks(t):
"""Bayesian Blocks Implementation
By <NAME>. License: BSD
Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
Parameters
----------
t : ndarray, length N
data to be histogrammed
Returns
-------
bins : ndarray
array containing the (N+1) bin edges
Notes
-----
This is an incomplete implementation: it may fail for some
datasets. Alternate fitness functions and prior forms can
be found in the paper listed above.
"""
# copy and sort the array
t = np.sort(t)
N = t.size
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
block_length = t[-1] - edges
# arrays needed for the iteration
nn_vec = np.ones(N)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
#-----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
#-----------------------------------------------------------------
for K in range(N):
# Compute the width and count of the final bin for all possible
# locations of the K^th changepoint
width = block_length[:K + 1] - block_length[K + 1]
count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]
# evaluate fitness function for these possibilities
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4 # 4 comes from the prior on the number of changepoints
fit_vec[1:] += best[:K]
# find the max of the fitness: this is the K^th changepoint
i_max = np.argmax(fit_vec)
last[K] = i_max
best[K] = fit_vec[i_max]
#-----------------------------------------------------------------
# Recover changepoints by iteratively peeling off the last block
#-----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
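# Illustrative sketch only (nothing in the script calls this): the edges
# returned by bayesian_blocks() can be passed straight to np.histogram or
# plt.hist as the `bins` argument. The bimodal sample below is synthetic and
# exists purely for demonstration.
def _demo_bayesian_blocks_binning():
    demo_samples = np.concatenate([np.random.normal(0.0, 1.0, 500),
                                   np.random.normal(5.0, 0.5, 200)])
    demo_edges = bayesian_blocks(demo_samples)
    demo_counts, _ = np.histogram(demo_samples, bins=demo_edges)
    return demo_edges, demo_counts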
def freedman_diaconis(x: np.ndarray) -> np.ndarray:
"""
The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size.
Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to
outliers.
:param x: np.ndarray
The 1-dimensional x-data to bin.
:return: np.ndarray
The bins edges computed using the FD method.
"""
return np.histogram_bin_edges(x, bins='fd')
def equal_number_FD(x: np.ndarray) -> np.ndarray:
"""
Takes the number of bins computed using the FD method, but then selects the bin edges splitting
the dataset in bins with equal number of data-points.
:param x: np.ndarray
The 1-dimensional x-data to bin.
:return: np.ndarray
The bins edges computed using the equal-N method.
"""
nbin = len(np.histogram_bin_edges(x, bins='fd')) - 1
npt = len(x)
return np.interp(np.linspace(0, npt, nbin + 1),
np.arange(npt),
np.sort(x))
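# Illustrative sketch only (nothing in the script calls this): contrast the
# Freedman-Diaconis edges with the equal-N edges for the same synthetic,
# skewed sample. The lognormal data below is made up for demonstration.
def _demo_fd_vs_equal_number_binning():
    demo_x = np.random.lognormal(mean=0.0, sigma=1.0, size=1000)
    return {
        'freedman_diaconis': freedman_diaconis(demo_x),
        'equal_number_FD': equal_number_FD(demo_x),
    }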
# Print some overall stats about the datasets
sys.stdout = open(os.devnull, 'w')
read_apertures = [Simstats(simulation_name='macsis', aperture_id=i).read_simstats() for i in range(20)]
sys.stdout = sys.__stdout__
for apid, stat in enumerate(read_apertures):
print(f"Aperture radius {apid} \t --> \t {stat['R_aperture'][0]/stat['R_200_crit'][0]:1.2f} R_200_crit")
del read_apertures
sys.stdout = open(os.devnull, 'w')
read_redshifts = [Simstats(simulation_name=i, aperture_id=0).read_simstats() for i in ['macsis', 'celr_e']]
sys.stdout = sys.__stdout__
for sim_name, stat in zip(['macsis', 'celr_e'], read_redshifts):
print('\n')
for zid, redshift in enumerate(stat.query('cluster_id == 0')['redshift_float']):
print(f"Simulation: {sim_name:<10s} Redshift {zid:2d} --> {redshift:1.2f}")
del read_redshifts
# Start with one single aperture
aperture_id = 9
simstats = list()
simstats.append(Simstats(simulation_name='macsis', aperture_id=aperture_id))
simstats.append(Simstats(simulation_name='celr_e', aperture_id=aperture_id))
simstats.append(Simstats(simulation_name='celr_b', aperture_id=aperture_id))
stats_out = [sim.read_simstats() for sim in simstats]
attrs = [sim.read_metadata() for sim in simstats]
print(f"\n{' stats_out DATASET INFO ':-^50s}")
print(stats_out[0].info())
# Create SQL query
query_COLLECTIVE = list()
query_COLLECTIVE.append('redshift_float < 0.02')
query_COLLECTIVE.append('M_200_crit > 10**9')
query_COLLECTIVE.append('thermodynamic_merging_index_T < 1')
stats_filtered = [stat.query(' and '.join(query_COLLECTIVE)) for stat in stats_out]
# Generate plots catalog
x_labels = ['redshift_float', 'R_500_crit', 'R_aperture', 'M_2500_crit', 'M_aperture_T',
'peculiar_velocity_T_magnitude', 'angular_momentum_T_magnitude',
'dynamical_merging_index_T', 'thermodynamic_merging_index_T',
'substructure_fraction_T']
y_labels = ['M_200_crit','rotTvelT','rot0rot4','rot1rot4','dynamical_merging_index_T',
'thermodynamic_merging_index_T','substructure_fraction_T']
data_entries = list(itertools.product(x_labels, y_labels))
x_labels = []
y_labels = []
for entry in data_entries:
if entry[0] is not entry[1]:
x_labels.append(entry[0])
y_labels.append(entry[1])
xscale = []
yscale = []
for x in x_labels:
scale = 'log' if 'M' in x or 'velocity' in x else 'linear'
xscale.append(scale)
for y in y_labels:
scale = 'log' if 'M' in y or 'velocity' in y else 'linear'
yscale.append(scale)
data_summary = {
'x' : x_labels,
'y' : y_labels,
'xscale' : xscale,
'yscale' : yscale,
}
summary = pd.DataFrame(data=data_summary, columns=data_summary.keys())
summary = summary[summary['y'].str.contains('rot')]
summary = summary[~summary['x'].str.contains('redshift')]
print(f"\n{' summary DATASET PLOTS INFO ':-^40s}\n", summary)
# Activate the plot factory
print(f"\n{' RUNNING PLOT FACTORY ':-^50s}")
data_entries = summary.to_dict('records')
x_binning = bayesian_blocks
print(f"[+] Binning method for x_data set to `{x_binning.__name__}`.")
for entry_index, data_entry in enumerate(data_entries):
filename = f"{data_entry['x'].replace('_', '')}_{data_entry['y'].replace('_', '')}_aperture{aperture_id}.pdf"
are_files = [os.path.isfile(os.path.join(pathSave, 'scatter', filename)),
os.path.isfile(os.path.join(pathSave, 'kdeplot', filename)),
os.path.isfile(os.path.join(pathSave, 'median', filename))]
#if any(are_files): continue
fig = plt.figure(figsize=(15, 10))
gs = GridSpec(2, 3, figure=fig)
gs.update(wspace=0., hspace=0.)
info_ax0 = fig.add_subplot(gs[0]); info_ax0.axis('off')
ax1 = fig.add_subplot(gs[1])
info_ax1 = fig.add_subplot(gs[2]); info_ax1.axis('off')
ax2 = fig.add_subplot(gs[3], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[4], sharex=ax2, sharey=ax2)
ax4 = fig.add_subplot(gs[5], sharex=ax3, sharey=ax3)
ax = [ax1, ax2, ax3, ax4]
plt.setp(ax[0].get_xticklabels(), visible=False)
plt.setp(ax[2].get_yticklabels(), visible=False)
plt.setp(ax[3].get_yticklabels(), visible=False)
xlims = [np.min(pd.concat(stats_filtered)[data_entry['x']]), np.max(pd.concat(stats_filtered)[data_entry['x']])]
ylims = [np.min(pd.concat(stats_filtered)[data_entry['y']]), np.max(pd.concat(stats_filtered)[data_entry['y']])]
# Unresolved issue with the Latex labels
# Some contain an extra `$` at the end of the string, which should not be there.
label_x = attrs[0]['Columns/labels'][data_entry['x']]
label_y = attrs[0]['Columns/labels'][data_entry['y']]
if label_x.endswith('$'): label_x = label_x.rstrip('$')
if label_y.endswith('$'): label_y = label_y.rstrip('$')
ax[0].set_ylabel(label_y)
ax[1].set_ylabel(label_y)
ax[1].set_xlabel(label_x)
ax[2].set_xlabel(label_x)
ax[3].set_xlabel(label_x)
simstats_palette = ['#1B9E77','#D95F02','#7570B3','#E7298A']
z_range = [np.min(pd.concat(stats_filtered)['redshift_float']),
np.max( | pd.concat(stats_filtered) | pandas.concat |
import pandas as pd
import numpy as np
from time import time
output = 'xxx.h5'
chunk_size = 500000
num_chunks = 100
num_elements = num_chunks*chunk_size
def job(_):
a = np.random.random(chunk_size).astype(np.float64)
return | pd.DataFrame({"a":a}) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for Meterstick."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
from typing import Any, Callable, List, Optional, Sequence, Text, Union
from meterstick import sql
from meterstick import utils
import numpy as np
import pandas as pd
def compute_on(df,
split_by=None,
melted=False,
return_dataframe=True,
cache_key=None):
# pylint: disable=g-long-lambda
return lambda x: x.compute_on(df, split_by, melted, return_dataframe,
cache_key)
# pylint: enable=g-long-lambda
def compute_on_sql(
table,
split_by=None,
execute=None,
melted=False,
mode=None,
):
# pylint: disable=g-long-lambda
return lambda m: m.compute_on_sql(
table,
split_by,
execute,
melted,
mode,
)
# pylint: enable=g-long-lambda
def to_sql(table, split_by=None):
return lambda metric: metric.to_sql(table, split_by)
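# Illustrative sketch only (not part of the library): the helpers above return
# callables that expect a Metric, so applying one directly is the same as
# calling the corresponding method on that Metric. `metric` and `df` are
# caller-supplied placeholders, not objects defined in this module.
def _demo_compute_on_helper(metric, df, split_by=None):
  fn = compute_on(df, split_by)
  # Equivalent to metric.compute_on(df, split_by, False, True, None).
  return fn(metric)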
def get_extra_idx(metric):
"""Collects the extra indexes added by Operations for the metric tree.
Args:
metric: A Metric instance.
Returns:
A tuple of column names which are just the index of metric.compute_on(df).
"""
extra_idx = metric.extra_index[:]
children_idx = [
get_extra_idx(c) for c in metric.children if isinstance(c, Metric)
]
if len(set(children_idx)) > 1:
raise ValueError('Incompatible indexes!')
if children_idx:
extra_idx += list(children_idx[0])
return tuple(extra_idx)
def get_global_filter(metric):
"""Collects the filters that can be applied globally to the Metric tree."""
global_filter = sql.Filters()
if metric.where:
global_filter.add(metric.where)
children_filters = [
set(get_global_filter(c))
for c in metric.children
if isinstance(c, Metric)
]
if children_filters:
shared_filter = set.intersection(*children_filters)
global_filter.add(shared_filter)
return global_filter
def is_operation(m):
"""We can't use isinstance because of loop dependancy."""
return isinstance(m, Metric) and m.children and not isinstance(
m, (MetricList, CompositeMetric))
class Metric(object):
"""Core class of Meterstick.
A Metric is defined broadly in Meterstick. It could be a routine metric like
CTR, or an operation like Bootstrap. As long as it taks a DataFrame and
returns a number or a pd.Series, it can be treated as a Metric.
The relations of methods of Metric are
<------------------------------------------------------compute_on-------------------------------------------------------->
<------------------------------compute_through----------------------------> |
| <-------compute_slices-------> | |
| |-> slice1 -> compute | | | |
df -> df.query(where) -> precompute -|-> slice2 -> compute | -> concat -> postcompute -> manipulate -> final_compute -> flush_tmp_cache # pylint: disable=line-too-long
|-> ...
In summary, compute() operates on a slice of data. precompute(),
postcompute(), compute_slices(), compute_through() and final_compute() operate
on the whole data. manipulate() does common data manipulation like melting
and cleaning. Caching is handled in compute_on().
If Metric has children Metrics, then compute_slices is further decomposed to
compute_children() -> compute_on_children(), if they are implemented. For such
Metrics, the decomposition makes the 'mixed' mode of compute_on_sql() simple.
The `mixed` mode computes children in SQL and the rest in Python, so as long
as a compute_children_sql() is implemented and has a similar return to
compute_children(), the compute_on_children() is reused and the `mixed` mode
automatically works.
Depending on your case, you can overwrite most of them, but we suggest you NOT
overwrite compute_on because it might mess up the caching mechanism. Here
are some rules to help you to decide.
1. If your Metric has no vectorization over slices, overwrite compute(). To
overwrite, you can either create a new class inheriting from Metric or just
pass a lambda function into Metric.
2. If you have vectorization logic over slices, overwrite compute_slices().
See Sum() for an example.
3. As compute() operates on a slice of data, it doesn't have access to the
columns to split_by and the index value of the slice. If you need them, check
out compute_with_split_by(). See Jackknife for a real example.
4. The data passed into manipulate() should be a number, a pd.Series, or a
wide/unmelted pd.DataFrame.
It's possible to cache your result. However, as DataFrame is mutable, it's
slow to hash it (O(shape) complexity). To avoid hashing, for most cases you'd
rely on our MetricList() and CompositeMetric() which we know in one round
of their computation, the DataFrame doesn't change. Or if you have to run
many rounds of computation on the same DataFrame, you can directly assign a
cache_key in compute_on(), then it's your responsibility to ensure
same key always corresponds to the same DataFrame and split_by.
Your Metric shouldn't rely on the index of the input DataFrame. We might
set/reset the index during the computation so put all the information you need
in the columns.
Attributes:
name: Name of the Metric.
children: An iterable of Metric(s) this Metric based upon.
cache: A dict to store cached results.
where: A string or list of strings to be concatenated that will be passed to
df.query() as a prefilter.
precompute: A function. See the workflow chart above for its behavior.
compute: A function. See the workflow chart above for its behavior.
postcompute: A function. See the workflow chart above for its behavior.
compute_slices: A function. See the workflow chart above for its behavior.
final_compute: A function. See the workflow chart above for its behavior.
extra_index: Used by Operation. See the doc there.
computable_in_pure_sql: Whether the Metric can be completely computed in SQL.
For example, all Models can't.
name_tmpl: Used by Metrics that have children. It's applied to children's
names in the output.
cache_key: The key currently being used in computation.
tmp_cache_keys: The set to track what temporary cache_keys are used during
computation when default caching is enabled. When computation is done, all
the keys in tmp_cache_keys are flushed.
"""
RESERVED_KEY = '_RESERVED'
def __init__(self,
name: Text,
children: Optional[Union['Metric', Sequence[Union['Metric', int,
float]]]] = (),
where: Optional[Union[Text, Sequence[Text]]] = None,
name_tmpl=None,
precompute=None,
compute: Optional[Callable[[pd.DataFrame], Any]] = None,
postcompute=None,
compute_slices=None,
final_compute=None):
self.name = name
self.cache = {}
self.cache_key = None
self.children = [children] if isinstance(children,
Metric) else children or []
if isinstance(where, List):
where = ' and '.join(where)
self.where = where
self.extra_index = []
self.computable_in_pure_sql = True
self.name_tmpl = name_tmpl
if precompute:
self.precompute = precompute
if compute:
self.compute = compute
if postcompute:
self.postcompute = postcompute
if compute_slices:
self.compute_slices = compute_slices
if final_compute:
self.final_compute = final_compute
self.tmp_cache_keys = set()
def compute_with_split_by(self,
df,
split_by: Optional[List[Text]] = None,
slice_value=None):
del split_by, slice_value # In case users need them in derived classes.
return self.compute(df)
def compute_slices(self, df, split_by: Optional[List[Text]] = None):
"""Applies compute() to all slices. Each slice needs a unique cache_key."""
if self.children:
try:
df = self.compute_children(df, split_by + self.extra_index)
return self.compute_on_children(df, split_by)
except NotImplementedError:
pass
if split_by:
# Adapted from http://esantorella.com/2016/06/16/groupby. This is faster
# than df.groupby(split_by).apply(self.compute).
slices = []
result = []
# Different DataFrames need to have different cache_keys. Here, as we split
# the df, each slice needs to have its own key. And we need to make sure
# the key is recovered so that when we continue to compute other Metrics
# that might be vectorized, the key we use is the one for the whole df.
for df_slice, slice_i in self.split_data(df, split_by):
cache_key = self.cache_key
slice_i_iter = slice_i if isinstance(slice_i, tuple) else [slice_i]
self.cache_key = self.wrap_cache_key(
cache_key or self.RESERVED_KEY,
slice_val=dict(zip(split_by, slice_i_iter)))
try:
result.append(self.compute_with_split_by(df_slice, split_by, slice_i))
slices.append(slice_i)
finally:
self.cache_key = cache_key
if isinstance(result[0], (pd.Series, pd.DataFrame)):
res = pd.concat(result, keys=slices, names=split_by, sort=False)
else:
if len(split_by) == 1:
ind = | pd.Index(slices, name=split_by[0]) | pandas.Index |
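# Illustrative sketch only, based on the Metric docstring above: a Metric with
# no vectorization over slices can be built by passing a plain function as
# `compute`. The column name 'clicks' and the input DataFrame are made-up
# placeholders, and nothing in the module calls this helper.
def _demo_lambda_metric(df):
  total_clicks = Metric('total_clicks', compute=lambda d: d['clicks'].sum())
  return total_clicks.compute_on(df)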
import pandas as pd
import sasoptpy as so
import requests
import os
import time
import random
import string
from subprocess import Popen, DEVNULL
import pathlib
import matplotlib.pyplot as plt
from concurrent.futures import ProcessPoolExecutor
import itertools
def get_data(team_id, gw):
r = requests.get('https://fantasy.premierleague.com/api/bootstrap-static/')
fpl_data = r.json()
element_data = pd.DataFrame(fpl_data['elements'])
team_data = pd.DataFrame(fpl_data['teams'])
elements_team = | pd.merge(element_data, team_data, left_on='team', right_on='id') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 14:39:07 2021
This scripts tests for the (in)dependence between tide and skew surge
@author: acn980
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os,sys,glob
import scipy.stats as sp
import statsmodels.api as sm
sys.path.insert(0,r'E:\github\seasonality_risk\Functions')
from Functions_HCMC import get_skew_surge
#%%
save = False
fn_trunk = 'E:/surfdrive/Documents'
fn = os.path.join(fn_trunk, 'Master2019\Thomas\data\matlab_csv')
fn_files = 'Master2019/Thomas/data'
fn2 = os.path.join(fn_trunk,fn_files)
#%% We import the total water level and tide to obtain the high tide and skew surge
#We import the tide
fn_tide = os.path.join(fn,'Tide_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
date_parser = lambda x: pd.to_datetime(x, format="%d-%m-%Y %H:%M:%S")
tide = | pd.read_csv(fn_tide, parse_dates = True, date_parser= date_parser, index_col = 'Date') | pandas.read_csv |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
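# Example (illustrative only): Convert(['n_neighbors', 5, 'weights', 'uniform'])
# returns {'n_neighbors': 5, 'weights': 'uniform'} -- consecutive items are
# paired up as key/value.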
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
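# Example (illustrative only): for a DataFrame with columns ['a', 'b', 'c'],
# column_index(df, ['c', 'a']) returns [2, 0] -- the integer positions of the
# requested columns in df.columns.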
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(N), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# Calculate the performance and other results for all algorithms and models
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
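# Example (illustrative only): Remove([1, 1, 'a', float('nan'), 'a', 2.0])
# returns [1, 'a', 2.0] -- later duplicates are dropped and float NaN entries
# are skipped entirely.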
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = | pd.DataFrame.from_dict(dicGradB) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
import glob
import os
class ddict(dict):
def __missing__(self, key):
return key
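# Example (illustrative only): d = ddict({'A': 'chrA'}); d['A'] returns 'chrA',
# while a missing key maps to itself (d['B'] returns 'B') instead of raising
# a KeyError.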
# Read in the MNEc Annotations
# Note: this file can be found here: https://github.com/schae234/PonyTools/blob/master/ponytools/data/MNEc2M_Annotation.csv.gz
info = | pd.read_csv("MNEc2M_Annotation.csv.gz",sep=',') | pandas.read_csv |
"""
Plotting code for GPU hardware metrics (i.e., SM occupancy, SM efficiency),
and miscellaneous experiments with GPU utilization.
"""
from rlscope.profiler.rlscope_logging import logger
import argparse
import traceback
import bdb
import copy
import re
import sys
import itertools
import os
import csv
import textwrap
import pprint
import math
from io import StringIO
import json
import codecs
import pandas as pd
from rlscope.parser.plot_utils import setup_matplotlib
setup_matplotlib()
import matplotlib
import matplotlib.ticker
# matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import join as _j, abspath as _a, dirname as _d, exists as _e, basename as _b
from rlscope.profiler.util import pprint_msg
from rlscope.parser.stacked_bar_plots import get_x_env, get_x_algo, xfields_from_xtick_expression, get_capsize, OverlapStackedBarPlot, add_repetition, group_numeric_cols
from rlscope.parser.dataframe import UtilDataframeReader, RLScopeConfig
from rlscope import py_config
from rlscope.parser.common import *
from rlscope.parser import constants
from rlscope.parser.plot_utils import is_pdf, pdf2png
from rlscope.py_config import yes_as_bool
from typing import *
class IMLInvaidArgument(Exception):
pass
def maybe_number(x):
if type(x) != str:
return x
try:
num = int(x)
return num
except ValueError:
pass
try:
num = float(x)
return num
except ValueError:
pass
return x
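# Quick illustration of maybe_number:
#   maybe_number('68') -> 68, maybe_number('1.5') -> 1.5, maybe_number('foo') -> 'foo'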
def parse_filename_attrs(
path : str,
file_prefix : str,
file_suffix : str,
attrs : Iterable[str],
dflt_attrs : Optional[Dict[str, Any]] = None):
attr_name_regex = r'(?:{regex})'.format(
regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr)))
)
attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)'.format(
attr_name=attr_name_regex
)
# e.g.
# path = 'GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv'
# e.g.
# ['GPUHwCounterSampler', 'thread_blocks_68', 'thread_block_size_1024', 'csv']
components = re.split(r'\.', _b(path))
assert components[0] == file_prefix
assert components[-1] == file_suffix
attr_strings = components[1:len(components)-1]
attr_vals = dict()
if dflt_attrs is not None:
attr_vals.update(dflt_attrs)
for attr_string in attr_strings:
m = re.fullmatch(attr_string_regex, attr_string)
if not m:
raise RuntimeError(f"""
Not sure how to parse attribute name/value from \"{attr_string}\" found in {_b(path)}.
Attributes we recognize = {attrs}
""")
attr_vals[m.group('attr_name')] = m.group('attr_value')
return attr_vals
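# Hypothetical usage (file name taken from the comment above; values stay strings because
# parse_filename_attrs applies no type conversion):
#   parse_filename_attrs('GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv',
#                        file_prefix='GPUHwCounterSampler', file_suffix='csv',
#                        attrs={'thread_blocks', 'thread_block_size'})
#   -> {'thread_blocks': '68', 'thread_block_size': '1024'}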
def parse_path_attrs(
path : str,
attrs : Iterable[str],
dflt_attrs : Optional[Dict[str, Any]] = None,
attr_types : Optional[Dict[str, Any]] = None,
debug : bool = False,
):
attr_name_regex = r'(?:{regex})'.format(
regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr)))
)
attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)\b'.format(
attr_name=attr_name_regex
)
# e.g.
# path = 'GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv'
if debug:
logger.info(f"attr_name_regex = {attr_name_regex}")
attr_vals = dict()
if dflt_attrs is not None:
attr_vals.update(dflt_attrs)
path_components = os.path.split(path)
for path_component in path_components:
# e.g.
# ['GPUHwCounterSampler', 'thread_blocks_68', 'thread_block_size_1024', 'csv']
attr_strings = re.split(r'\.', path_component)
for attr_string in attr_strings:
m = re.search(attr_string_regex, attr_string)
if m:
value = m.group('attr_value')
attr_name = m.group('attr_name')
if attr_types is not None and attr_name in attr_types:
value = attr_types[attr_name](value)
attr_vals[attr_name] = value
# if not m:
# raise RuntimeError(f"""
# Not sure how to parse attribute name/value from \"{attr_string}\" found in {path}.
# Attributes we recognize = {attrs}
# """)
missing_attrs = set(attrs).difference(attr_vals.keys())
if len(missing_attrs) > 0:
raise RuntimeError(f"""
Couldn't find all required attributes in {path}.
Attributes we are missing = {missing_attrs}
""")
return attr_vals
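# Hypothetical usage (made-up path, shown for clarity; attr_types converts matched values):
#   parse_path_attrs('output/thread_blocks_68.thread_block_size_1024/GPUHwCounterSampler.csv',
#                    attrs={'thread_blocks', 'thread_block_size'},
#                    attr_types={'thread_blocks': maybe_number, 'thread_block_size': maybe_number})
#   -> {'thread_blocks': 68, 'thread_block_size': 1024}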
METRIC_NAME_CUPTI_TO_PROF = {
# Deprecated CUPTI metric API -- achieved_occupancy:
# Id = 1205
# Shortdesc = Achieved Occupancy
# Longdesc = Ratio of the average active warps per active cycle to the maximum number of warps supported on a multiprocessor
'achieved_occupancy': "sm__warps_active.avg.pct_of_peak_sustained_active",
# Deprecated CUPTI metric API -- sm_efficiency:
# Id = 1203
# Shortdesc = Multiprocessor Activity
# Longdesc = The percentage of time at least one warp is active on a multiprocessor averaged over all multiprocessors on the GPU
# See CUPTI documentation for mapping to new "Profiling API" metric name:
# https://docs.nvidia.com/cupti/Cupti/r_main.html#metrics_map_table_70
'sm_efficiency': "smsp__cycles_active.avg.pct_of_peak_sustained_elapsed",
# Deprecated CUPTI metric API -- inst_executed:
# Metric# 90
# Id = 1290
# Name = inst_executed
# Shortdesc = Instructions Executed
# Longdesc = The number of instructions executed
'inst_executed': "smsp__inst_executed.sum",
# Deprecated CUPTI metric API -- active_cycles:
# Event# 25
# Id = 2629
# Name = active_cycles
# Shortdesc = Active cycles
# Longdesc = Number of cycles a multiprocessor has at least one active warp.
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'active_cycles': "sm__cycles_active.sum",
# Deprecated CUPTI metric API -- active_warps:
# Event# 26
# Id = 2630
# Name = active_warps
# Shortdesc = Active warps
# Longdesc = Accumulated number of active warps per cycle. For every cycle it increments by the number of active warps in the cycle which can be in the range 0 to 64.
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'active_warps': "sm__warps_active.sum",
# Deprecated CUPTI metric API -- elapsed_cycles_sm:
# Event# 33
# Id = 2193
# Name = elapsed_cycles_sm
# Shortdesc = Elapsed clocks
# Longdesc = Elapsed clocks
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'elapsed_cycles_sm': "sm__cycles_elapsed.sum",
}
PROF_TO_METRIC_NAME_CUPTI = dict((v, k) for k, v in METRIC_NAME_CUPTI_TO_PROF.items())
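# Small convenience sketch (not part of the original script): translate a legacy CUPTI
# metric name to its "Profiling API" equivalent, falling back to the input when unmapped.
def _cupti_to_prof_metric(cupti_name):
    # e.g. _cupti_to_prof_metric('sm_efficiency')
    #   -> 'smsp__cycles_active.avg.pct_of_peak_sustained_elapsed'
    return METRIC_NAME_CUPTI_TO_PROF.get(cupti_name, cupti_name)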
# HACK: number of total SMs on the RTX 2080 GPU on the "eco" cluster machines
NUM_SMS = 68
SM_OCCUPANCY_TITLE = "SM occupancy: average percent of warps\nthat are in use within an SM"
SM_EFFICIENCY_TITLE = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
SM_EFFICIENCY_Y_LABEL = f"SM efficiency (%)\n# SMs = {NUM_SMS}"
SM_OCCUPANCY_Y_LABEL = "SM occupancy (%)\nmax threads per block = 1024"
SAMPLE_THROUGHPUT_Y_LABEL = "Throughput (samples/second)"
SAMPLE_LATENCY_Y_LABEL = "Minibatch latency (ms)"
CUPTI_METRIC_Y_LABEL = {
'sm_efficiency': SM_EFFICIENCY_Y_LABEL,
'achieved_occupancy': SM_OCCUPANCY_Y_LABEL,
}
CUPTI_METRIC_Y_LABEL_SHORT = {
'sm_efficiency': "SM efficiency (%)",
'achieved_occupancy': "SM occupancy (%)",
}
TRT_METRIC_YLABELS = {
'host_latency_throughput_qps': SAMPLE_THROUGHPUT_Y_LABEL,
'gpu_compute_mean_ms': "Mean GPU compute time (ms)",
'gpu_compute_percentile_99_ms': "99%-tile GPU compute time (ms)",
}
BATCH_SIZE_X_LABEL = "Batch size"
STREAMS_X_LABEL = "# of CUDA streams"
SIMULATOR_X_LABEL = "Simulator"
STEP_THROUGHPUT_Y_LABEL = "Simulation throughput (samples/sec)"
STEP_LATENCY_Y_LABEL = "Simulation latency (ms)"
RLSCOPE_X_LABEL = "(RL algorithm, Simulator)"
SM_ID_X_LABEL = f"SM ID\n# SMs = {NUM_SMS}"
GPU_UTIL_EXPERIMENT_ATTRS = {
'thread_blocks',
'thread_block_size',
'n_launches',
'iterations',
'num_threads',
'processes',
'hw_counters',
}
GPU_UTIL_EXPERIMENT_ATTR_TYPES = {
'thread_blocks': maybe_number,
'thread_block_size': maybe_number,
'n_launches': maybe_number,
'iterations': maybe_number,
'num_threads': maybe_number,
'processes': yes_as_bool,
'hw_counters': yes_as_bool,
}
MULTI_TASK_ATTRS = set(GPU_UTIL_EXPERIMENT_ATTRS)
MULTI_TASK_ATTRS.update({
## From directory attrs
# 'thread_blocks',
# 'thread_block_size',
# 'n_launches',
# 'iterations',
# 'num_threads',
'iterations_per_sched_sample',
# 'processes',
# 'hw_counters',
## GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
'thread_id',
'stream_id',
'trace_id',
})
MULTI_TASK_JSON_ATTRS = {
## From contents of: GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
"globaltimer_ns",
"kernel_id",
"lane_id",
"sm_id",
"stream_id",
"warp_id",
}
MULTI_TASK_ATTR_TYPES = dict(GPU_UTIL_EXPERIMENT_ATTR_TYPES)
MULTI_TASK_ATTR_TYPES.update({
## From directory attrs
# 'thread_blocks',
# 'thread_block_size',
# 'n_launches',
# 'iterations',
# 'num_threads',
'iterations_per_sched_sample': maybe_number,
# 'processes',
# 'hw_counters',
## GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
'thread_id': maybe_number,
'stream_id': maybe_number,
'trace_id': maybe_number,
})
MULTI_TASK_RAW_ATTR_TYPES = dict(MULTI_TASK_ATTR_TYPES)
MULTI_TASK_RAW_ATTR_TYPES.update({
'num_sms': maybe_number,
'sms_allocated': maybe_number,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number,
})
# MULTI_TASK_RAW_ATTR_DFLTS = dict(MULTI_TASK)
MULTI_TASK_RAW_ATTR_DFLTS = {
'num_sms': None,
'sms_allocated': None,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None,
}
MULTI_TASK_RAW_ATTRS = MULTI_TASK_ATTRS.union(MULTI_TASK_RAW_ATTR_TYPES.keys()).difference({
'stream_id',
'thread_id',
'trace_id',
})
# suffix=".num_sms_${NUM_SMS}.sms_allocated_${sms_allocated}.CUDA_MPS_ACTIVE_THREAD_PERCENTAGE_${CUDA_MPS_ACTIVE_THREAD_PERCENTAGE}"
# all_cycles:
# the metric is computed over all cycles on the GPU, including cycles where the GPU
# is idle and not executing any kernels.
# active_cycles:
# the metric is computed over active GPU cycles.
# Measurement periods where the GPU is idle result in a metric value of "0".
MEASUREMENT_PERIOD_ACTIVE_CYCLES = 'active_cycles'
MEASUREMENT_PERIOD_ALL_CYCLES = 'all_cycles'
CUPTI_METRIC_MEASUREMENT_PERIOD = {
'achieved_occupancy': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'sm_efficiency': MEASUREMENT_PERIOD_ALL_CYCLES,
'inst_executed': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'active_cycles': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'active_warps': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'elapsed_cycles_sm': MEASUREMENT_PERIOD_ALL_CYCLES,
}
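# e.g. CUPTI_METRIC_MEASUREMENT_PERIOD['achieved_occupancy'] == MEASUREMENT_PERIOD_ACTIVE_CYCLES,
# so occupancy is averaged only over cycles where the GPU was busy, whereas
# CUPTI_METRIC_MEASUREMENT_PERIOD['sm_efficiency'] == MEASUREMENT_PERIOD_ALL_CYCLES also counts idle time.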
FLOAT_RE = r'(?:[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)'
UNIT_RE = r'(?:\b(?:ms|s|qps)\b)'
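# Illustration of the parsing regexes (hypothetical line, matching the trtexec format assumed below):
#   re.search(rf"(?P<value>{FLOAT_RE}) (?P<unit>{UNIT_RE})", "mean: 3.42 ms")
#   -> value='3.42', unit='ms'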
class TrtexecExperiment:
def __init__(self, args):
self.args = args
def run(self):
self.read_df()
self.plot_df()
def read_df(self):
self._read_trtexec_df()
self._read_tf_inference_df()
self._read_simulator_df()
self._read_mps_df()
"""
TODO: merge trtexec_df and tf_inference_df
trtexec_field tf_inference_field
host_latency_throughput_qps throughput_qps
"""
def plot_df(self):
"""
Plot trtexec7 experiments.
:return:
"""
"""
batch_size = 1, 8, 16, 32, 64
streams = 1
plot:
throughput
sm_efficiency
sm_occupancy
"""
def _plot_batch_size_vs(streams, suffix=None):
self._plot_batch_size_vs_throughput(
title="Throughput with increasing batch size",
streams=streams,
suffix=suffix)
def filter_tensorflow(plot_df):
plot_df = plot_df[plot_df['config'] == 'TF']
return plot_df
self._plot_batch_size_vs_throughput(
title="Throughput with increasing batch size",
streams=streams,
filter_df=filter_tensorflow,
suffix=f"{or_empty(suffix)}.just_tensorflow")
self._plot_batch_size_vs_metric(
title=SM_EFFICIENCY_TITLE,
cupti_metric='sm_efficiency',
streams=streams,
suffix=suffix)
self._plot_batch_size_vs_metric(
title=SM_OCCUPANCY_TITLE,
cupti_metric='achieved_occupancy',
streams=streams,
suffix=suffix)
_plot_batch_size_vs(streams=1)
def _plot_streams_vs(batch_size, suffix=None):
def _title(title):
return f"{title}:\n(batch size = {batch_size})"
trt_metric_title = {
'host_latency_throughput_qps': _title("Throughput with increasing streams"),
'gpu_compute_mean_ms': _title("Mean GPU compute time with increasing streams"),
'gpu_compute_percentile_99_ms': _title("99-%tile GPU compute time with increasing streams"),
}
cuda_graph_dict = {
'host_latency_throughput_qps': None,
'gpu_compute_mean_ms': None,
'gpu_compute_percentile_99_ms': None,
}
for trt_metric in trt_metric_title.keys():
self._plot_streams_vs_trt_metric(
trt_metric, batch_size,
title=trt_metric_title[trt_metric],
cuda_graph=cuda_graph_dict.get(trt_metric, None))
# self._plot_streams_vs_throughput(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
# batch_size=batch_size,
# suffix=suffix)
self._plot_streams_vs_metric(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
title=SM_EFFICIENCY_TITLE,
cupti_metric='sm_efficiency',
batch_size=batch_size,
suffix=suffix)
self._plot_streams_vs_metric(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
title=SM_OCCUPANCY_TITLE,
cupti_metric='achieved_occupancy',
batch_size=batch_size,
suffix=suffix)
"""
batch_size = 1
streams = 1, 2, 3, ..., 8
plot:
throughput
sm_efficiency
sm_occupancy
"""
_plot_streams_vs(batch_size=1)
if self.trtexec_df is not None:
"""
batch_size = (best batch size for streams == 1)
streams = 1, 2, 3, ..., 8
plot:
throughput
sm_efficiency
sm_occupancy
"""
best_batch_size = self._compute_best_batch_size()
_plot_streams_vs(batch_size=best_batch_size, suffix='.best_batch_size')
self._plot_simulator_vs_steptime()
self._plot_simulator_vs_throughput()
def _plot_multiprocess_inference(df, throughput_title=None, inference_title=None, filter_df=None, suffix=None):
# if throughput_title is None:
# throughput_title = 'Increasing inference throughput when slicing SMs with CUDA MPS processes'
# if inference_title is None:
# inference_title = 'Inference latency when slicing SMs with CUDA MPS processes'
self._plot_mps_batch_size_vs_metric_by_num_tasks(
df=self.mps_df,
metric='throughput_qps',
title=throughput_title,
xlabel=BATCH_SIZE_X_LABEL,
ylabel=SAMPLE_THROUGHPUT_Y_LABEL,
filter_df=filter_df,
suffix=suffix,
global_ymax=True,
)
self._plot_mps_batch_size_vs_metric_by_num_tasks(
df=self.mps_raw_df,
metric='inference_time_ms',
title=inference_title,
xlabel=BATCH_SIZE_X_LABEL,
ylabel=SAMPLE_LATENCY_Y_LABEL,
filter_df=filter_df,
suffix=suffix,
global_ymax=False,
)
"""
3 different graphs for multi-process experiment:
- Multi-process (CPU) / config_cpu
row['cpu']
assert not row['mps']
- Multi-process MPS (GPU) / config_mps_gpu_evenly
row['mps'] and row['sm_alloc_strategy'] == 'evenly'
assert not row['cpu']
- Multi-process MPS (GPU) / config_mps_gpu_evenly_x2
row['mps'] and row['sm_alloc_strategy'] == 'evenly_x2'
assert not row['cpu']
- Multi-process (GPU, no MPS) / config_gpu
not row['mps'] and not row['cpu']
"""
def is_config_cpu(row):
is_cpu = row['cpu']
if is_cpu:
assert not row['mps']
return is_cpu
# def is_config_mps_gpu_evenly(row):
# is_mps = row['mps']
# if is_mps:
# assert not row['cpu']
# return is_mps and row['sm_alloc_strategy'] == 'evenly'
#
# def is_config_mps_gpu_evenly_x2(row):
# is_mps = row['mps']
# if is_mps:
# assert not row['cpu']
# return is_mps and row['sm_alloc_strategy'] == 'evenly_x2'
def is_config_mps_gpu(row):
is_mps = row['mps']
if is_mps:
assert not row['cpu']
return is_mps
def is_config_gpu(row):
return not row['mps'] and not row['cpu']
def as_row_filter_func(is_config):
def row_filter_func(df):
df = df[df.apply(is_config, axis=1)]
return df
return row_filter_func
# throughput_ymax = self.mps_df['']
sm_alloc_strategies = self.mps_df[self.mps_df['mps']]['sm_alloc_strategy'].unique().tolist()
for sm_alloc_strategy in sm_alloc_strategies:
def _is_config(row):
return is_config_mps_gpu(row) and row['sm_alloc_strategy'] == sm_alloc_strategy
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (GPU) + CUDA MPS',
inference_title='Inference latency:\nmulti-process TF scripts (GPU) + CUDA MPS',
filter_df=as_row_filter_func(_is_config),
suffix=f".config_mps_gpu_{sm_alloc_strategy}")
# _plot_multiprocess_inference(self.mps_df, filter_df=as_row_filter_func(is_config_mps_gpu_evenly), suffix='.config_mps_gpu_evenly')
# _plot_multiprocess_inference(self.mps_df, filter_df=as_row_filter_func(is_config_mps_gpu_evenly_x2), suffix='.config_mps_gpu_evenly_x2')
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (CPU)',
inference_title='Inference latency:\nmulti-process TF scripts (CPU)',
filter_df=as_row_filter_func(is_config_cpu),
suffix='.config_cpu')
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (GPU)',
inference_title='Inference latency:\nmulti-process TF scripts (GPU)',
filter_df=as_row_filter_func(is_config_gpu),
suffix='.config_gpu')
def _compute_best_batch_size(self):
df = self.trtexec_df[self.trtexec_df['streams'] == 1]
max_throughput = df['host_latency_throughput_qps'].max()
batch_sizes = df[df['host_latency_throughput_qps'] == max_throughput]['batch_size'].unique()
assert len(batch_sizes) == 1
best_batch_size = batch_sizes[0]
return best_batch_size
def _plot_streams_vs_metric(self, title, cupti_metric, batch_size, ylabel=None, suffix=None):
if self.trtexec_gpu_hw_df is None:
return
df = copy.copy(self.trtexec_gpu_hw_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='trtexec')
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="streams", y="metric_value",
hue="config",
data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is None:
ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
g.set_ylabels(ylabel)
g.set_xlabels(STREAMS_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{cupti_metric}.batch_size_{batch_size}{suffix}.svg'))
def _plot_batch_size_vs_metric(self, title, cupti_metric, streams, ylabel=None, suffix=None):
if self.trtexec_gpu_hw_df is None:
return
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
plot_df = pd.DataFrame(columns=['batch_size', 'metric_value', 'config'])
if self.trtexec_gpu_hw_df is not None:
df = copy.copy(self.trtexec_gpu_hw_df)
df = df[df['streams'] == streams]
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='trtexec')
plot_df = plot_df.append(df[plot_df.columns])
if self.tf_inference_gpu_hw_df is not None:
df = copy.copy(self.tf_inference_gpu_hw_df)
df = df[df['range_name'] == 'inference_loop/inference']
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='tf_inference')
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="batch_size", y="metric_value",
hue="config",
data=plot_df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is None:
ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
g.set_ylabels(ylabel)
g.set_xlabels(BATCH_SIZE_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_{cupti_metric}.streams_{streams}{suffix}.svg'))
def _plot_streams_vs_trt_metric(self, trt_metric, batch_size, title=None, ylabel=None, alias=None, cuda_graph=None, suffix=None):
if self.trtexec_df is None:
return
if alias is None:
alias = trt_metric
df = copy.copy(self.trtexec_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
df = self._add_config(df, df_type='trtexec')
sns.set(style="whitegrid")
plot_kwargs = dict(
x="streams",
y=trt_metric,
kind="bar",
palette="muted",
)
if cuda_graph is None:
plot_kwargs.update(dict(
hue="config",
))
elif cuda_graph:
df = df[df['cuda_graph']]
else:
df = df[~ df['cuda_graph']]
plot_kwargs.update(dict(
data=df,
))
g = sns.catplot(**plot_kwargs)
g.despine(left=True)
if ylabel is None:
ylabel = TRT_METRIC_YLABELS[trt_metric]
g.set_ylabels(ylabel)
# if xlabel is not None:
g.set_xlabels(STREAMS_X_LABEL)
if title is not None:
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
ss = StringIO()
if cuda_graph is None:
pass
elif cuda_graph:
ss.write(f".cuda_graph_yes")
else:
ss.write(f".cuda_graph_no")
if suffix is not None:
ss.write(f".{suffix}")
ss = ss.getvalue()
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{alias}.batch_size_{batch_size}{ss}.svg'))
def _plot_mps_batch_size_vs_metric_by_num_tasks(self, df, metric, title=None, xlabel=None, ylabel=None, filter_df=None, suffix=None, global_ymax=False):
"""
Throughput graph:
Y-axis = throughput
X-axis (major) = batch-size (larger impact on throughput)
X-axis (minor) = num_tasks (lesser impact on throughput)
Latency graph:
Y-axis = latency samples (mean/std across all processes)
X-axis (major) = batch-size (larger impact on latency)
X-axis (minor) = num_tasks (lesser impact on latency)
"""
if df is None:
return
df = copy.copy(df)
assert metric in df
# df = self._add_config(df, df_type='trtexec')
global_df = df
if filter_df is not None:
df = filter_df(df)
sns.set(style="whitegrid")
g = sns.catplot(x="batch_size",
y=metric,
# data=df,
hue="config",
data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is not None:
g.set_ylabels(ylabel)
if xlabel is not None:
g.set_xlabels(xlabel)
if title is not None:
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if global_ymax:
new_ymax = global_df[metric].max()
ymin, ymax = g.ax.get_ylim()
g.ax.set_ylim((ymin, max(ymax, new_ymax)))
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['mps_dir'], f'mps_batch_size_vs_{metric}_by_num_tasks{suffix}.svg'))
def _plot_streams_vs_throughput(self, title, batch_size, suffix=None):
if self.trtexec_df is None:
return
df = copy.copy(self.trtexec_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
df = self._add_config(df, df_type='trtexec')
sns.set(style="whitegrid")
g = sns.catplot(x="streams", y="host_latency_throughput_qps",
# data=df,
hue="config", data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
g.set_ylabels(SAMPLE_THROUGHPUT_Y_LABEL)
g.set_xlabels(STREAMS_X_LABEL)
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_throughput.batch_size_{batch_size}{suffix}.svg'))
def _add_config(self, df, df_type):
assert df_type in {'trtexec', 'tf_inference'}
if df_type == 'trtexec':
def _config(row):
if row['cuda_graph']:
return 'TensorRT - CUDA graph ON'
return 'TensorRT'
df['config'] = df.apply(_config, axis=1)
elif df_type == 'tf_inference':
def _config(row):
if row['xla']:
return 'TF - XLA ON'
return 'TF'
df['config'] = df.apply(_config, axis=1)
else:
raise NotImplementedError()
return df
def _plot_batch_size_vs_throughput(self, title, streams, filter_df=None, suffix=None):
if self.trtexec_df is None:
return
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
plot_df = pd.DataFrame(columns=['batch_size', 'throughput_qps', 'config'])
if self.trtexec_df is not None:
df = copy.copy(self.trtexec_df)
df = df[df['streams'] == streams]
df.rename(columns={
'host_latency_throughput_qps': 'throughput_qps',
}, inplace=True)
df = self._add_config(df, df_type='trtexec')
plot_df = plot_df.append(df[plot_df.columns])
if self.tf_inference_result_df is not None:
df = copy.copy(self.tf_inference_result_df)
df = self._add_config(df, df_type='tf_inference')
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
if filter_df is not None:
plot_df = filter_df(plot_df)
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="batch_size", y="throughput_qps",
# data=df,
hue="config", data=plot_df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
g.set_ylabels(SAMPLE_THROUGHPUT_Y_LABEL)
g.set_xlabels(BATCH_SIZE_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_throughput.streams_{streams}{suffix}.svg'))
def parse_trtexec_logs_as_df(self, logs):
def each_field_value(log):
for section in log:
for attr, value in log[section].items():
field = f"{section}_{attr}"
yield field, value
all_fields = set()
if len(logs) > 0:
all_fields = set([field for field, value in each_field_value(logs[0])])
data = dict()
for log in logs:
for field, value in each_field_value(log):
if field not in all_fields:
raise RuntimeError(f"Saw unexpected field={field}; expected one of {all_fields}")
if field not in data:
data[field] = []
data[field].append(value)
df = pd.DataFrame(data)
return df
def parse_trtexec_log(self, trtexec_log_path):
"""
{
'host_latency': {
'min_ms': 0.123,
'mean_ms': 0.123,
...
}
}
:param trtexec_log_path:
:return:
"""
with open(trtexec_log_path) as f:
section = None
data = dict()
def strip_log_prefix(line):
line = re.sub(r'^\[[^\]]+\]\s+\[I\]\s+', '', line)
return line
def as_attr(section):
attr = section
attr = re.sub(' ', '_', attr)
attr = attr.lower()
return attr
def parse_section(line):
m = re.search(r'(?P<section>Host Latency|GPU Compute|Enqueue Time)$', line, flags=re.IGNORECASE)
if m:
section = as_attr(m.group('section'))
return section
return None
def parse_e2e_metric(line):
# NOTE: end-to-end is the time = endOutput - startInput
# non-end-to-end = (endInput + startInput) + (endCompute + startCompute) + (endOutput + startOutput)
# So, "end-to-end" will include some time spent host-side, whereas non-end-to-end just includes time spent GPU side
# (the transfers, the kernel running).
m = re.search(r'(?P<name>min|max|mean|median): (?P<value>{float}) {unit} \(end to end (?P<e2e_value>{float}) (?P<unit>{unit})\)'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
# Just ignore this value...
value = float(m.group('value'))
e2e_value = float(m.group('e2e_value'))
name = "{name}_{unit}".format(name=m.group('name'), unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': e2e_value,
}
return None
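            # Example line this parser targets (format inferred from the regex, not a verbatim log):
            #   "mean: 3.42 ms (end to end 3.91 ms)" -> {'name': 'mean_ms', 'value': 3.91}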
def parse_metric_with_unit(line):
m = re.search(r'(?P<name>[a-zA-Z][a-zA-Z ]+): (?P<value>{float}) (?P<unit>{unit})'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{unit}".format(name=m.group('name'), unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def parse_percentile(line):
m = re.search(r'(?P<name>percentile): (?P<value>{float}) (?P<unit>{unit}) at (?P<percent>\d+)%'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{percent}_{unit}".format(
name=m.group('name'),
percent=m.group('percent'),
unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def parse_e2e_percentile(line):
m = re.search(r'(?P<name>percentile): [^(]+\(end to end (?P<value>{float}) (?P<unit>{unit}) at (?P<percent>\d+)%\)'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{percent}_{unit}".format(
name=m.group('name'),
percent=m.group('percent'),
unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def _add_parsed_value(dic):
if section not in data:
data[section] = dict()
data[section][dic['name']] = dic['value']
for lineno, line in enumerate(f, start=1):
line = line.rstrip()
ret = parse_section(line)
if ret:
section = ret
continue
if section is None:
continue
line = strip_log_prefix(line)
ret = parse_e2e_metric(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_e2e_percentile(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_percentile(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_metric_with_unit(line)
if ret:
_add_parsed_value(ret)
continue
if self.debug:
logger.info("Skip {path}:{lineno}: {line}".format(
path=trtexec_log_path,
lineno=lineno,
line=line,
))
return data
@property
def debug(self):
return self.args['debug']
def _read_mps_df(self):
self.mps_df = None
self.mps_raw_df = None
if self.args['mps_dir'] is None:
return
"""
/home/jgleeson/clone/rlscope/output/microbench_inference_multiprocess/batch_size_128.num_tasks_1.env_id_BreakoutNoFrameskip-v4.num_sms_68.sms_allocated_68.CUDA_MPS_ACTIVE_THREAD_PERCENTAGE_100.0
"""
mps_dflt_attrs = {
'num_sms': None,
'sms_allocated': None,
'sm_alloc_strategy': None,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None,
}
mps_attr_types = {
'mps': yes_as_bool,
'cpu': yes_as_bool,
'batch_size': maybe_number,
'num_tasks': maybe_number,
'env_id': str,
'num_sms': maybe_number,
'sms_allocated': maybe_number,
'sm_alloc_strategy': str,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number,
}
mps_attrs = set(mps_attr_types.keys())
dfs = []
raw_dfs = []
for path in each_file_recursive(self.args['mps_dir']):
if not re.search(r'^mode_microbench_inference_multiprocess\.merged\.json$', _b(path)):
continue
js = load_json(path)
df = pd.DataFrame(
dict((k, [v]) for k, v in js['summary_metrics'].items())
)
attr_dict = parse_path_attrs(
path,
mps_attrs,
mps_dflt_attrs,
mps_attr_types,
)
for attr_name, attr_value in attr_dict.items():
df[attr_name] = attr_value
dfs.append(df)
# Q: Should we discard outliers...?
raw_df = pd.DataFrame(data=js['raw_samples'])
for attr_name, attr_value in attr_dict.items():
raw_df[attr_name] = attr_value
raw_dfs.append(raw_df)
self.mps_df = pd.concat(dfs)
self.mps_raw_df = | pd.concat(raw_dfs) | pandas.concat |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
res = ser.fillna(Period("2012-01", freq="M"))
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
tm.assert_series_equal(res, exp)
assert res.dtype == "Period[M]"
def test_fillna_dt64_timestamp(self, frame_or_series):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
obj = frame_or_series(ser)
# reg fillna
result = obj.fillna(Timestamp("20130104"))
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130104"),
Timestamp("20130103 9:01:01"),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = obj
tm.assert_equal(result, expected)
def test_fillna_dt64_non_nao(self):
# GH#27419
ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])
val = np.datetime64("1975-04-05", "ms")
result = ser.fillna(val)
expected = Series(
[Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
)
tm.assert_series_equal(result, expected)
def test_fillna_numeric_inplace(self):
x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
y = x.copy()
return_value = y.fillna(value=0, inplace=True)
assert return_value is None
expected = x.fillna(value=0)
tm.assert_series_equal(y, expected)
# ---------------------------------------------------------------
# CategoricalDtype
@pytest.mark.parametrize(
"fill_value, expected_output",
[
("a", ["a", "a", "b", "a", "a"]),
({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
(Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
(Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
(Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
(Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
],
)
def test_fillna_categorical(self, fill_value, expected_output):
# GH#17033
# Test fillna for a Categorical series
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
exp = Series(Categorical(expected_output, categories=["a", "b"]))
result = ser.fillna(fill_value)
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"fill_value, expected_output",
[
(Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
(Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
(
Series(
Categorical(
["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
)
),
["a", "d", "b", "d", "a"],
),
],
)
def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
# GH#26215
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
result = ser.fillna(fill_value)
tm.assert_series_equal(result, exp)
def test_fillna_categorical_raises(self):
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
cat = ser._values
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
ser.fillna("d")
msg2 = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg2):
cat.fillna(Series("d"))
with pytest.raises(TypeError, match=msg):
ser.fillna({1: "d", 3: "a"})
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
ser.fillna(["a", "b"])
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
ser.fillna(("a", "b"))
msg = (
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
ser.fillna(DataFrame({1: ["a"], 3: ["b"]}))
@pytest.mark.parametrize("dtype", [float, "float32", "float64"])
@pytest.mark.parametrize("fill_type", tm.ALL_REAL_NUMPY_DTYPES)
def test_fillna_float_casting(self, dtype, fill_type):
# GH-43424
ser = Series([np.nan, 1.2], dtype=dtype)
fill_values = Series([2, 2], dtype=fill_type)
result = ser.fillna(fill_values)
expected = Series([2.0, 1.2], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_fillna_f32_upcast_with_dict(self):
# GH-43424
ser = Series([np.nan, 1.2], dtype=np.float32)
result = ser.fillna({0: 1})
expected = Series([1.0, 1.2], dtype=np.float32)
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------
# Invalid Usages
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method="ffil")
except ValueError as inst:
assert "ffil" in str(inst)
def test_fillna_listlike_invalid(self):
ser = Series(np.random.randint(-100, 100, 50))
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
ser.fillna([1, 2])
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
ser.fillna((1, 2))
def test_fillna_method_and_limit_invalid(self):
# related GH#9217, make sure limit is an int and greater than 0
ser = Series([1, 2, 3, None])
msg = "|".join(
[
r"Cannot specify both 'value' and 'method'\.",
"Limit must be greater than 0",
"Limit must be an integer",
]
)
for limit in [-1, 0, 1.0, 2.0]:
for method in ["backfill", "bfill", "pad", "ffill", None]:
with pytest.raises(ValueError, match=msg):
ser.fillna(1, limit=limit, method=method)
def test_fillna_datetime64_with_timezone_tzinfo(self):
# https://github.com/pandas-dev/pandas/issues/38851
# different tzinfos representing UTC treated as equal
ser = Series(date_range("2020", periods=3, tz="UTC"))
expected = ser.copy()
ser[1] = NaT
result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc))
tm.assert_series_equal(result, expected)
        # but we don't (yet) consider distinct tzinfos for non-UTC tz equivalent
ts = Timestamp("2000-01-01", tz="US/Pacific")
ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific"))
assert ser2.dtype.kind == "M"
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser2.fillna(ts)
expected = Series([ser[0], ts, ser[2]], dtype=object)
# TODO(2.0): once deprecation is enforced
# expected = Series(
# [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]],
# dtype=ser2.dtype,
# )
tm.assert_series_equal(result, expected)
def test_fillna_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
srs = Series([1, 2, 3, np.nan], dtype=float)
msg = (
r"In a future version of pandas all arguments of Series.fillna "
r"except for the argument 'value' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = srs.fillna(0, None, None)
expected = Series([1, 2, 3, 0], dtype=float)
tm.assert_series_equal(result, expected)
class TestFillnaPad:
def test_fillna_bug(self):
ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
filled = ser.fillna(method="ffill")
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index)
tm.assert_series_equal(filled, expected)
filled = ser.fillna(method="bfill")
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index)
tm.assert_series_equal(filled, expected)
def test_ffill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
def test_ffill_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
ser = Series([1, 2, 3])
msg = (
r"In a future version of pandas all arguments of Series.ffill "
r"will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.ffill(0)
expected = Series([1, 2, 3])
tm.assert_series_equal(result, expected)
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH#14956
series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
tm.assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
ts[2] = np.NaN
tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))
def test_bfill_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
ser = Series([1, 2, 3])
msg = (
r"In a future version of pandas all arguments of Series.bfill "
r"will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.bfill(0)
expected = Series([1, 2, 3])
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 16:20:13 2019
@author: jcloud
"""
import os, sys
import pandas as pd
import numpy as np
import argparse
import json
def contains_alpha(x):
res = False
for i in x:
if i.isalpha():
res = True
break
return res
# Take input from user
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' %message)
self.print_help()
sys.exit(2)
def gracefulExit():
parser.print_help()
exit(2)
basePath=os.getcwd()
parser=MyParser(prog="COINS_BIDS")
parser.add_argument("--runsheet",dest="runsheet", required=True, help='Path to the COINS run sheet')
parser.add_argument("--keysheet",dest="keysheet", required=True, help='Path to the COINS key sheet')
parser.add_argument("--temp_json",dest="temp_json", required=True, help='Path to config.json')
parser.add_argument("--sub_dir",dest="input_path", required=True, help='Path to subject source directory')
#Checking if attempt has been made to pass arguments
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args=parser.parse_args()
#import COINS run sheet and COINS run sheet key
if args.runsheet:
headr, tailr=os.path.split(args.runsheet)
if headr=='':
headr=basePath
fullpathr=os.path.join(headr,tailr)
df=pd.read_csv(fullpathr)
if args.keysheet:
headk, tailk=os.path.split(args.keysheet)
if headk=='':
headk=basePath
fullpathk=os.path.join(headk,tailk)
keysheet= | pd.read_csv(fullpathk) | pandas.read_csv |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 11 16:00:46 2019
@author: pipolose
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pathlib2 import Path
from polyML import polyssifier_3 as ps
from scipy.stats import pearsonr, mannwhitneyu, kruskal
from collections import OrderedDict
'''
USER OPTIONS
'''
out_dir = Path('/data2/polo/figures')
do_save = False
'''
SOURCE DATA
'''
task_dir = Path('/data2/polo/half_baked_data/slopes/abs')
single_task_slope_csv = task_dir / 'raw_slopes_ok_subjs_abs.csv'
corrected_single_task_csv = task_dir / 'deCAPed_preHD_slopes_abs.csv'
n_visit_csv = task_dir / 'n_visits_used.csv'
in_mat = Path().cwd().parent / 'VBM_controls' /\
'TON_log_deg_maps_local_gm_corrected.mat'
'''
LOAD
'''
source = 'python'
subject_list = ps.load_subject_list(in_mat.as_posix(), source=source)
n_df = pd.read_csv(n_visit_csv, index_col='subjid')
slope_df = pd.read_csv(single_task_slope_csv, index_col='subjid')
task_names = n_df.columns.values
behav_n_imag = [s for s in subject_list if s in slope_df.index]
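# Keep only preHD-group subjects that also have imaging data (behav_n_imag).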
x = slope_df.loc[behav_n_imag]['group'] == 'preHD'
preHD_idx = x.loc[x].index.values
n_df.corrwith(slope_df.loc[preHD_idx][task_names])
task_corrs = OrderedDict()
corr_dict = OrderedDict()
corr_dict['task'] = task_names
p_vals = []
corr_vals = []
for task in task_names:
x = n_df.loc[preHD_idx][task]
y = slope_df.loc[preHD_idx][task]
corr, p = pearsonr(x, y)
p_vals.append(p)
corr_vals.append(corr)
corr_dict['p-value'] = p_vals
corr_dict['corr'] = corr_vals
corr_dict['variance-explained'] = [100 * (c ** 2) for c in corr_vals]
corr_dict['Bonferroni p-val'] = [p*len(p_vals) for p in p_vals]
raw_corr_df = | pd.DataFrame(corr_dict) | pandas.DataFrame |
"""Evaluators for the recommender model."""
import numpy as np
import pandas as pd
class Evaluator:
"""RMSE, RMSUE, MAE, MAUE evaluators wrapper."""
def _preprocessing(self, test_file, predictions_file):
test = pd.read_csv(test_file)
predictions = pd.read_csv(predictions_file)
test = test.set_index(['user', 'item']).sort_index()
predictions = predictions.set_index(['user', 'item']).sort_index()
test = test.loc[test.index.isin(predictions.index)]
test_values = test.values
return test_values, predictions
@staticmethod
def _predictions_counter(n_pred, n_r_pred, pred_file):
pred_counter = {
'knn': [n_pred],
'r_knn': [n_r_pred],
'total': [n_pred + n_r_pred]
}
| pd.DataFrame(pred_counter) | pandas.DataFrame |
"""
Video Face Manipulation Detection Through Ensemble of CNNs
Image and Sound Processing Lab - Politecnico di Milano
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
"""
import os
from pathlib import Path
from typing import List
import albumentations as A
import numpy as np
import pandas as pd
import torch
from PIL import Image
from albumentations.pytorch import ToTensorV2
from torch.utils.data import Dataset, IterableDataset
from .utils import extract_bb
'''
:method definition
-------------------------------
:param record       the DataFrame row corresponding to this image
:param root         the directory holding the faces cropped from the dataset
:param size         the target image size
:param scale        the transform strategy to apply to the image
:param transformer  the processor used to transform the image
-------------------------------
:return face        the face after being processed by the transformer
'''
def load_face(record: pd.Series, root: str, size: int, scale: str, transformer: A.BasicTransform) -> torch.Tensor:
path = os.path.join(str(root), str(record.name))
    # use the autocache when the image size is < 256 or when the 'tight' strategy is used
autocache = size < 256 or scale == 'tight'
if scale in ['crop', 'scale', ]:
cached_path = str(Path(root).joinpath('autocache', scale, str(
size), str(record.name)).with_suffix('.jpg'))
else:
# when self.scale == 'tight' the extracted face is not dependent on size
cached_path = str(Path(root).joinpath(
'autocache', scale, str(record.name)).with_suffix('.jpg'))
face = np.zeros((size, size, 3), dtype=np.uint8)
    # if the face has already been written to the autocache, use it directly
if os.path.exists(cached_path):
try:
face = Image.open(cached_path)
face = np.array(face)
if len(face.shape) != 3:
raise RuntimeError('Incorrect format: {}'.format(path))
except KeyboardInterrupt as e:
            # We want keyboard interrupts to be propagated
raise e
except (OSError, IOError) as e:
print('Deleting corrupted cache file: {}'.format(cached_path))
print(e)
os.unlink(cached_path)
face = np.zeros((size, size, 3), dtype=np.uint8)
    # otherwise crop the face from the frame using the bounding box given in the df, resize it, cache it, convert it to an ndarray and run it through the transformer
if not os.path.exists(cached_path):
try:
frame = Image.open(path)
bb = record['left'], record['top'], record['right'], record['bottom']
face = extract_bb(frame, bb=bb, size=size, scale=scale)
if autocache:
os.makedirs(os.path.dirname(cached_path), exist_ok=True)
                # PIL Image.save(path, ...); quality=95 is the highest quality setting used here
face.save(cached_path, quality=95, subsampling='4:4:4')
face = np.array(face)
if len(face.shape) != 3:
raise RuntimeError('Incorrect format: {}'.format(path))
except KeyboardInterrupt as e:
            # We want keyboard interrupts to be propagated
raise e
except (OSError, IOError) as e:
print('Error while reading: {}'.format(path))
print(e)
face = np.zeros((size, size, 3), dtype=np.uint8)
face = transformer(image=face)['image']
return face
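# Hedged usage sketch (illustrative, not part of the original module): builds a fake
# bounding-box record and runs it through load_face. The frame path, cache root and
# box coordinates are hypothetical placeholders; only the record layout (a pd.Series
# whose .name is the relative frame path, with left/top/right/bottom fields) comes
# from the function above.
def _demo_load_face(root: str = '/tmp/frames_cache') -> torch.Tensor:
    record = pd.Series({'left': 10, 'top': 20, 'right': 138, 'bottom': 148},
                       name='video0/frame0.jpg')
    transformer = A.Compose([A.Resize(224, 224), A.Normalize(), ToTensorV2()])
    return load_face(record, root=root, size=224, scale='crop', transformer=transformer)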
class FrameFaceIterableDataset(IterableDataset):
def __init__(self,
roots: List[str],
dfs: List[pd.DataFrame],
size: int, scale: str,
num_samples: int = -1,
transformer: A.BasicTransform = ToTensorV2(),
output_index: bool = False,
labels_map: dict = None,
seed: int = None):
"""
:param roots: List of root folders for frames cache
:param dfs: List of DataFrames of cached frames with 'bb' column as array of 4 elements (left,top,right,bottom)
and 'label' column
:param size: face size
:param num_samples: the size of maximum_len(df_real,df_fake)
:param scale: Rescale the face to the given size, preserving the aspect ratio.
If false crop around center to the given size
:param transformer:
:param output_index: enable output of df_frames index
:param labels_map: map from 'REAL' and 'FAKE' to actual labels
"""
self.dfs = dfs
self.size = int(size)
        # if no seed was supplied at construction time, pick one at random
self.seed0 = int(
seed) if seed is not None else np.random.choice(2 ** 32)
# adapt indices
dfs_adapted = [df.copy() for df in self.dfs]
for df_idx, df in enumerate(dfs_adapted):
mi = pd.MultiIndex.from_tuples(
[(df_idx, key) for key in df.index], names=['df_idx', 'df_key'])
df.index = mi
# Concat
        self.df = pd.concat(dfs_adapted, axis=0, join='inner')
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random
from math import sqrt
from datetime import datetime,timedelta
from pytz import timezone
from time import time
from collections import deque
from IPython.display import clear_output
from statsmodels.tools.eval_measures import mse
from statsmodels.tools.eval_measures import meanabs as mae
def test_index_gen(time_stamp_threshhold = '2008-01-01 00:00:00-08:00',test_time_num = 1800, test_airport_num = 60):
idx2airport=pd.read_csv("idx2airport.csv",index_col=0)['0'].to_dict()
idx2time_stamp=pd.read_csv("idx2time_stamp.csv",index_col=0)['0'].to_dict()
time_stamp2idx=pd.read_csv("time_stamp2idx.csv",index_col=0)['0'].to_dict()
random.seed(4)
test_airport_index = random.sample(idx2airport.keys(), k=test_airport_num)
test_date_index = random.sample(list(idx2time_stamp.keys())[time_stamp2idx[time_stamp_threshhold]:], k=test_time_num)
return test_date_index,test_airport_index
def rwse_eval(pred_data, test_date_index, test_airport_index):
arr_sche = pd.read_csv("ArrTotalFlights.csv",index_col=0)
dep_sche = pd.read_csv("DepTotalFlights.csv",index_col=0)
DelayRatio = pd.read_csv("DelayRatio.csv",index_col=0)
p = DelayRatio.fillna(0).iloc[test_date_index, test_airport_index]
for i in test_airport_index:
p[str(i)] = p[str(i)].values - pred_data[str(i)].values
numerator = 0
denominator = 0
for i in test_airport_index:
for j in test_date_index:
            weight_wae = np.abs(arr_sche[str(i)].values[j]) + np.abs(dep_sche[str(i)].values[j])  # arrivals + departures
numerator += (np.abs(p[str(i)].loc[j])**2) * (weight_wae**2)
for i in test_airport_index:
for j in test_date_index:
            denominator += (np.abs(arr_sche[str(i)].values[j]) + np.abs(dep_sche[str(i)].values[j]))**2
rwse = float(sqrt(numerator/denominator))
return rwse
def wae_eval(pred_data,test_date_index,test_airport_index):
arr_sche = pd.read_csv("ArrTotalFlights.csv",index_col=0)
dep_sche = pd.read_csv("DepTotalFlights.csv",index_col=0)
DelayRatio = pd.read_csv("DelayRatio.csv",index_col=0)
p = DelayRatio.fillna(0).iloc[test_date_index, test_airport_index]
for i in test_airport_index:
p[str(i)] = p[str(i)].values - pred_data[str(i)].values
numerator = 0
denominator = 0
for i in test_airport_index:
for j in test_date_index:
            weight_wae = np.abs(arr_sche[str(i)].values[j]) + np.abs(dep_sche[str(i)].values[j])  # arrivals + departures
numerator += np.abs(p[str(i)].loc[j]) * weight_wae
for i in test_airport_index:
for j in test_date_index:
            denominator += (np.abs(arr_sche[str(i)].values[j]) + np.abs(dep_sche[str(i)].values[j]))
wae = float(numerator/denominator)
return wae
def model_evaluation(pred_data, test_date_index, test_airport_index):
DelayRatio=pd.read_csv("DelayRatio.csv",index_col=0)
mae_score = np.mean(mae(DelayRatio.fillna(0).iloc[test_date_index, test_airport_index],pred_data.fillna(0),axis=0))
print ('mae metric: ',mae_score)
rmse_score = np.mean(mse(DelayRatio.fillna(0).iloc[test_date_index, test_airport_index],pred_data,axis=0))**0.5
print ('rmse metric: ',rmse_score)
wae_score = wae_eval(pred_data, test_date_index, test_airport_index)
print ('wae metric: ', wae_score)
rwse_score = rwse_eval(pred_data, test_date_index, test_airport_index)
print ('rwse metric: ', rwse_score)
DelayFlights = pd.read_csv("ArrDelayFlights.csv",index_col=0)+pd.read_csv("DepDelayFlights.csv",index_col=0)
TotalFlights = pd.read_csv("ArrTotalFlights.csv",index_col=0)+pd.read_csv("DepTotalFlights.csv",index_col=0)
w_pre_data = TotalFlights.iloc[test_date_index, test_airport_index]*pred_data
#display(w_pre_data)
# w_mae_score = np.mean(mae(DelayFlights.iloc[test_date_index, test_airport_index],w_pre_data,axis=0))
# print ('w_mae metric: ',w_mae_score)
# w_rmse_score = np.mean(mse(DelayFlights.iloc[test_date_index, test_airport_index],w_pre_data,axis=0))**0.5
# print ('w_rmse metric: ',w_rmse_score)
return
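# Hedged usage sketch (illustrative, not part of the original script): evaluates a
# trivial baseline that predicts zero delay ratio everywhere. It assumes the CSV
# files read by the functions above are present and that their columns are the
# stringified airport indices, as the evaluation helpers themselves assume.
def _demo_zero_baseline_evaluation():
    test_date_index, test_airport_index = test_index_gen()
    zero_pred = pd.DataFrame(0.0, index=test_date_index,
                             columns=[str(i) for i in test_airport_index])
    model_evaluation(zero_pred, test_date_index, test_airport_index)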
class GraphFlow:
#================= 08/30/19 maintained by <NAME>=============
#================= 08/16/19 maintained by <NAME>=============
def __init__(self,idx2airport,airport2idx,idx2time_stamp,time_stamp2idx,
ArrTotalFlights,DepTotalFlights,ArrDelayFlights,DepDelayFlights,
pre_data,G,dt,grid = None ,start_time = None,end_time = None,DelayRatio=None):
self.idx2airport = idx2airport
self.airport2idx = airport2idx
self.idx2time_stamp = idx2time_stamp
self.time_stamp2idx = time_stamp2idx
self.ArrTotalFlights = ArrTotalFlights
self.DepTotalFlights = DepTotalFlights
self.ArrDelayFlights = ArrDelayFlights
self.DepDelayFlights = DepDelayFlights
self.G = G
self.pre_data=pre_data
self.dt = dt
#=========== self.grid=============
if grid is not None:
self.grid = grid
elif (start_time is not None) & (end_time is not None):
self.grid = pd.date_range(start_time,end_time,freq=dt,tz=timezone('America/Los_Angeles'))
else:
raise
#=========== self.DelayRatio=============
if DelayRatio is not None:
self.DelayRatio = DelayRatio
else:
self.DelayRatio = pd.DataFrame( data = (ArrDelayFlights.values + DepDelayFlights.values)/
(ArrTotalFlights.values + DepTotalFlights.values),
index=DepTotalFlights.index,
columns=DepTotalFlights.columns)
#=================self.RealDelayRatio,RealTotalFlights,RealDelayFlights====
self.RealDelayRatio = self.DelayRatio.rename(index = self.idx2time_stamp).rename(columns = self.idx2airport)
self.RealArrDelayFlights = self.ArrDelayFlights.rename(index = self.idx2time_stamp).rename(columns = self.idx2airport)
self.RealDepDelayFlights = self.DepDelayFlights.rename(index = self.idx2time_stamp).rename(columns = self.idx2airport)
self.RealArrTotalFlights = self.ArrTotalFlights.rename(index = self.idx2time_stamp).rename(columns = self.idx2airport)
self.RealDepTotalFlights = self.DepTotalFlights.rename(index = self.idx2time_stamp).rename(columns = self.idx2airport)
return
def draw_network_attr(self, nodes_attr = None , edges_attr = 'weight' , size = 6 , with_pos = True):
plt.figure(1,figsize=(size,size))
G=self.to_undir_G(self.G)
if nodes_attr == 'TimeModifyer':
labels={node:value.total_seconds()/3600 for node,value in nx.get_node_attributes(G,nodes_attr).items()}
elif nodes_attr =='time_zone':
labels={node:value.split('/')[1] for node,value in nx.get_node_attributes(G,nodes_attr).items()}
elif nodes_attr is None:
labels=None
else:
raise
pos=nx.get_node_attributes(G,'pos')
if not with_pos:
pos=nx.spring_layout(G)
nx.draw(G,pos=pos,labels = labels)
nx.draw_networkx_edge_labels(G,pos=pos,edge_labels=nx.get_edge_attributes(G,edges_attr))
plt.show()
return
def real_format(self,df):
return df.rename(columns = self.idx2time_stamp).rename(index = self.idx2airport)
def slice(self,start_time,end_time):
idx2airport = self.idx2airport
airport2idx = self.airport2idx
idx2time_stamp = self.idx2time_stamp
time_stamp2idx =self. time_stamp2idx
dt = self.dt
grid = pd.date_range(start_time,end_time,freq=dt,tz=timezone('America/Los_Angeles'))
G = self.G
t= time()
# 1 generate new pre_data
pre_data = self.pre_data[(self.pre_data.AbsArrTime >= str(grid[0])) &
(self.pre_data.AbsDepTime <= str(grid[-1]))]
print("===============pre_data generated: ", t-time())
# 2 use pre_data to generate G
temp = pre_data.groupby(['Origin','Dest'],as_index=False)['Year'].count().rename(columns = {"Year": "weight"})
nx.set_edge_attributes(G, temp.set_index(['Origin','Dest']).to_dict('index'))
# 3 use pre_data, G to generate TotalFlights, DelayFlights
[ArrTotalFlights, ArrDelayFlights, DepTotalFlights , DepDelayFlights] = map(
lambda x: x[(x.index >= self.time_stamp2idx[grid[0]]) &
(x.index <= self.time_stamp2idx[grid[-1]])],
[self.ArrTotalFlights, self.ArrDelayFlights, self.DepTotalFlights , self.DepDelayFlights] )
return GraphFlow(idx2airport = idx2airport,
airport2idx = airport2idx,
idx2time_stamp = idx2time_stamp,
time_stamp2idx = time_stamp2idx,
ArrTotalFlights = ArrTotalFlights,
DepTotalFlights = DepTotalFlights,
ArrDelayFlights = ArrDelayFlights,
DepDelayFlights = DepDelayFlights,
pre_data = pre_data,
G = G,
dt = dt,
grid = grid)
def sub_graph_flow(self, start_time = None , end_time = None ,sub_nodes = None, edges = None , verbose = False):
idx2airport = self.idx2airport
airport2idx = self.airport2idx
idx2time_stamp = self.idx2time_stamp
time_stamp2idx =self. time_stamp2idx
dt = self.dt
if start_time is None:
start_time = str(self.grid[0])[:-6]
if end_time is None:
end_time = str(self.grid[-1])[:-6]
grid = pd.date_range(start_time,end_time,freq=dt,tz=timezone('America/Los_Angeles'))
# -1 determine if a faster algrithm apllies
if (sub_nodes is None) & (edges is None):
return self.slice(start_time,end_time)
if (sub_nodes is not None) & ((edges is not None)):
            print('TODO: sub_nodes and edges cannot both be specified')
raise
t=time()
# 0 generate edges
if edges is None:
edges = set(self.G.subgraph(sub_nodes).copy().edges)
print("==============edges generated: ", t-time())
# 1 generate new pre_data
pre_data_temp = self.pre_data[(self.pre_data.AbsArrTime >= str(grid[0])) &
(self.pre_data.AbsDepTime <= str(grid[-1]))]
pre_data = pre_data_temp.groupby(['Origin','Dest']).filter(lambda x: x.name in edges)
print("===============pre_data generated: ", t-time())
# 2 use pre_data to generate G
G = self.G.edge_subgraph(edges).copy()
temp = pre_data.groupby(['Origin','Dest'],as_index=False)['Year'].count().rename(columns = {"Year": "weight"})
nx.set_edge_attributes(G, temp.set_index(['Origin','Dest']).to_dict('index'))
#print(dict(G.edges))
print("===============G generated: ", t-time())
# 3 use pre_data, G to generate TotalFlights, DelayFlights
counter=0
attrs=['ArrTotalFlights','ArrDelayFlights','DepTotalFlights','DepDelayFlights']
dfs={x:[] for x in attrs}
ans={}
if (len(airport2idx)>0) & (len(time_stamp2idx)>0) & (len(idx2airport)>0) & (len(idx2time_stamp)>0) :
for airport in G.nodes:
print("=========Testing ",airport,"==========airports remains :", len(G.nodes)-counter,'========time lasted so far: ',time()-t)
temp_raw=fun_1_delay_rate(airport,grid=grid,dt=dt,pre_data=pre_data,G=G)
for attr in attrs:
temp=temp_raw[attr]
temp.index=map(lambda x: time_stamp2idx[x], temp.index)
temp.name=airport2idx[airport]
dfs[attr].append(temp)
counter+=1
for attr in attrs:
ans[attr]=pd.concat(dfs[attr],axis=1).sort_index(axis=1)
print('======================Total time:',time()-t)
ArrTotalFlights = ans['ArrTotalFlights']
ArrTotalFlights.index=pd.to_numeric(ArrTotalFlights.index)
ArrTotalFlights.columns=pd.to_numeric(ArrTotalFlights.columns)
ArrDelayFlights = ans['ArrDelayFlights']
ArrDelayFlights.index=pd.to_numeric(ArrDelayFlights.index)
ArrDelayFlights.columns=pd.to_numeric(ArrDelayFlights.columns)
DepTotalFlights = ans['DepTotalFlights']
DepTotalFlights.index=pd.to_numeric(DepTotalFlights.index)
DepTotalFlights.columns=pd.to_numeric(DepTotalFlights.columns)
DepDelayFlights = ans['DepDelayFlights']
        DepDelayFlights.index=pd.to_numeric(DepDelayFlights.index)
import seaborn as sns
import matplotlib.pyplot as plt
import json
from pandas.io.json import json_normalize
import os
import pandas as pd
import numpy as np
sns.set(style="darkgrid")
def read_json(json_path):
results = []
np_names = [
'AP', 'AP:0.50', 'AP:0.75', 'AP:S', 'AP:M', 'AP:L',
'AR', 'AR:0.50', 'AR:0.75', 'AR:S', 'AR:M', 'AR:L',
]
with open(json_path) as fp:
lines = fp.readlines()
for line in lines:
r = json.loads(line)
d = r['data']['bbox']['data']
result = dict(cfg=r['cfg'], uid=r['uid'], mode=r['mode'])
for k, v in zip(np_names, d['coco_eval']):
result[k] = v
for k, v in d['classwise'].items():
result[k] = v
for k1, v1 in d['defect_eval'].items():
if isinstance(v1, list):
result[k1] = np.mean(v1)
elif isinstance(v1, dict):
for k2, v2 in v1['macro avg'].items():
result[k2] = v2
results.append(result)
return results
def phrase_json(json_path):
save_path = json_path[:-5] + '.csv'
if os.path.exists(save_path):
return pd.read_csv(save_path)
results = read_json(json_path)
df = json_normalize(results)
df.to_csv(save_path, index=False)
return df
def get_sns_data(data, x_name, y_names, type):
x, y, hue = np.empty(0), np.empty(0), np.empty(0)
for y_name in y_names:
x = np.append(x, data[x_name])
y = np.append(y, data[y_name])
hue = np.append(hue, [type[y_name]] * data[y_name].shape[0])
return pd.DataFrame(dict(x=x, y=y, type=hue))
def lineplot(sns_data, new_x, new_y, ax=None, markers=True):
sns_data = sns_data.rename(columns={'x': new_x, 'y': new_y})
ax = sns.lineplot(
ax=ax,
x=new_x, y=new_y,
hue="type",
style="type",
markers=markers,
dashes=False,
data=sns_data,
ci=None
)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[1:], labels=labels[1:])
return ax
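# Hedged usage sketch (illustrative, not part of the original module): parse a
# results json produced elsewhere and plot AP curves against the run uid.
# 'results.json' is a hypothetical path in the format read_json() expects.
def _demo_ap_lineplot(json_path='results.json'):
    data = phrase_json(json_path)
    data = data[data['mode'] == 'test']
    data['uid_x'] = data['uid']
    sns_data = get_sns_data(data, 'uid_x', ['AP', 'AP:0.50'],
                            {'AP': 'IoU=0.50:0.95', 'AP:0.50': 'IoU=0.50'})
    ax = lineplot(sns_data, 'run uid', 'average precision')
    plt.show()
    return ax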
def draw_figure(json_path, save_path, x_name):
save_path = save_path[:-4]
save_path = save_path.replace('\\', '/')
save_dir = save_path[:save_path.rfind('/')]
if not os.path.exists(save_dir):
os.makedirs(save_dir)
data = phrase_json(json_path)
data = data
sns.set(style="darkgrid")
# ids = []
# for i in range(data.shape[0]):
# r = data.iloc[i]
# arrs = r['cfg'].split('_')
# ids.append(float(arrs[1][:-1]))
data[x_name] = data['uid']
data = data[data['mode'] == 'test']
fig = plt.figure(figsize=(6.4 * 3, 4.8))
axs = [fig.add_subplot(1, 3, i) for i in range(1, 4)]
# draw_ap_weight
y_names = ['AP', 'AP:0.50']
type = {'AP': 'IoU=0.50:0.95', 'AP:0.50': 'IoU=0.50'}
sns_data = get_sns_data(data, x_name, y_names, type)
new_x, new_y = x_name, 'average precision'
lineplot(sns_data, new_x, new_y, axs[0])
# draw_f1_score_weight
y_names = ['f1-score']
type = {'f1-score': 'f1-score'}
sns_data = get_sns_data(data, x_name, y_names, type)
new_x, new_y = x_name, 'macro average f1-score'
lineplot(sns_data, new_x, new_y, axs[1])
# draw_speed_weight
y_names = ['fps', 'defect_fps', 'normal_fps']
type = dict(fps='all images', defect_fps='defect images', normal_fps='normal images')
sns_data = get_sns_data(data, x_name, y_names, type)
new_x, new_y = x_name, 'average time(ms)'
lineplot(sns_data, new_x, new_y, axs[2])
# plt.gca().xaxis.set_major_locator(plt.NullLocator())
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
# plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
# plt.margins(0, 0)
plt.subplots_adjust(left=0.05, right=0.97)
plt.savefig(save_path + '.svg')
plt.savefig(save_path + '.eps')
plt.savefig(save_path + '.jpg')
plt.show()
def count_data(ann_file, head=None):
from pycocotools.coco import COCO
coco = COCO(ann_file)
defect_nums = np.empty(0, dtype=int)
for image in coco.dataset['images']:
cnt = 0
annIds = coco.getAnnIds(imgIds=image['id'])
anns = coco.loadAnns(annIds)
for ann in anns:
if ann['category_id'] != 0:
cnt += 1
defect_nums = np.append(defect_nums, cnt)
normal_shape = np.where(defect_nums == 0)[0]
if head is not None:
print(head + ':\n')
all_cnt, normal_cnt = len(coco.dataset['images']), normal_shape.shape[0]
defect_cnt = defect_nums.shape[0] - normal_shape.shape[0]
print('All images count:', all_cnt)
print('Normal images count:', normal_cnt)
print('Defect images count:', defect_cnt)
print('Normal images : Defect images is ', normal_cnt / defect_cnt)
def draw_avg_infer_time_and_efficient():
t = 0.5
tau = [t] * 50000
rs = np.linspace(0, 50, 50000)
avg_t = [1 - (1 - t) / (1 + 1 / r) for r in rs]
e = [(1 - t) / (1 + 1 / r) for r in rs]
    data = pd.DataFrame({'r': rs, 't': avg_t, 'e': e, 'τ': tau})
import gzip
import io
import anndata as ad
import cv2
import geopandas as gpd
import numpy as np
import pandas as pd
from anndata import AnnData
from scipy.spatial import Delaunay
from scipy.sparse import csr_matrix
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
from skimage import measure
from typing import Optional, Tuple, Union
def read_lasso(path: str) -> pd.DataFrame:
lasso_data = pd.read_csv(
path,
sep="\t",
dtype={
"geneID": "category",
"x": np.uint32,
"y": np.uint32,
"MIDCounts": np.uint16,
"cell": str,
},
)
lasso_data["geneID"] = lasso_data.geneID.astype(str).str.strip('"')
lasso_data["geneID"] = lasso_data["geneID"].astype("category")
return lasso_data
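# Hedged usage sketch (illustrative, not part of the original module): read a lasso
# table and summarise MID counts per gene. 'lasso.txt.gz' is a hypothetical path;
# pandas handles the gzip transparently based on the file extension.
def _demo_read_lasso(path: str = 'lasso.txt.gz') -> pd.Series:
    lasso_df = read_lasso(path)
    # total transcript counts per gene, a quick sanity check on the parsed table
    return lasso_df.groupby('geneID')['MIDCounts'].sum()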
def lasso2adata(
data: pd.DataFrame,
slice: Optional[str] = None,
label_path: Optional[str] = None,
DNB_gap: Optional[float] = 0.5,
z: Union[float] = None,
z_gap: Union[float] = None,
cellbin: bool = False,
) -> AnnData:
"""A helper function that facilitates constructing an AnnData object suitable for downstream spateo analysis
Parameters
----------
data: `pandas.DataFrame`
Lasso data.
slice: `str` or `None` (default: `None`)
Name of the slice. Will be used when displaying multiple slices.
label_path: `str` or `None` (default: `None`)
A string that points to the directory and filename of cell segmentation label matrix(`.npy` or `.npy.gz`).
DNB_gap: `float` (default: `0.5`)
True physical distance (microns) between nano balls.
z: `float` (default: `None`)
Z-axis direction coordinates.
z_gap: `float` (default: `None`)
True physical distance (microns) between slices.
cellbin: `bool` (default: `False`)
Whether to use cell bin as the base unit. Only valid when label_path is not None.
Returns
-------
adata: :class:`~anndata.AnnData`
An AnnData object. Each row of the AnnData object correspond to a spot (aggregated with multiple bins). The
`spatial` key in the .obsm corresponds to the x, y coordinates of the centroids of all spot.
"""
# physical coords
data["x"] = (data["x"].values - data["x"].values.min()) * DNB_gap
data["y"] = (data["y"].values - data["y"].values.min()) * DNB_gap
data["z"] = z * z_gap / DNB_gap if z is not None else 0
# obs
if label_path is not None:
# TODO: Get cell names using labels
if label_path.endswith(".gz"):
with gzip.open(label_path, "r") as f:
label_mtx = np.load(f)
else:
label_mtx = np.load(label_path)
props = measure.regionprops_table(label_mtx, properties=("label", "centroid"))
label_props = pd.DataFrame(props)
label_props.columns = ["cell", "centroid_x", "centroid_y"]
label_props["cell"] = label_props["cell"].astype(str)
label_props["centroid_x"] = label_props["centroid_x"].values * DNB_gap
label_props["centroid_y"] = label_props["centroid_y"].values * DNB_gap
        data = pd.merge(data, label_props, on=["cell"], how="inner")
from pathlib import Path
import re
from glob import glob
import gzip
from zipfile import ZipFile
from collections import defaultdict
from datetime import datetime
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, LineString, Polygon
flights_date_items = [
"FILED OFF BLOCK TIME",
"FILED ARRIVAL TIME",
"ACTUAL OFF BLOCK TIME",
"ACTUAL ARRIVAL TIME",
]
flight_points_date_items = ["Time Over"]
flight_airspaces_date_items = ["Entry Time", "Exit Time"]
def get_df(file, date_items):
if len(date_items) == 0:
        return pd.read_csv(file)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import pytest
from distutils.version import LooseVersion
import numpy as np
import pytz
import pandas as pd
from xarray import Variable, IndexVariable, Coordinate, Dataset
from xarray.core import indexing
from xarray.core.variable import as_variable, as_compatible_data
from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray
from xarray.core.pycompat import PY3, OrderedDict
from xarray.core.common import full_like, zeros_like, ones_like
from . import TestCase, source_ndarray, requires_dask
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.sizes, {'time': 10})
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def _assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
elif expected_dtype is not False:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT', 'ns')
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls('x', listarray)
assert x.data == listarray
assert x[0].data == listarray.squeeze()
assert x.squeeze().data == listarray.squeeze()
def test_index_and_concat_datetime(self):
# regression test for #125
        date_range = pd.date_range('2011-09-01', periods=10)
import os
from typing import cast
import matplotlib.pyplot as plt
import pandas as pd
import pandera as pa
import requests
import seaborn as sns
from dagster_pandera import pandera_schema_to_dagster_type
from pandera.typing import Series
# ****************************************************************************
# ***** TYPES ****************************************************************
class StockPrices(pa.SchemaModel):
"""Open/high/low/close prices for a set of stocks by day."""
name: Series[str] = pa.Field(description="Ticker symbol of stock")
date: Series[pd.Timestamp] = pa.Field(description="Date of prices")
open: Series[float] = pa.Field(ge=0, description="Price at market open")
high: Series[float] = pa.Field(ge=0, description="Highest price of the day")
low: Series[float] = pa.Field(ge=0, description="Lowest price of the day")
close: Series[float] = pa.Field(ge=0, description="Price at market close")
volume: Series[int] = pa.Field(ge=0, description="Number of shares traded for day")
StockPricesDgType = pandera_schema_to_dagster_type(StockPrices)
class BollingerBands(pa.SchemaModel):
"""Bollinger bands for a set of stock prices."""
name: Series[str] = pa.Field(description="Ticker symbol of stock")
date: Series[pd.Timestamp] = pa.Field(description="Date of prices")
upper: Series[float] = pa.Field(ge=0, description="Upper band")
lower: Series[float] = pa.Field(description="Lower band")
BollingerBandsDgType = pandera_schema_to_dagster_type(BollingerBands)
class AnomalousEvents(pa.SchemaModel):
"""Anomalous price events, defined by a day on which a stock's closing price strayed above or
below its Bollinger bands."""
date: Series[pd.Timestamp] = pa.Field(description="Date of price event")
name: Series[str] = pa.Field(description="Ticker symbol of stock")
    event: Series[pd.CategoricalDtype] = pa.Field(description="Type of event: 'high' or 'low'")
AnomalousEventsDgType = pandera_schema_to_dagster_type(AnomalousEvents)
# ****************************************************************************
# ***** FUNCTIONS ************************************************************
DATA_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data"))
SP500_CSV_URL = "https://raw.githubusercontent.com/plotly/datasets/master/all_stocks_5yr.csv"
def normalize_path(path: str) -> str:
return path if path[0] == "/" else os.path.join(DATA_ROOT, path)
def download_file(url: str, path: str):
"""Download a file from a URL to a local path. If relative path, will be resolved relative to `DATA_ROOT`."""
path = normalize_path(path)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(requests.get(url).content)
def load_prices_csv(path: str) -> pd.DataFrame:
"""Load a CSV file containing stock prices. CSV should conform to the schema in the
`StockPrices` pandera schema above. If relative path, will be resolved relative to
`DATA_ROOT`."""
path = normalize_path(path)
df = cast(pd.DataFrame, pd.read_csv(path, parse_dates=["date"]))
df = df.rename(columns={"Name": "name"})
df = df.dropna()
return df
def load_sp500_prices(download: bool = True) -> pd.DataFrame:
path = normalize_path("all_stocks_5yr.csv")
if not os.path.exists(path):
if download:
download_file(SP500_CSV_URL, path)
else:
raise FileNotFoundError(f"{path} not found")
return load_prices_csv(path)
def compute_bollinger_bands(
df: pd.DataFrame, rate: int = 30, sigma: float = 2.0, dropna=True
) -> pd.DataFrame:
"""Compute Bollinger bands for a single stock over time. The dataframe passed in here should be
represent a single timeseries."""
price = df["close"]
rma = price.rolling(window=rate).mean()
rstd = price.rolling(window=rate).std()
upper = rma + sigma * rstd
lower = rma - sigma * rstd
odf = pd.DataFrame({"name": df["name"], "date": df["date"], "upper": upper, "lower": lower})
if dropna:
odf = odf.dropna()
return odf
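# Hedged usage sketch (illustrative, not part of the original module): Bollinger
# bands for a single synthetic price series. The ticker name and prices are made up,
# so no external data is needed.
def _demo_bollinger_bands() -> pd.DataFrame:
    dates = pd.date_range('2020-01-01', periods=120, freq='D')
    # a simple synthetic closing-price series with a mild trend and weekly bumps
    close = [100.0 + 0.1 * i + (1.5 if i % 7 == 0 else -0.2) for i in range(120)]
    df = pd.DataFrame({'name': 'FAKE', 'date': dates, 'close': close})
    return compute_bollinger_bands(df, rate=30, sigma=2.0)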
def compute_bollinger_bands_multi(df: pd.DataFrame, dropna: bool = True):
"""Compute Bollinger bands for a set of stocks over time. The input dataframe can contain
multiple timeseries grouped by the `name` column."""
odf = df.groupby("name").apply(lambda idf: compute_bollinger_bands(idf, dropna=False))
return odf.dropna().reset_index() if dropna else odf
EVENT_TYPE = pd.CategoricalDtype(["high", "low"], ordered=False)
def compute_anomalous_events(df_prices: pd.DataFrame, df_bollinger: pd.DataFrame):
"""Compute anomalous (high or low) price events for a set of stocks over time."""
df = pd.concat([df_prices, df_bollinger.add_prefix("bol_")], axis=1)
    df["event"] = pd.Series(pd.NA, index=df.index, dtype=EVENT_TYPE)
import hashlib
import math
import numpy as np
import pprint
import pytest
import random
import re
import subprocess
import sys
import tempfile
import json
from catboost import (
CatBoost,
CatBoostClassifier,
CatBoostRegressor,
CatBoostError,
EFstrType,
FeaturesData,
Pool,
cv,
sum_models,
train,)
from catboost.eval.catboost_evaluation import CatboostEvaluation, EvalType
from catboost.utils import eval_metric, create_cd, get_roc_curve, select_threshold
from catboost.utils import DataMetaInfo, TargetStats, compute_training_options
import os.path
from pandas import read_table, DataFrame, Series, Categorical
from six import PY3
from six.moves import xrange
from catboost_pytest_lib import (
DelayedTee,
binary_path,
data_file,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
test_output_path,
generate_random_labeled_set
)
if sys.version_info.major == 2:
import cPickle as pickle
else:
import _pickle as pickle
pytest_plugins = "list_plugin",
fails_on_gpu = pytest.mark.fails_on_gpu
EPS = 1e-5
BOOSTING_TYPE = ['Ordered', 'Plain']
OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']
NONSYMMETRIC = ['Lossguide', 'Depthwise']
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERYWISE_TRAIN_FILE = data_file('querywise', 'train')
QUERYWISE_TEST_FILE = data_file('querywise', 'test')
QUERYWISE_CD_FILE = data_file('querywise', 'train.cd')
QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT = data_file('querywise', 'train.cd.group_weight')
QUERYWISE_CD_FILE_WITH_GROUP_ID = data_file('querywise', 'train.cd.query_id')
QUERYWISE_CD_FILE_WITH_SUBGROUP_ID = data_file('querywise', 'train.cd.subgroup_id')
QUERYWISE_TRAIN_PAIRS_FILE = data_file('querywise', 'train.pairs')
QUERYWISE_TRAIN_PAIRS_FILE_WITH_PAIR_WEIGHT = data_file('querywise', 'train.pairs.weighted')
QUERYWISE_TEST_PAIRS_FILE = data_file('querywise', 'test.pairs')
AIRLINES_5K_TRAIN_FILE = data_file('airlines_5K', 'train')
AIRLINES_5K_TEST_FILE = data_file('airlines_5K', 'test')
AIRLINES_5K_CD_FILE = data_file('airlines_5K', 'cd')
SMALL_CATEGORIAL_FILE = data_file('small_categorial', 'train')
SMALL_CATEGORIAL_CD_FILE = data_file('small_categorial', 'train.cd')
BLACK_FRIDAY_TRAIN_FILE = data_file('black_friday', 'train')
BLACK_FRIDAY_TEST_FILE = data_file('black_friday', 'test')
BLACK_FRIDAY_CD_FILE = data_file('black_friday', 'cd')
OUTPUT_MODEL_PATH = 'model.bin'
OUTPUT_COREML_MODEL_PATH = 'model.mlmodel'
OUTPUT_CPP_MODEL_PATH = 'model.cpp'
OUTPUT_PYTHON_MODEL_PATH = 'model.py'
OUTPUT_JSON_MODEL_PATH = 'model.json'
OUTPUT_ONNX_MODEL_PATH = 'model.onnx'
PREDS_PATH = 'predictions.npy'
PREDS_TXT_PATH = 'predictions.txt'
FIMP_NPY_PATH = 'feature_importance.npy'
FIMP_TXT_PATH = 'feature_importance.txt'
OIMP_PATH = 'object_importances.txt'
JSON_LOG_PATH = 'catboost_info/catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = binary_path("catboost/tools/model_comparator/model_comparator")
np.set_printoptions(legacy='1.13')
class LogStdout:
def __init__(self, file):
self.log_file = file
def __enter__(self):
self.saved_stdout = sys.stdout
sys.stdout = self.log_file
return self.saved_stdout
def __exit__(self, exc_type, exc_value, exc_traceback):
sys.stdout = self.saved_stdout
self.log_file.close()
def compare_canonical_models(model, diff_limit=0):
return local_canonical_file(model, diff_tool=[model_diff_tool, '--diff-limit', str(diff_limit)])
def map_cat_features(data, cat_features):
result = []
for i in range(data.shape[0]):
result.append([])
for j in range(data.shape[1]):
result[i].append(str(data[i, j]) if j in cat_features else data[i, j])
return result
def _check_shape(pool, object_count, features_count):
return np.shape(pool.get_features()) == (object_count, features_count)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def _count_lines(afile):
with open(afile, 'r') as f:
num_lines = sum(1 for line in f)
return num_lines
def _generate_nontrivial_binary_target(num, seed=20181219, prng=None):
'''
Generate binary vector with non zero variance
:param num:
:return:
'''
if prng is None:
prng = np.random.RandomState(seed=seed)
def gen():
return prng.randint(0, 2, size=num)
if num <= 1:
return gen()
y = gen() # 0/1 labels
while y.min() == y.max():
y = gen()
return y
def _generate_random_target(num, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
return prng.random_sample((num,))
def set_random_weight(pool, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
pool.set_weight(prng.random_sample(pool.num_row()))
if pool.num_pairs() > 0:
pool.set_pairs_weight(prng.random_sample(pool.num_pairs()))
def verify_finite(result):
inf = float('inf')
for r in result:
assert(r == r)
assert(abs(r) < inf)
def append_param(metric_name, param):
return metric_name + (':' if ':' not in metric_name else ';') + param
# returns (features DataFrame, cat_feature_indices)
def load_pool_features_as_df(pool_file, cd_file, target_idx):
data = read_table(pool_file, header=None, dtype=str)
data.drop([target_idx], axis=1, inplace=True)
return (data, Pool(pool_file, column_description=cd_file).get_cat_feature_indices())
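# Hedged usage sketch (illustrative helper, not one of the canonical tests below):
# load the adult training pool features as a DataFrame together with the indices of
# its categorical features, using the helper defined above.
def _demo_load_adult_features():
    features_df, cat_feature_indices = load_pool_features_as_df(TRAIN_FILE, CD_FILE, TARGET_IDX)
    return features_df.shape, cat_feature_indices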
# Test cases begin here ########################################################
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE), 101, 17)
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features), 101, 17)
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features), 101, 17)
@pytest.mark.parametrize('dataset', ['adult', 'adult_nan', 'querywise'])
def test_load_df_vs_load_from_file(dataset):
train_file, cd_file, target_idx, other_non_feature_columns = {
'adult': (TRAIN_FILE, CD_FILE, TARGET_IDX, []),
'adult_nan': (NAN_TRAIN_FILE, NAN_CD_FILE, TARGET_IDX, []),
'querywise': (QUERYWISE_TRAIN_FILE, QUERYWISE_CD_FILE, 2, [0, 1, 3, 4])
}[dataset]
pool1 = Pool(train_file, column_description=cd_file)
data = read_table(train_file, header=None)
labels = DataFrame(data.iloc[:, target_idx], dtype=np.float32)
data.drop([target_idx] + other_non_feature_columns, axis=1, inplace=True)
cat_features = pool1.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool1.get_features(), pool2.get_features())
assert _check_data([float(label) for label in pool1.get_label()], pool2.get_label())
def test_load_series():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
data = read_table(TRAIN_FILE, header=None)
labels = Series(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
data = Series(list(data.values))
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert [int(label) for label in pool.get_label()] == pool2.get_label()
def test_pool_cat_features():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
assert np.all(pool.get_cat_feature_indices() == CAT_FEATURES)
def test_pool_cat_features_as_strings():
df = DataFrame(data=[[1, 2], [3, 4]], columns=['col1', 'col2'])
pool = Pool(df, cat_features=['col2'])
assert np.all(pool.get_cat_feature_indices() == [1])
data = [[1, 2, 3], [4, 5, 6]]
pool = Pool(data, feature_names=['col1', 'col2', 'col3'], cat_features=['col2', 'col3'])
assert np.all(pool.get_cat_feature_indices() == [1, 2])
data = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(CatBoostError):
Pool(data, cat_features=['col2', 'col3'])
def test_load_generated():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = np.round(prng.normal(size=pool_size), decimals=3)
label = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool = Pool(data, label)
assert _check_data(pool.get_features(), data)
assert _check_data(pool.get_label(), label)
def test_load_dumps():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = prng.randint(10, size=pool_size)
labels = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool1 = Pool(data, labels)
lines = []
for i in range(len(data)):
line = [str(labels[i])] + [str(x) for x in data[i]]
lines.append('\t'.join(line))
text = '\n'.join(lines)
with open('test_data_dumps', 'w') as f:
f.write(text)
pool2 = Pool('test_data_dumps')
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_label() == [int(label) for label in pool2.get_label()]
def test_dataframe_with_pandas_categorical_columns():
df = DataFrame()
df['num_feat_0'] = [0, 1, 0, 2, 3, 1, 2]
df['num_feat_1'] = [0.12, 0.8, 0.33, 0.11, 0.0, 1.0, 0.0]
df['cat_feat_2'] = Series(['A', 'B', 'A', 'C', 'A', 'A', 'A'], dtype='category')
df['cat_feat_3'] = Series(['x', 'x', 'y', 'y', 'y', 'x', 'x'])
df['cat_feat_4'] = Categorical(
['large', 'small', 'medium', 'large', 'small', 'small', 'medium'],
categories=['small', 'medium', 'large'],
ordered=True
)
df['cat_feat_5'] = [0, 1, 0, 2, 3, 1, 2]
labels = [0, 1, 1, 0, 1, 0, 1]
model = CatBoostClassifier(iterations=2)
model.fit(X=df, y=labels, cat_features=[2, 3, 4, 5])
pred = model.predict(df)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
# feature_matrix is (doc_count x feature_count)
def get_features_data_from_matrix(feature_matrix, cat_feature_indices, order='C'):
object_count = len(feature_matrix)
feature_count = len(feature_matrix[0])
cat_feature_count = len(cat_feature_indices)
num_feature_count = feature_count - cat_feature_count
result_num = np.empty((object_count, num_feature_count), dtype=np.float32, order=order)
result_cat = np.empty((object_count, cat_feature_count), dtype=object, order=order)
for object_idx in xrange(object_count):
num_feature_idx = 0
cat_feature_idx = 0
for feature_idx in xrange(len(feature_matrix[object_idx])):
if (cat_feature_idx < cat_feature_count) and (cat_feature_indices[cat_feature_idx] == feature_idx):
# simplified handling of transformation to bytes for tests
result_cat[object_idx, cat_feature_idx] = (
feature_matrix[object_idx, feature_idx]
if isinstance(feature_matrix[object_idx, feature_idx], bytes)
else str(feature_matrix[object_idx, feature_idx]).encode('utf-8')
)
cat_feature_idx += 1
else:
result_num[object_idx, num_feature_idx] = float(feature_matrix[object_idx, feature_idx])
num_feature_idx += 1
return FeaturesData(num_feature_data=result_num, cat_feature_data=result_cat)
def get_features_data_from_file(data_file, drop_columns, cat_feature_indices, order='C'):
data_matrix_from_file = read_table(data_file, header=None, dtype=str)
data_matrix_from_file.drop(drop_columns, axis=1, inplace=True)
return get_features_data_from_matrix(np.array(data_matrix_from_file), cat_feature_indices, order)
def compare_flat_index_and_features_data_pools(flat_index_pool, features_data_pool):
assert flat_index_pool.shape == features_data_pool.shape
cat_feature_indices = flat_index_pool.get_cat_feature_indices()
num_feature_count = flat_index_pool.shape[1] - len(cat_feature_indices)
flat_index_pool_features = flat_index_pool.get_features()
features_data_pool_features = features_data_pool.get_features()
for object_idx in xrange(flat_index_pool.shape[0]):
num_feature_idx = 0
cat_feature_idx = 0
for flat_feature_idx in xrange(flat_index_pool.shape[1]):
if (
(cat_feature_idx < len(cat_feature_indices))
and (cat_feature_indices[cat_feature_idx] == flat_feature_idx)
):
# simplified handling of transformation to bytes for tests
assert (flat_index_pool_features[object_idx][flat_feature_idx] ==
features_data_pool_features[object_idx][num_feature_count + cat_feature_idx])
cat_feature_idx += 1
else:
assert np.isclose(
flat_index_pool_features[object_idx][flat_feature_idx],
features_data_pool_features[object_idx][num_feature_idx],
rtol=0.001,
equal_nan=True
)
num_feature_idx += 1
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_from_features_data_vs_load_from_files(order):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
pool_from_features_data = Pool(data=features_data)
compare_flat_index_and_features_data_pools(pool_from_files, pool_from_features_data)
def test_features_data_with_empty_objects():
fd = FeaturesData(
cat_feature_data=np.empty((0, 4), dtype=object)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 4
assert fd.get_num_feature_count() == 0
assert fd.get_cat_feature_count() == 4
assert fd.get_feature_names() == [''] * 4
fd = FeaturesData(
num_feature_data=np.empty((0, 2), dtype=np.float32),
num_feature_names=['f0', 'f1']
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 2
assert fd.get_num_feature_count() == 2
assert fd.get_cat_feature_count() == 0
assert fd.get_feature_names() == ['f0', 'f1']
fd = FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object),
num_feature_data=np.empty((0, 3), dtype=np.float32)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 5
assert fd.get_num_feature_count() == 3
assert fd.get_cat_feature_count() == 2
assert fd.get_feature_names() == [''] * 5
def test_features_data_names():
# empty specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == [''] * 5
# full specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', 'shop', 'search']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', '', '']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == ['', '', '', 'shop', 'search']
def compare_pools_from_features_data_and_generic_matrix(
features_data,
generic_matrix,
cat_features_indices,
feature_names=None
):
pool1 = Pool(data=features_data)
pool2 = Pool(data=generic_matrix, cat_features=cat_features_indices, feature_names=feature_names)
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_cat_feature_indices() == pool2.get_cat_feature_indices()
assert pool1.get_feature_names() == pool2.get_feature_names()
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_features_data_good(order):
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(cat_feature_data=np.empty((0, 4), dtype=object, order=order)),
np.empty((0, 4), dtype=object),
cat_features_indices=[0, 1, 2, 3]
)
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object, order=order),
cat_feature_names=['cat0', 'cat1'],
num_feature_data=np.empty((0, 3), dtype=np.float32, order=order),
),
np.empty((0, 5), dtype=object),
cat_features_indices=[3, 4],
feature_names=['', '', '', 'cat0', 'cat1']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order)
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search']
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1],
feature_names=['shop', 'search']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[],
feature_names=['weight', 'price', 'volume']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4],
feature_names=['weight', 'price', 'volume', 'shop', 'search']
)
def test_features_data_bad():
# empty
with pytest.raises(CatBoostError):
FeaturesData()
# names w/o data
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=[[b'amazon', b'bing']], num_feature_names=['price'])
# bad matrix type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=[[b'amazon', b'bing']],
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
# bad matrix shape
with pytest.raises(CatBoostError):
FeaturesData(num_feature_data=np.array([[[1.0], [2.0], [3.0]]], dtype=np.float32))
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([b'amazon', b'bing'], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float64)
)
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=np.array(['amazon', 'bing']))
# bad names type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'reddit']], dtype=object),
cat_feature_names=[None, 'news_aggregator']
)
# bad names length
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
cat_feature_names=['search_engine', 'news_aggregator']
)
# no features
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[], [], []], dtype=object),
num_feature_data=np.array([[], [], []], dtype=np.float32)
)
# number of objects is different
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
def test_predict_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function='Logloss:border=0.5', task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_class_raw(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_raw_predict_equals_to_model_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
assert(model.is_fitted())
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
assert np.all(np.isclose(model.get_test_eval(), pred, rtol=1.e-6))
@pytest.mark.parametrize('problem', ['Classifier', 'Regressor'])
def test_predict_and_predict_proba_on_single_object(problem):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
if problem == 'Classifier':
model = CatBoostClassifier(iterations=2)
else:
model = CatBoostRegressor(iterations=2)
model.fit(train_pool)
test_data = read_table(TEST_FILE, header=None)
test_data.drop([TARGET_IDX], axis=1, inplace=True)
pred = model.predict(test_data)
if problem == 'Classifier':
pred_probabilities = model.predict_proba(test_data)
random.seed(0)
for i in xrange(3): # just some indices
test_object_idx = random.randrange(test_data.shape[0])
assert pred[test_object_idx] == model.predict(test_data.values[test_object_idx])
if problem == 'Classifier':
assert np.array_equal(pred_probabilities[test_object_idx], model.predict_proba(test_data.values[test_object_idx]))
def test_model_pickling(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
model_unpickled = pickle.loads(pickle.dumps(model))
pred_new = model_unpickled.predict(test_pool, prediction_type='RawFormulaVal')
assert all(pred_new == pred)
def test_fit_from_file(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
predictions1 = model.predict(train_pool)
model.fit(TRAIN_FILE, column_description=CD_FILE)
predictions2 = model.predict(train_pool)
assert all(predictions1 == predictions2)
assert 'train_finish_time' in model.get_metadata()
@fails_on_gpu(how='assert 0.019921323750168085 < EPS, where 0.019921323750168085 = abs((0.03378972364589572 - 0.053711047396063805))')
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_fit_from_features_data(order, task_type):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(pool_from_files)
assert(model.is_fitted())
predictions_from_files = model.predict(pool_from_files)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
model.fit(X=features_data, y=pool_from_files.get_label())
predictions_from_features_data = model.predict(Pool(features_data))
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert abs(prediction1 - prediction2) < EPS
def test_fit_from_empty_features_data(task_type):
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
with pytest.raises(CatBoostError):
model.fit(
X=FeaturesData(num_feature_data=np.empty((0, 2), dtype=np.float32)),
y=np.empty((0), dtype=np.int32)
)
def test_coreml_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
canon_pred = model.predict(test_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(canon_pred == coreml_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_coreml_import_export_one_hot_features(task_type):
train_pool = Pool(SMALL_CATEGORIAL_FILE, column_description=SMALL_CATEGORIAL_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 2, 'task_type': task_type, 'devices': '0', 'one_hot_max_size': 4})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml", pool=train_pool)
pred = model.predict(train_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(pred == coreml_loaded_model.predict(train_pool))
return compare_canonical_models(output_coreml_model_path)
@pytest.mark.parametrize('pool', ['adult', 'higgs'])
def test_convert_model_to_json(task_type, pool):
train_pool = Pool(data_file(pool, 'train_small'), column_description=data_file(pool, 'train.cd'))
test_pool = Pool(data_file(pool, 'test_small'), column_description=data_file(pool, 'train.cd'))
converted_model_path = test_output_path("converted_model.bin")
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
output_json_model_path = test_output_path(OUTPUT_JSON_MODEL_PATH)
model.save_model(output_model_path)
model.save_model(output_json_model_path, format="json")
model2 = CatBoost()
model2.load_model(output_json_model_path, format="json")
model2.save_model(converted_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
subprocess.check_call((model_diff_tool, output_model_path, converted_model_path, '--diff-limit', '0.000001'))
return compare_canonical_models(converted_model_path)
def test_coreml_cbm_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
canon_pred = model.predict(test_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
coreml_loaded_model = CatBoost()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
coreml_loaded_model.save_model(output_model_path)
cbm_loaded_model = CatBoost()
cbm_loaded_model.load_model(output_model_path)
assert all(canon_pred == cbm_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_cpp_export_no_cat_features(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
def test_cpp_export_with_cat_features(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_no_cat_features(task_type, iterations):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': iterations, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python")
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_with_cat_features(task_type, iterations):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': iterations, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=train_pool)
return local_canonical_file(output_python_model_path)
def test_export_to_python_with_cat_features_from_pandas(task_type):
model = CatBoost({'iterations': 5, 'task_type': task_type, 'devices': '0'})
X = DataFrame([[1, 2], [3, 4]], columns=['Num', 'Categ'])
y = [1, 0]
cat_features = [1]
model.fit(X, y, cat_features)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=X)
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('problem_type', ['binclass', 'multiclass', 'regression'])
def test_onnx_export(problem_type):
if problem_type == 'binclass':
loss_function = 'Logloss'
train_path = TRAIN_FILE
cd_path = CD_FILE
elif problem_type == 'multiclass':
loss_function = 'MultiClass'
train_path = CLOUDNESS_TRAIN_FILE
cd_path = CLOUDNESS_CD_FILE
elif problem_type == 'regression':
loss_function = 'RMSE'
train_path = TRAIN_FILE
cd_path = CD_FILE
else:
raise Exception('Unsupported problem_type: %s' % problem_type)
train_pool = Pool(train_path, column_description=cd_path)
model = CatBoost(
{
'task_type': 'CPU', # TODO(akhropov): GPU results are unstable, difficult to compare models
'loss_function': loss_function,
'iterations': 5,
'depth': 4,
# onnx format export does not yet support categorical features so ignore them
'ignored_features': train_pool.get_cat_feature_indices()
}
)
model.fit(train_pool)
output_onnx_model_path = test_output_path(OUTPUT_ONNX_MODEL_PATH)
model.save_model(
output_onnx_model_path,
format="onnx",
export_parameters={
'onnx_domain': 'ai.catboost',
'onnx_model_version': 1,
'onnx_doc_string': 'test model for problem_type %s' % problem_type,
'onnx_graph_name': 'CatBoostModel_for_%s' % problem_type
}
)
return compare_canonical_models(output_onnx_model_path)
def test_predict_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool, prediction_type="Class")
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_zero_learning_rate(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0, task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(train_pool)
def test_predict_class_proba(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict_proba(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@fails_on_gpu(how='assert 0.031045619651137835 < EPS, where 0.031045619651137835 = <function amax at ...')
@pytest.mark.parametrize('function_name', ['predict', 'predict_proba'])
def test_predict_funcs_from_features_data(function_name, task_type):
function = getattr(CatBoostClassifier, function_name)
train_pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool_from_files)
test_pool_from_files = Pool(TEST_FILE, column_description=CD_FILE)
predictions_from_files = function(model, test_pool_from_files)
train_features_data, test_features_data = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool_from_files.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
model.fit(X=train_features_data, y=train_pool_from_files.get_label())
predictions_from_features_data = function(model, test_features_data)
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert np.max(np.abs(prediction1 - prediction2)) < EPS
# empty
empty_test_features_data = FeaturesData(
num_feature_data=np.empty((0, test_features_data.get_num_feature_count()), dtype=np.float32),
cat_feature_data=np.empty((0, test_features_data.get_cat_feature_count()), dtype=object)
)
empty_predictions = function(model, empty_test_features_data)
assert len(empty_predictions) == 0
def test_no_cat_in_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred1 = model.predict(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()))
pred2 = model.predict(Pool(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()), cat_features=train_pool.get_cat_feature_indices()))
assert _check_data(pred1, pred2)
def test_save_model(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost({'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
model2 = CatBoost()
model2.load_model(output_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
def test_multiclass(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_multiclass_classes_count_missed_classes(task_type):
prng = np.random.RandomState(seed=0)
pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([1, 3], size=100))
classifier = CatBoostClassifier(classes_count=4, iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
classes = new_classifier.predict(pool)
assert pred.shape == (100, 4)
assert np.all(np.isin(classes, [1, 3]))
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@pytest.mark.parametrize('label_type', ['string', 'int'])
def test_multiclass_custom_class_labels(label_type, task_type):
if label_type == 'int':
train_labels = [1, 2]
elif label_type == 'string':
train_labels = ['Class1', 'Class2']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice(train_labels, size=100))
test_pool = Pool(prng.random_sample(size=(50, 10)))
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (50, 2)
assert all(((class1 in train_labels) for class1 in classes))
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_multiclass_custom_class_labels_from_files(task_type):
labels = ['a', 'b', 'c', 'd']
cd_path = test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(25, 10, labels, prng=prng), fmt='%s', delimiter='\t')
train_pool = Pool(train_path, column_description=cd_path)
test_pool = Pool(test_path, column_description=cd_path)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (25, 4)
assert all(((class1 in labels) for class1 in classes))
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_class_names(task_type):
class_names = ['Small', 'Medium', 'Large']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice(class_names, size=100))
test_pool = Pool(prng.random_sample(size=(25, 10)))
classifier = CatBoostClassifier(
iterations=2,
loss_function='MultiClass',
class_names=class_names,
thread_count=8,
task_type=task_type,
devices='0'
)
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (25, 3)
assert all(((class1 in class_names) for class1 in classes))
assert sorted(classifier.classes_) == sorted(class_names)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_inconsistent_labels_and_class_names():
class_names = ['Small', 'Medium', 'Large']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([0, 1, 2], size=100))
classifier = CatBoostClassifier(
iterations=2,
loss_function='MultiClass',
class_names=class_names,
)
with pytest.raises(CatBoostError):
classifier.fit(train_pool)
@pytest.mark.parametrize(
'features_dtype',
['str', 'np.float32'],
ids=['features_dtype=str', 'features_dtype=np.float32']
)
def test_querywise(features_dtype, task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 2]
train_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(eval(features_dtype))
df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(eval(features_dtype))
model.fit(train_data, train_target, group_id=train_query_id)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_group_weight(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT)
model = CatBoost(params={'loss_function': 'YetiRank', 'iterations': 10, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_query_weight = df.loc[:, 0]
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 2]
train_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(str)
df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_query_weight = df.loc[:, 0]
test_query_id = df.loc[:, 1]
test_data = Pool(df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32), group_id=test_query_id, group_weight=test_query_weight)
model.fit(train_data, train_target, group_id=train_query_id, group_weight=train_query_weight)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_zero_baseline(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
baseline = np.zeros(pool.num_row())
pool.set_baseline(baseline)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ones_weight(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.ones(pool.num_row())
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_non_ones_weight(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.arange(1, pool.num_row() + 1)
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ones_weight_equal_to_nonspecified_weight(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
predictions = []
for set_weights in [False, True]:
if set_weights:
weight = np.ones(train_pool.num_row())
train_pool.set_weight(weight)
model.fit(train_pool)
predictions.append(model.predict(test_pool))
assert _check_data(predictions[0], predictions[1])
def test_py_data_group_id(task_type):
train_pool_from_files = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_ID)
test_pool_from_files = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_ID)
model = CatBoost(
params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 4, 'task_type': task_type, 'devices': '0'}
)
model.fit(train_pool_from_files)
predictions_from_files = model.predict(test_pool_from_files)
train_df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_target = train_df.loc[:, 2]
raw_train_group_id = train_df.loc[:, 1]
train_data = train_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32)
test_df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = Pool(test_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32))
for group_id_func in (int, str, lambda id: 'myid_' + str(id)):
train_group_id = [group_id_func(group_id) for group_id in raw_train_group_id]
model.fit(train_data, train_target, group_id=train_group_id)
predictions_from_py_data = model.predict(test_data)
assert _check_data(predictions_from_files, predictions_from_py_data)
def test_py_data_subgroup_id(task_type):
train_pool_from_files = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_SUBGROUP_ID)
test_pool_from_files = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_SUBGROUP_ID)
model = CatBoost(
params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 4, 'task_type': task_type, 'devices': '0'}
)
model.fit(train_pool_from_files)
predictions_from_files = model.predict(test_pool_from_files)
train_df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
import pandas
import time
from pandas.core.frame import DataFrame
def macd(tickers: list, history, stop_loss: list):
prices = []
print(f"LOGGING REPORT -> {time.ctime()}")
# Add the closing prices to the prices list and make sure we start at greater than 2 dollars to reduce outlier calculations.
for ticker, sl in zip(tickers, stop_loss):
for price in history['Close'][ticker.name]:
prices.append(price)
prices_df = DataFrame(prices)
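# (added illustration) The macd() function above is truncated in this file. As a hedged
# sketch only, not the original author's implementation, a conventional MACD can be
# computed from the collected closing prices with 12/26-period EMAs and a 9-period
# signal line; the helper name and the 12/26/9 parameters below are assumptions.
def _macd_sketch(close_prices):
    close = pandas.Series(close_prices, dtype=float)
    macd_line = close.ewm(span=12, adjust=False).mean() - close.ewm(span=26, adjust=False).mean()
    signal_line = macd_line.ewm(span=9, adjust=False).mean()
    histogram = macd_line - signal_line
    return macd_line, signal_line, histogram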
# Load packages
import numpy as np
import pandas as pd
# Combine all tournament and reg season games
tourney_compact_df = pd.read_csv('Data/Kaggle NCAA/NCAATourneyCompactResults.csv')
tourney_compact_df['is_tourney'] = 1.0
regseason_compact_df = pd.read_csv('Data/Kaggle NCAA/RegularSeasonCompactResults.csv')
regseason_compact_df['is_tourney'] = 0.0
all_games_df = regseason_compact_df.append(tourney_compact_df)
all_games_df.head()
all_games_df.tail()
# Add spread, seeds, ranks, and Ken POM data
# Load and merge on point spread
spread_df = pd.read_csv('Data/~Created data/spreads_all.csv')
spread_df = spread_df[['Season','date','line','lineavg','road','home','rscore','hscore','WScore','LScore','HTeamID','RTeamID','WTeamID','LTeamID']]
# Merge point spread on to games
all_games_df = pd.merge(all_games_df, spread_df, left_on=['WTeamID','LTeamID','Season','WScore','LScore'], right_on = ['WTeamID','LTeamID','Season','WScore','LScore'], how='inner')
all_games_df = all_games_df[['Season','date','is_tourney','home','HTeamID','hscore','road','RTeamID','rscore','line','lineavg']]
all_games_df['Hwin'] = 0
all_games_df.loc[all_games_df['hscore']>all_games_df['rscore'], 'Hwin'] = 1
all_games_df.head()
# Add seeds
seeds_df = pd.read_csv('Data/Kaggle NCAA/NCAATourneySeeds.csv')
seeds_df['Seed_num'] = seeds_df['Seed'].str.extract(r'(\d\d)', expand=True)
seeds_df['Seed_num'] = pd.to_numeric(seeds_df['Seed_num'])
seeds_df = seeds_df[['Season','TeamID','Seed_num']]
seeds_df.rename(columns={'Seed_num': 'Seed'}, inplace=True)
seeds_df.head()
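# (added note) For example, a raw seed string like 'W01' yields Seed == 1 and a play-in
# seed like 'X16a' yields 16, since only the two digits are extracted and cast to numeric.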
# Note: this merge and the subsequent ones are performed twice, once for the home team and once for the road team, because the data are in a wide format by team (i.e., there are two columns rather than two rows per game)
home_road = ['H','R']
for hr in home_road:
all_games_df = pd.merge(all_games_df, seeds_df, left_on=[hr+'TeamID','Season'], right_on = ['TeamID','Season'], how='left')
all_games_df.rename(columns={'Seed': hr+'Seed'}, inplace=True)
all_games_df = all_games_df.drop(['TeamID'], axis=1)
all_games_df.head()
# Add Ken Pom data
kp_df = pd.read_csv('Data/~Created data/kp_all.csv')
efficiency_list = ['conf','adjem','adjo','adjd','luck']
for hr in home_road:
all_games_df = pd.merge(all_games_df, kp_df, left_on=[hr+'TeamID','Season'], right_on = ['TeamID','Season'], how='inner')
for metric in efficiency_list:
all_games_df.rename(columns={metric: hr+metric}, inplace=True)
all_games_df = all_games_df.drop(['TeamID','team'], axis=1)
all_games_df.head()
# Add Massey and Ken Pom rankings
massey_df = pd.read_csv('Data/Kaggle NCAA/MasseyOrdinals_thru_2019_day_128.csv')
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
return {
'observation_id': obsid,
'effective_from': ef,
'effective_until': eu,
'observation_deleted_at': oda
}
nindex = pd.date_range(start='20191004T0000Z',
freq='1h', periods=10)
@pytest.fixture()
def ids():
return ['f2844284-ea0a-11e9-a7da-f4939feddd82',
'f3e310ba-ea0a-11e9-a7da-f4939feddd82',
'09ed7cf6-ea0b-11e9-a7da-f4939feddd82',
'0fe9f2ba-ea0b-11e9-a7da-f4939feddd82',
'67ea9200-ea0e-11e9-832b-f4939feddd82']
@pytest.fixture()
def aggobs(ids):
return tuple([
_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191004T0501Z')),
_make_aggobs(ids[2], eu=pd.Timestamp('20191004T0400Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0700Z'),
eu=pd.Timestamp('20191004T0800Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0801Z')),
_make_aggobs(ids[3], oda=pd.Timestamp('20191005T0000Z')),
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191003T0000Z'))
])
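# (added comment) The aggobs fixture above mixes observations that are always effective,
# become effective later, have bounded effective windows, or are marked deleted, so the
# compute_aggregate tests below exercise each validity case handled by the utils module.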
def test_compute_aggregate(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_missing_from_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
aggobs = list(aggobs[:-2]) + [
_make_aggobs('09ed7cf6-ea0b-11e9-a7da-f4939fed889')]
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_empty_data(aggobs, ids):
data = {}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:2], nindex)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_missing_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
data[ids[-1]] = pd.DataFrame({'value': [1] * 8, 'quality_flag': [0] * 8},
index=nindex[:-2])
aggobs = list(aggobs[:-2]) + [_make_aggobs(ids[-1])]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series(
[3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, None, None],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_deleted_not_removed(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids}
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_not_removed_yet(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# with last aggobs, would try and get data before effective_until,
# but was deleted, so raise error
aggobs = list(aggobs[:-2]) + [
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191004T0700Z'))]
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_but_removed_before(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# aggobs[-1] properly removed
aggobs = list(aggobs[:-2]) + [aggobs[-1]]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}))
def test_compute_aggregate_mean(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'mean', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([1.0] * 10, index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_no_overlap(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3], 'quality_flag': [2, 10, 338]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0230Z'])),
ids[1]: pd.DataFrame(
{'value': [3, 2, 1], 'quality_flag': [9, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0200Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'median', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, None, 2.5, None],
'quality_flag': [2, 10, 9, 338 | 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_before_effective(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3, 0, 0], 'quality_flag': [2, 10, 338, 0, 0]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z'])),
ids[1]: pd.DataFrame(
{'value': [None, 2.0, 1.0], 'quality_flag': [0, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0201Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'max', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 2.0, 1.0],
'quality_flag': [2, 10, 338, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_bad_cols():
data = {'a': pd.DataFrame([0], index=pd.DatetimeIndex(
['20191001T1200Z']))}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC',
'mean', [_make_aggobs('a')])
def test_compute_aggregate_index_provided(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
the_index = nindex.copy()[::2]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], the_index)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 3.0],
index=the_index),
'quality_flag': pd.Series([0]*5, index=the_index)})
)
@pytest.mark.parametrize('dfindex,missing_idx', [
(pd.date_range(start='20191004T0000Z', freq='1h', periods=11), -1),
(pd.date_range(start='20191003T2300Z', freq='1h', periods=11), 0),
])
def test_compute_aggregate_missing_values_with_index(
aggobs, ids, dfindex, missing_idx):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], dfindex)
assert pd.isnull(agg['value'][missing_idx])
def test_compute_aggregate_partial_missing_values_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
data[ids[2]] = pd.DataFrame({'value': [1] * 5, 'quality_flag': [0] * 5},
index=nindex[5:])
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], nindex)
expected = pd.DataFrame(
{'value': pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}
)
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_obs_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
aggobs[:-2], nindex)
def test_compute_aggregate_out_of_effective(aggobs, ids):
limited_aggobs = [aggob
for aggob in aggobs
if aggob['effective_until'] is not None]
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
max_time = pd.Series([o['effective_until'] for o in limited_aggobs]).max()
ooe_index = pd.date_range(
max_time + pd.Timedelta('1H'),
max_time + pd.Timedelta('25H'),
freq='60min'
)
with pytest.raises(ValueError) as e:
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
limited_aggobs, ooe_index)
assert str(e.value) == 'No effective observations in data'
def test__observation_valid(aggobs):
out = utils._observation_valid(
nindex, 'f2844284-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(True, index=nindex))
def test__observation_valid_ended(aggobs):
out = utils._observation_valid(
nindex, 'f3e310ba-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series([False] * 6 + [True] * 4,
index=nindex))
def test__observation_valid_many(aggobs):
out = utils._observation_valid(
nindex, '09ed7cf6-ea0b-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(
[True, True, True, True, True, False, False, True, True, True],
index=nindex))
def test__observation_valid_deleted(aggobs):
with pytest.raises(ValueError):
utils._observation_valid(
nindex, '0fe9f2ba-ea0b-11e9-a7da-f4939feddd82', aggobs)
def test__observation_valid_deleted_before(aggobs):
out = utils._observation_valid(
nindex, '67ea9200-ea0e-11e9-832b-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(False, index=nindex))
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0745Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z',
'20191004T0800Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
])
def test__make_aggregate_index(length, label, expected):
test_data = {
0: pd.DataFrame(range(5), index=pd.date_range(
'20191004T0700Z', freq='7min', periods=5)), # end 35
1: pd.DataFrame(range(4), index=pd.date_range(
'20191004T0015-0700', freq='10min', periods=4))} # end 45
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0715Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z'])),
])
def test__make_aggregate_index_offset_right(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0701Z', freq='7min', periods=6)) # end 35
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0645Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0600Z',
'20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0640Z', '20191004T0700Z', '20191004T0720Z'])),
('36min', 'ending', pd.DatetimeIndex(['20191004T0712Z',
'20191004T0748Z'])),
('36min', 'beginning', pd.DatetimeIndex(['20191004T0636Z',
'20191004T0712Z'])),
])
def test__make_aggregate_index_offset_left(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0658Z', freq='7min', periods=6)) # end 32
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_tz():
length = '30min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T1600Z', freq='5min', periods=6)) # end 30
}
expected = pd.DatetimeIndex(['20190101T0900'],
tz='America/Denver')
out = utils._make_aggregate_index(test_data, length, label,
'America/Denver')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_invalid_length():
length = '33min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0158Z', freq='7min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
def test__make_aggregate_index_instant():
length = '30min'
label = 'instant'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0100Z', freq='10min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('start,end', [
(pd.Timestamp('20190101T0000Z'), pd.Timestamp('20190102T0000')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000Z')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000')),
])
def test__make_aggregate_index_localization(start, end):
length = '30min'
label = 'ending'
test_data = {
0: pd.DataFrame(range(1), index=pd.DatetimeIndex([start])),
1: pd.DataFrame(range(1), index=pd.DatetimeIndex([end])),
}
with pytest.raises(TypeError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('inp,oup', [
(pd.DataFrame(dtype=float), pd.Series(dtype=float)),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float),
pd.DataFrame(dtype=float)),
(pd.Series([0, 1]), pd.Series([0, 1])),
(pd.DataFrame([[0, 1], [1, 2]]), pd.DataFrame([[0, 1], [1, 2]])),
pytest.param(
pd.Series([0, 1]),
pd.Series([0, 1], index=pd.date_range(start='now', freq='1min',
periods=2)),
marks=pytest.mark.xfail(type=AssertionError, strict=True)),
pytest.param(
pd.Series([0, 1]),
pd.Series([1, 0]),
marks=pytest.mark.xfail(type=AssertionError, strict=True))
])
def test_sha256_pandas_object_hash(inp, oup):
assert utils.sha256_pandas_object_hash(inp) == utils.sha256_pandas_object_hash(oup) # NOQA
def test_listhandler():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
def test_listhandler_recreate():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
l2 = logging.getLogger('testlist2')
h2 = utils.ListHandler()
l2.addHandler(h2)
l2.error('Second fail')
out = h2.export_records()
assert len(out) == 1
assert out[0].message == 'Second fail'
def test_hijack_loggers(mocker):
old_handler = mocker.MagicMock()
new_handler = mocker.MagicMock()
mocker.patch('solarforecastarbiter.utils.ListHandler',
return_value=new_handler)
logger = logging.getLogger('testhijack')
logger.addHandler(old_handler)
assert logger.handlers[0] == old_handler
with utils.hijack_loggers(['testhijack']):
assert logger.handlers[0] == new_handler
assert logger.handlers[0] == old_handler
def test_hijack_loggers_sentry(mocker):
events = set()
def before_send(event, hint):
events.add(event['logger'])
return
sentry_sdk.init(
"https://[email protected]/0",
before_send=before_send)
logger = logging.getLogger('testlog')
with utils.hijack_loggers(['testlog']):
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' not in events
events = set()
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' in events
@pytest.mark.parametrize('data,freq,expected', [
(pd.Series(index=pd.DatetimeIndex([]), dtype=float), '5min',
[pd.Series(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z'])),
'5min',
[pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z']))]),
import joblib
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import autorch
from autorch.function import sp2wt
class F(object):
def __init__(self,config):
# simulation data model
self.icg_model = joblib.load(config['icg_model_path'])
self.c620_model = joblib.load(config['c620_model_path'])
self.c660_model = joblib.load(config['c660_model_path'])
self.c670_model = joblib.load(config['c670_model_path'])
# real data model
self.icg_real_data_model = joblib.load(config['icg_model_path_real_data'])
self.c620_real_data_model = joblib.load(config['c620_model_path_real_data'])
self.c660_real_data_model = joblib.load(config['c660_model_path_real_data'])
self.c670_real_data_model = joblib.load(config['c670_model_path_real_data'])
# real data linear model
self.c620_real_data_model_linear = joblib.load(config['c620_model_path_real_data_linear'])
self.c660_real_data_model_linear = joblib.load(config['c660_model_path_real_data_linear'])
self.c670_real_data_model_linear = joblib.load(config['c670_model_path_real_data_linear'])
# columns name
self.icg_col = joblib.load(config['icg_col_path'])
self.c620_col = joblib.load(config['c620_col_path'])
self.c660_col = joblib.load(config['c660_col_path'])
self.c670_col = joblib.load(config['c670_col_path'])
# simple op_col
self.c620_simple_op_col = joblib.load(config['c620_simple_op_col'])
self.c660_simple_op_col = joblib.load(config['c660_simple_op_col'])
self.c670_simple_op_col = joblib.load(config['c670_simple_op_col'])
# other information
self.c620_wt_always_same_split_factor_dict = joblib.load(config['c620_wt_always_same_split_factor_dict'])
self.c660_wt_always_same_split_factor_dict = joblib.load(config['c660_wt_always_same_split_factor_dict'])
self.c670_wt_always_same_split_factor_dict = joblib.load(config['c670_wt_always_same_split_factor_dict'])
self.index_9999 = joblib.load(config['index_9999_path'])
self.index_0001 = joblib.load(config['index_0001_path'])
self.V615_density = 0.8626
self.C820_density = 0.8731
self.T651_density = 0.8749
# users can set two modes
self.Recommended_mode = False
self.real_data_mode = False
self._Post_processing = True
self._linear_model = False
def ICG_loop(self,Input):
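# (added comment) Repeatedly predicts the C620 distillate rate for the given input,
# relaxing the 'NA in Benzene' spec by 30 ppmw per iteration, until the predicted
# distillate rate is positive; returns the ICG output and the (possibly adjusted) input.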
while True:
if self.real_data_mode == True:
output = pd.DataFrame(self.icg_real_data_model.predict(Input[self.icg_col['x']].values),
index=Input.index,columns=['Simulation Case Conditions_C620 Distillate Rate_m3/hr'])
if self.real_data_mode == False:
output = pd.DataFrame(self.icg_model.predict(Input[self.icg_col['x']].values),
index=Input.index,columns=['Simulation Case Conditions_C620 Distillate Rate_m3/hr'])
dist_rate = output['Simulation Case Conditions_C620 Distillate Rate_m3/hr'].values[0]
na_in_benzene = Input['Simulation Case Conditions_Spec 2 : NA in Benzene_ppmw'].values[0]
print('current Distillate Rate_m3/hr:{} NA in Benzene_ppmw:{}'.format(dist_rate,na_in_benzene))
if output['Simulation Case Conditions_C620 Distillate Rate_m3/hr'].values[0] > 0:
return output,Input
else:
Input['Simulation Case Conditions_Spec 2 : NA in Benzene_ppmw'] -= 30
print('NA in Benzene_ppmw -= 30')
def __call__(self,icg_input,c620_feed,t651_feed):
# get index
idx = icg_input.index
# c620_case
c620_case = pd.DataFrame(index=idx,columns=self.c620_col['case'])
# c620_case(Receiver Temp_oC) = user input
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 1 : Receiver Temp_oC'] = icg_input['Tatoray Stripper C620 Operation_Specifications_Spec 1 : Receiver Temp_oC'].values
if self.Recommended_mode == True:
icg_input['Simulation Case Conditions_Spec 2 : NA in Benzene_ppmw'] = 980.0
icg_input['Simulation Case Conditions_Spec 1 : Benzene in C620 Sidedraw_wt%'] = 70.0
icg_output,icg_input = self.ICG_loop(icg_input)
print(icg_output)
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 2 : Distillate Rate_m3/hr'] = icg_output.values
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'] = icg_input['Simulation Case Conditions_Spec 1 : Benzene in C620 Sidedraw_wt%'].values
if self.Recommended_mode == False:
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 2 : Distillate Rate_m3/hr'] = icg_input['Tatoray Stripper C620 Operation_Specifications_Spec 2 : Distillate Rate_m3/hr'].values
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'] = icg_input['Simulation Case Conditions_Spec 1 : Benzene in C620 Sidedraw_wt%'].values
# c620_input(c620_case&c620_feed)
c620_input = c620_case.join(c620_feed)
# c620 output(op&wt)
c620_output = self.c620_model.predict(c620_input)
c620_sp,c620_op = c620_output.iloc[:,:41*4],c620_output.iloc[:,41*4:]
# update by c620 real data model?
if self.real_data_mode == True:
if self._linear_model == True:
c620_op_real = self.c620_real_data_model_linear.predict(c620_input)[:,41*4:]
c620_op_real = pd.DataFrame(c620_op_real,index=c620_input.index,columns=self.c620_simple_op_col)
c620_sp_real = self.c620_real_data_model_linear.predict(c620_input)[:,:41*4]
c620_sp_real = pd.DataFrame(c620_sp_real,index=c620_input.index,columns=c620_sp.columns)
import base64
import gzip
import json
import os
import pickle
from glob import glob
from urllib.parse import urlparse
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from requests.auth import HTTPBasicAuth
from settings.basic import (CACHE_ENABLED, CACHE_PATH, DATA_PATH,
intrinio_username,
intrinio_password, debug)
def dict_to_str(dct):
return ' '.join(['%s:%s' % (k, v) for k, v in dct.items()])
def get_datasets_name(resample_period, symbols_list_name, thresholds,
target_shift):
normal_name = "normal_%s_%s_%s_%s_y%s" % (
resample_period, symbols_list_name, thresholds[0],
thresholds[1],
target_shift)
z_name = "z-score_%s_%s_%s_%s_y%s" % (
resample_period, symbols_list_name, thresholds[0], thresholds[1],
target_shift)
return normal_name, z_name
def get_headers(trading_params):
header = 'dataset,period,clf,magic,model_params,'
header += ','.join(
[k for k in trading_params.keys() if k != 'dates'])
header += ',start_trade,final_trade,time,min,max,mean,last'
return header
def format_line(dataset_name, clf, magic, trading_params, model_params, pfs,
total_time):
r = [p.total_money for p in pfs]
line = '%s,%s,%s,%s,%s,' % (
dataset_name.split('_')[0], dataset_name.split('_')[1], clf, magic,
dict_to_str(model_params))
line += ','.join(list([str(v) for v in trading_params.values()])[:-1])
line += ',' + trading_params['dates'][0] + ',' + \
trading_params['dates'][1] + ','
line += '%.2f,' % total_time
line += '%.1f,%.1f,%.1f,%.1f' % (np.min(r), np.max(r), np.mean(r), r[-1])
return line
def full_print(res):
with pd.option_context('display.max_rows', None, 'display.max_columns',
None):
print(res)
def exists_obj(name):
return os.path.exists(name + '.pgz')
def save_obj(obj, name):
with gzip.GzipFile(name + '.pgz', 'w') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with gzip.GzipFile(name + '.pgz', 'r') as f:
return pickle.load(f)
def to_df(file: str) -> pd.DataFrame:
df = pd.read_csv(file)
df.set_index(['year', 'quarter'], inplace=True)
df.sort_index(inplace=True)
return df
def plot(x, y):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
plt.plot(x, y)
plt.gcf().autofmt_xdate()
def load_symbol_list(symbols_list_name: str) -> list:
path = os.path.join(DATA_PATH, '%s_symbols.lst' % (symbols_list_name))
return open(path).read().split()
def call_and_cache(url: str, cache=True) -> dict:
"""
Calls the URL with GET method if the url file is not cached
:param url: url to retrieve
:param kwargs: specify no-cache
:return: json.loads of the response (or empty dict if error)
"""
url_parsed = urlparse(url)
cached_file = os.path.join(CACHE_PATH,
url_parsed.netloc + url_parsed.path + "/" +
base64.standard_b64encode(
url_parsed.query.encode()).decode())
if not os.path.exists(os.path.dirname(cached_file)):
os.makedirs(os.path.dirname(cached_file))
data_json = {}
if CACHE_ENABLED and os.path.exists(cached_file) and cache:
if debug:
print(
"Data was present in cache and cache is enabled, loading: %s for %s" %
(cached_file, url))
with open(cached_file, 'r') as f:
data_json = json.loads(f.read())
else:
print(
"Data was not present in cache or caching was disabled; calling request: %s" % url)
r = requests.get(url, auth=HTTPBasicAuth(intrinio_username,
intrinio_password))
if r.status_code != 200:
print(
"Request status was: %s for URL: %s" % (r.status_code, url))
return data_json
data_json = json.loads(r.text)
if 'data' in data_json.keys() and not len(data_json['data']) > 0:
print("Data field is empty.\nRequest URL: %s" % (url))
with open(cached_file, 'w') as f:
f.write(json.dumps(data_json))
print(
"Successfully cached url: %s to %s" % (url, cached_file))
return data_json
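# (added usage sketch) The endpoint below is a hypothetical placeholder, not taken from
# the original code; it only illustrates the call/caching pattern of call_and_cache().
def _example_cached_call():
    example_url = "https://api.intrinio.com/financials/standardized?identifier=AAPL&statement=income_statement"
    first = call_and_cache(example_url)   # hits the API when not cached, then writes the cache file
    second = call_and_cache(example_url)  # re-served from the CACHE_PATH file when CACHE_ENABLED is True
    return first, second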
def plot_2_axis():
import numpy as np
import matplotlib.pyplot as plt
x, y = np.random.random((2, 50))
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.scatter(df['last'], df.C, c='b')
ax2.scatter(df['last'], df.gamma, c='r')
ax1.set_yscale('log')
ax2.set_yscale('log')
def load_trend(file, price, name):
res = load_obj(file)
res = [r for r in res if '%.1f' % r[1][-1].total_money == str(price)]
return to_df_col(res[0][1], name)
def get_trend(results, price, name):
res = []
# from glob import glob
# for file in glob('*/clean_results_*'):
# import ipdb
# ipdb.set_trace()
# result = load_obj(file[:-4])
for result in results:
res.extend(
[r for r in result if '%.1f' % r[1][-1].total_money == str(price)])
# del result
if len(res) == 0:
print("No results found")
return
# break
return to_df_col(res[0][1], name)
def load_all(experiment):
from glob import glob
results = []
for file in glob('%s/*/clean_results_*' % experiment):
print("loading: %s" % file)
results.append(load_obj(file[:-4]))
return results
def new_plot(file, experiment):
cols = ["dataset", "period", "clf", "magic", "model_params", "k",
"bot_thresh", "top_thresh", "mode", "trade_frequency",
"start_trade", "final_trade", "time", "min", "max", "mean", "last"]
results = load_all(experiment)
r1 = pd.read_csv(file, names=cols).sort_values('last').drop('time',
1).drop_duplicates()
best = r1.groupby('clf')[['last']].max()
sp500 = pd.read_csv('sp500.csv').set_index('Date')
sp500.index = pd.to_datetime(sp500.index)
# -*- coding: utf-8 -*-
"""
Spyder Editor
Created on Tue Mar 23 16:41:29 2021
@author: <NAME>
This is Projectjstor.
Data for diachronic and journal analysis
"""
#==============================================================================
# ############################################################## Import library
#==============================================================================
import os
import numpy as np
import pandas as pd
import pickle
import bz2
#==============================================================================
# #################################################### Initialize project paths
#==============================================================================
main_path = os.path.join("your_main_path")
os.chdir(main_path)
#==============================================================================
# ################################################################# Import data
#==============================================================================
with open(os.path.join(main_path,
"0. Data",
"LDA_model_philosophy_of_science_all_K25.pkl"), "rb") as f:
ldamodel_lda = pickle.load(f)
DTM = pd.read_pickle(bz2.BZ2File(
os.path.join(main_path,
"0. Data",
"DTM_philosophy_of_science_all.pbz2"), 'rb'))
with open(os.path.join(main_path,
"0. Data",
"Vocabulary_philosophy_of_science_all.pkl"), "rb") as f:
Vocab = pickle.load(f)
DF_statistique_generale = pd.read_pickle(
os.path.join(main_path,
"0. Data",
"DF_philosophy_of_science_all_metadata.pkl"))
#==============================================================================
# ##################### Data statistic, lda model score and lda hyperparameters
#==============================================================================
df_param=pd.DataFrame(index=['Value'])
df_param['Sparsity']=((DTM.todense() > 0).sum() /
DTM.todense().size*100) # sparsity (% of non-zero entries)
df_param['Log Likelihood']=ldamodel_lda.loglikelihood() # log likelihood (higher is better)
df_param['Perplexity']='' # perplexity (lower is better; exp(-1. * log-likelihood per word))
df_param['alpha']=ldamodel_lda.alpha
df_param['eta']=ldamodel_lda.eta
df_param['n_iter']=ldamodel_lda.n_iter
df_param['n_components']=ldamodel_lda.n_topics
df_param['random_state']=ldamodel_lda.random_state
df_param['refresh']=ldamodel_lda.refresh
#==============================================================================
# ########################################################### Topic by document
#==============================================================================
#Topic for each document
lda_output=ldamodel_lda.doc_topic_
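# (added comment) doc_topic_ is an (n_documents x n_topics) matrix; each row holds one
# document's topic distribution and sums to 1.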
topicnames = ["Topic_" + str(i) for i in range(len(ldamodel_lda.components_))]
docnames = [i for i in range(DTM.shape[0])]
df_document_topic = pd.DataFrame(lda_output,
columns=topicnames,
index=docnames)
dominant_topic = np.argmax(df_document_topic.values, axis=1)
#add results to statistic general
DF_statistique_generale['Dom_topic'] = dominant_topic
DF_topic=pd.concat([DF_statistique_generale,df_document_topic],
axis=1,
join='inner')
# count documents by topic
df_topic_distribution = DF_statistique_generale['Dom_topic'].value_counts(
).reset_index(name="Num_Documents")
df_topic_distribution.columns = ['Topic_Num', 'Num_Doc']
# Topic - keyword Matrix
df_topic_keywords = pd.DataFrame(ldamodel_lda.components_)  # each row is a topic-word distribution (rows sum to 1)
df_topic_keywords.index = topicnames
#Transpose to topic - keyword matrix
df_keywords_topic = df_topic_keywords.transpose()
df_keywords_topic.index = sorted([i for i in Vocab.keys()])
# Topic - Top Keywords Dataframe
n_top_words = 50+1
DF_Topic_TKW = pd.DataFrame(columns=range(n_top_words-1),index=range(len(ldamodel_lda.components_)))
vocab = sorted([i for i in Vocab.keys()])
topic_word = ldamodel_lda.components_
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-n_top_words:-1]
DF_Topic_TKW.loc[i]=topic_words
DF_Topic_TKW.columns = ['Word_'+str(i) for i in range(DF_Topic_TKW.shape[1])]
DF_Topic_TKW.index = ['Topic_'+str(i) for i in range(DF_Topic_TKW.shape[0])]
DF_Topic_TKW['Sum_Doc'] = np.array(DF_statistique_generale['Dom_topic'].value_counts(
).sort_index())
DF_Topic_TKW['Top-10_Words'] = ''
for idx,row in DF_Topic_TKW.iterrows():
DF_Topic_TKW['Top-10_Words'][idx]=(row['Word_0']+'; '+row['Word_1']+'; '+
row['Word_2']+'; '+row['Word_3']+'; '+row['Word_4']+'; '+
row['Word_5']+'; '+row['Word_6']+'; '+row['Word_7']+'; '+
row['Word_8']+'; '+row['Word_9'])
#==============================================================================
# ############################################################# Topic by period
#==============================================================================
DF_topic['Period']=DF_topic.Year.apply(lambda x: #4 years period
str(int(x)-(int(x)+2)%4)+'-'+str((int(x)-(int(x)+2)%4)+3))
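# (added worked example) For Year = 1935: (1935 + 2) % 4 == 1, so the period start is
# 1935 - 1 = 1934 and the label is '1934-1937'; years 1934-1937 all fall in that bucket,
# 1938-1941 in the next one, and so on.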
# Topic - Period Matrix
DF_PT=pd.DataFrame(lda_output,
columns=topicnames,
index=docnames)
DF_PT['Period']=DF_topic.Period
DF_PT = DF_PT.groupby(['Period']).sum()
DF_TP = DF_PT.transpose()
DF_TP = DF_TP/DF_TP.sum()
DF_TP_Overall = DF_PT.transpose()
DF_TP_Overall['Raw'] = DF_PT.sum()
DF_TP_Overall['Overall'] = DF_PT.sum() / sum(DF_PT.sum())
# Topic - Period Matrix (not translated)
NT = ~DF_statistique_generale.translated
DF_PTNT=pd.DataFrame(lda_output[NT],
columns=topicnames,
index=np.array(docnames)[NT])
DF_PTNT['Period']=DF_topic.Period[NT]
DF_PTNT = DF_PTNT.groupby(['Period']).sum()
DF_TPNT = DF_PTNT.transpose()
DF_TPNT = DF_TPNT/DF_TPNT.sum()
DF_TPNT_Overall = DF_PTNT.transpose()
DF_TPNT_Overall['Raw'] = DF_PTNT.sum()
DF_TPNT_Overall['Overall'] = DF_PTNT.sum() / sum(DF_PTNT.sum())
# Topic - Period Matrix (translated)
T = DF_statistique_generale.translated
DF_PTT=pd.DataFrame(lda_output[T],
columns=topicnames,
index=np.array(docnames)[T])
DF_PTT['Period']=DF_topic.Period[T]
DF_PTT = DF_PTT.groupby(['Period']).sum()
DF_TPT = DF_PTT.transpose()
DF_TPT = DF_TPT/DF_TPT.sum()
DF_TPT_Overall = DF_PTT.transpose()
DF_TPT_Overall['Raw'] = DF_PTT.sum()
DF_TPT_Overall['Overall'] = DF_PTT.sum() / sum(DF_PTT.sum())
#==============================================================================
# ################################################### Topic by period + journal
#==============================================================================
# Topic - Journal + Period Matrix
DF_temp = pd.DataFrame([[item for sublist in [sorted(set(DF_topic.Period))]*len(set(DF_topic.Journal_id)) for item in sublist],
[item for sublist in [[x]*len(set(DF_topic.Period)) for x in set(DF_topic.Journal_id)] for item in sublist]
], index=['Period','Journal_id']).transpose()
DF_PJT=pd.DataFrame(lda_output,
columns=topicnames,
index=docnames)
DF_PJT['Period']=DF_topic.Period
DF_PJT['Journal_id']=DF_topic.Journal_id
count_all = DF_PJT.groupby(['Journal_id','Period']).count().transpose().iloc[0]
count_t = DF_PJT[T].groupby(['Journal_id','Period']).count().transpose().iloc[0]
DF_PJT = pd.concat([DF_temp,DF_PJT])
DF_PJT = DF_PJT.groupby(['Journal_id','Period']).sum()
DF_PJT['count_all'] = 0
DF_PJT['count_tran'] = 0
DF_TPJ = DF_PJT.transpose()
DF_TPJ = DF_TPJ/DF_TPJ.sum()
DF_TPJ.loc['count_all'] = count_all
DF_TPJ.loc['count_tran'] = count_t
DF_TPJ_Overall = DF_PJT.transpose()
DF_TPJ_Overall['Raw'] = DF_PJT.sum()
DF_TPJ_Overall['Overall'] = DF_PJT.sum() / sum(DF_PJT.sum())
DF_TPJ_Overall.loc['count_all'] = count_all
DF_TPJ_Overall.loc['count_tran'] = count_t
# Topic - Journal Matrix
DF_JT = pd.concat([DF_statistique_generale['Journal_id'],df_document_topic],
axis=1,
join='inner')
DF_JT = DF_JT.groupby(['Journal_id']).sum()
DF_TJ = DF_JT.transpose()
DF_TJ = DF_TJ/DF_TJ.sum()
DF_TJ_Overall = DF_JT.transpose()
DF_TJ_Overall['Raw'] = DF_JT.sum()
DF_TJ_Overall['Overall'] = DF_JT.sum() / sum(DF_JT.sum())
# Periods - Topics top_10 articles Matrix (sorted by year)
DF_PT_T10A=pd.DataFrame(data='', index=DF_TP.columns,columns=DF_TP.index)
for period in DF_TP.columns:
for topic in DF_TP.index:
for idx in DF_topic[DF_topic.Period==period].nlargest(
10,topic).sort_values('Year',ascending=False).index:
            DF_PT_T10A.loc[period,topic]=DF_PT_T10A.loc[period,topic]+DF_topic.Citation[idx]+'\n'
#==============================================================================
# ############################################################# Topic by Author
#==============================================================================
# Author - Topic Matrix
authors = set()
for group_author in DF_statistique_generale['Author']:
for author in group_author:
authors.add(author)
authors = sorted(authors)
DF_AT = pd.DataFrame(data='', index=range(len(authors)),columns=topicnames)
for idx,author in enumerate(authors):
    list_bool = DF_statistique_generale.Author.apply(lambda x: author in x)
DF_AT.loc[idx]=sum(lda_output[list_bool])/len(lda_output[list_bool])
DF_AT['Author']=[', '.join(x) for x in authors]  # a list (not a set) keeps rows aligned with the authors order
# Author T - Matrix
DF_ATT = pd.concat([DF_statistique_generale[T]['Author'],df_document_topic[T]],
axis=1,
join='inner')
DF_ATT.Author = DF_ATT.Author.apply(lambda x: '; '.join(', '.join(e) for e in x))
DF_ATT = DF_ATT.groupby(['Author']).sum()
DF_TAT = DF_ATT.transpose()
count_tran = DF_TAT.sum()
DF_TAT = DF_TAT/count_tran
DF_ATT = DF_TAT.transpose()
DF_ATT['NDoc'] = count_tran
#==============================================================================
# ########################################################### Topic correlation
#==============================================================================
# Topic Pearson Correlation
DF_TfromD = df_document_topic.corr(method='pearson')
DF_TfromD_Stack = pd.DataFrame(columns=['Topic_A','Topic_B','Correlation'])
for id1,topic1 in enumerate(topicnames):
for id2,topic2 in enumerate(topicnames):
n_id = DF_TfromD_Stack.shape[0]
DF_TfromD_Stack.loc[n_id] = [str(id1+1),str(id2+1),DF_TfromD[topic1][topic2]]
##
DF_TfromW=df_topic_keywords.T.corr(method='pearson')
DF_TfromW_Stack = | pd.DataFrame(columns=['Topic_A','Topic_B','Correlation']) | pandas.DataFrame |
"""
BuildingLoad class - post-processes building-level load data
and forms it into a pandas DataFrame
"""
try:
import pandas as pd
except ImportError:
pd = None
print('pandas is not installed')
class BuildingLoad(object):
def __init__(self, bldg_load_profile):
"""
:param bldg_load_profile: bldg_load_profile: jsonarray returned
from BSH server by calling the building load api
"""
# reform the dict
index_list = list()
self._cooling_unit = ''
self._heating_unit = ''
self._cooling_density_unit = ''
self._heating_density_unit = ''
data = list()
for d_dict in bldg_load_profile:
data_dict = dict()
if len(d_dict.keys()) == 1:
continue
index_list.append(d_dict['model'].upper())
if self._cooling_unit == '':
self._cooling_unit = d_dict['cooling_unit']
if self._heating_unit == '':
self._heating_unit = d_dict['heating_unit']
if self._heating_density_unit == '':
self._heating_density_unit = d_dict['heating_load_density_unit']
if self._cooling_density_unit == '':
self._cooling_density_unit = d_dict['cooling_load_density_unit']
# remove name and units from the dict
data_dict['heating_load'] = d_dict['heating_load']
data_dict['cooling_load'] = d_dict['cooling_load']
data_dict['heating_load_density'] = d_dict['heating_load_density']
data_dict['cooling_load_density'] = d_dict['cooling_load_density']
data.append(data_dict)
self._df = | pd.DataFrame(data, index=index_list) | pandas.DataFrame |
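# Illustrative usage sketch (not from the original source): the payload below is a
# hand-made example shaped like the json array the constructor above reads; the field
# names come from the keys it accesses.
_example_profile = [{
    'model': 'zone_1',
    'heating_load': 12.5, 'cooling_load': 30.1,
    'heating_load_density': 0.9, 'cooling_load_density': 2.1,
    'heating_unit': 'kW', 'cooling_unit': 'kW',
    'heating_load_density_unit': 'W/m2', 'cooling_load_density_unit': 'W/m2',
}]
_example_loads = BuildingLoad(_example_profile)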
## Feature selection of citation needed and no citation needed (not a claim) datasets
## This code reads in the output from "proto_featureExtract.py"
## 1) the part of speech filtered data sets including both words and parts of speech
## 2) the counts
# import relevant libraries
import pandas as pd
import numpy as np
# load in data
NeedCite = pd.read_pickle('../Data/NeedCiteFilt.pkl') # part of speech filtered clauses and sentences that need citations
NotClaim = pd.read_pickle('../Data/NotClaimFilt.pkl') # that do not need citations
UniqWords = pd.read_pickle('../Data/UniqueWords.pkl') # word occurrences in the above datasets
UniqPOS = | pd.read_pickle('../Data/UniquePOS.pkl') | pandas.read_pickle |
# -*- coding: utf-8 -*-
import torch
from pytorch_fid import fid_score
import pandas as pd
from glob import glob
import os, argparse
import numpy as np
# %%
device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')
batch_size = 50
dim = 2048
#path1 = './Datasets/Zurich_patches/fold2/patch_tlevel1/A/test'
#path2 = './Datasets/Zurich_patches_fake/fold2/patch_tlevel1/cyc_A'
# %%
#fid_value = fid_score.calculate_fid_given_paths(
# [path1, path2],
# batch_size, device, dim)
# %%
def calculate_FIDs(dataset, fold=1):
# dataset='Zurich'
# fold=1
assert dataset in ['Balvan', 'Eliceiri', 'Zurich'], "dataset must be in ['Balvan', 'Eliceiri', 'Zurich']"
if dataset == 'Eliceiri':
dataroot_real = f'./Datasets/{dataset}_patches'
dataroot_fake = f'./Datasets/{dataset}_patches_fake'
dataroot_train = f'./Datasets/{dataset}_temp'
else:
dataroot_real = f'./Datasets/{dataset}_patches/fold{fold}'
dataroot_fake = f'./Datasets/{dataset}_patches_fake/fold{fold}'
dataroot_train = f'./Datasets/{dataset}_temp/fold{fold}'
gan_names = ['train2testA', 'train2testB', 'testA', 'testB', 'B2A',
'cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir']
# gan_names = ['cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir', 'B2A']
# csv information
header = [
'Dataset', 'Fold', 'Tlevel', 'GAN_name', 'Path_fake', 'Path_real',
'FID',
]
df = pd.DataFrame(columns=header)
row_dict = {'Dataset': dataset, 'Fold': fold}
for tlevel in [int(tl[-1]) for tl in glob(f'{dataroot_fake}/patch_tlevel*')]:
row_dict['Tlevel'] = tlevel
for gan_name in gan_names:
row_dict['GAN_name'] = gan_name
if gan_name in ['train2testA', 'train2testB']:
row_dict['Path_fake'] = f'{dataroot_train}/{gan_name[-1]}/train/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
elif gan_name in ['testA', 'testB']:
row_dict['Path_fake'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
elif gan_name == 'comir':
row_dict['Path_fake'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}_A/'
row_dict['Path_real'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}_B/'
elif gan_name == 'B2A':
row_dict['Path_fake'] = f'{dataroot_real}/patch_tlevel{tlevel}/A/test/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/B/test/'
else:
row_dict['Path_fake'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
row_dict['FID'] = fid_score.calculate_fid_given_paths(
[ row_dict['Path_fake'], row_dict['Path_real'] ],
batch_size, device, dim)
df = df.append(row_dict, ignore_index=True)
result_dir = dataroot_fake
if not os.path.exists(result_dir):
os.makedirs(result_dir)
df.to_csv(f'{result_dir}/FIDs.csv')
return
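# Illustrative invocation (assumes the ./Datasets/... folder layout referenced above exists):
#   calculate_FIDs('Zurich', fold=1)   # writes FIDs.csv under ./Datasets/Zurich_patches_fake/fold1/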
# %%
def calculate_FIDs_3D(dataset, fold=1):
# dataset='RIRE'
# fold=1
assert dataset in ['RIRE'], "dataset must be in ['RIRE']"
dataroot_real = f'./Datasets/{dataset}_patches_forFID/real/fold{fold}'
dataroot_fake = f'./Datasets/{dataset}_patches_forFID/fake/fold{fold}'
gan_names = ['cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir', 'B2A']
# csv information
header = [
'Dataset', 'Fold', 'Tlevel', 'GAN_name', 'Path_fake', 'Path_real',
'FID',
]
df = | pd.DataFrame(columns=header) | pandas.DataFrame |
import numpy as np
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from tqdm.auto import trange
import time
now_time = time.time()
#### Define your webdriver options ####
url = 'https://www.numbeo.com/cost-of-living/'
options = webdriver.firefox.options.Options()
options.headless = True
driver = webdriver.Firefox(options = options)
#### To scrape only 100 links, choose limited = True. To scrape all data, choose limited = False ####
limited = True
upper_limit = 100
#### Create empty data frame for results ####
d = | pd.DataFrame({'Country':[], 'City':[], 'Category':[],'Name':[], 'Price':[], 'Min':[], 'Max':[] }) | pandas.DataFrame |
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.encoders import MultiClassEncoder, WOEEncoder
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
X = pd.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = pd.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
)
return obj, X, X_expected
@pytest.fixture
def data_float32():
X = pd.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = pd.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder(), dtype=np.float32).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
).astype(np.float32)
return obj, X, X_expected
@pytest.fixture
def data_no_cat():
X = pd.DataFrame(
np.zeros((3, 6)),
columns=list("qweasd"),
)
y = pd.Series([1, 2, 0], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
return obj, X, X.copy()
@pytest.fixture
def data_ks():
X = ks.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = ks.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
)
return obj, X, X_expected
@pytest.fixture
def data_float32_ks():
X = ks.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = ks.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder(), dtype=np.float32).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
).astype(np.float32)
return obj, X, X_expected
@pytest.fixture
def data_no_cat_ks():
X = ks.DataFrame(
np.zeros((3, 6)),
columns=list("qweasd"),
)
y = ks.Series([1, 2, 0], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
return obj, X, X.to_pandas().copy()
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = | pd.DataFrame(X_numpy_new, columns=X_expected.columns) | pandas.DataFrame |
from itertools import combinations
from abc import ABCMeta, abstractmethod
from skmob.utils import constants
from skmob.core.trajectorydataframe import TrajDataFrame
from tqdm import tqdm
import pandas as pd
from ..utils.utils import frequency_vector, probability_vector, date_time_precision
class Attack(object):
"""
Abstract class for a generic attack. Defines a series of functions common to all attacks.
Provides basic functions to compute risk for all users in a trajectory dataframe.
    Requires the implementation of both a matching function and an assessment function, which are attack dependent.
:param knowledge_length: int
the length of the background knowledge that we want to simulate.
"""
__metaclass__ = ABCMeta
def __init__(self, knowledge_length):
self.knowledge_length = knowledge_length
@property
def knowledge_length(self):
return self._knowledge_length
@knowledge_length.setter
def knowledge_length(self, val):
if val < 1:
raise ValueError("Parameter knowledge_length should not be less than 1")
self._knowledge_length = val
def _all_risks(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Computes risk for all the users in the data. It applies the risk function to every individual in the data.
If it is not required to compute the risk for the entire data, the targets parameter can be used to select
a portion of users to perform the calculation on.
:param traj: TrajectoryDataFrame
the dataframe against which to calculate risk.
:param targets: TrajectoryDataFrame or list, default None
        the user_ids targeted by the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param: show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
if targets is None:
targets = traj
else:
if isinstance(targets, list):
targets = traj[traj[constants.UID].isin(targets)]
if isinstance(targets, TrajDataFrame) or isinstance(targets, pd.DataFrame):
targets = traj[traj[constants.UID].isin(targets[constants.UID])]
if show_progress:
tqdm.pandas(desc="computing risk")
risks = targets.groupby(constants.UID).progress_apply(lambda x: self._risk(x, traj, force_instances))
else:
risks = targets.groupby(constants.UID).apply(lambda x: self._risk(x, traj, force_instances))
if force_instances:
risks = risks.droplevel(1)
risks = risks.reset_index(drop=True)
else:
risks = risks.reset_index(name=constants.PRIVACY_RISK)
return risks
def _generate_instances(self, single_traj):
"""
Return a generator to all the possible background knowledge of length k for a single user_id.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:return: generator
a generator to all the possible instances of length k. Instances are tuples with the values of the actual
records in the combination.
"""
size = len(single_traj.index)
if self.knowledge_length > size:
return combinations(single_traj.values, size)
else:
return combinations(single_traj.values, self.knowledge_length)
def _risk(self, single_traj, traj, force_instances=False):
"""
Computes the risk of reidentification of an individual with respect to the entire population in the data.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param traj: TrajectoryDataFrame
the dataframe with the complete data
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:return: float
the risk for the individual, expressed as a float between 0 and 1
"""
instances = self._generate_instances(single_traj)
risk = 0
if force_instances:
inst_data = {constants.LATITUDE: list(), constants.LONGITUDE: list(),
constants.DATETIME: list(), constants.UID: list(),
constants.INSTANCE: list(), constants.INSTANCE_ELEMENT: list(),
constants.PROBABILITY: list()}
inst_id = 1
for instance in instances:
prob = 1.0 / traj.groupby(constants.UID).apply(lambda x: self._match(x, instance)).sum()
elem_count = 1
for elem in instance:
inst_data[constants.LATITUDE].append(elem[0])
inst_data[constants.LONGITUDE].append(elem[1])
inst_data[constants.DATETIME].append(elem[2])
inst_data[constants.UID].append(elem[3])
inst_data[constants.INSTANCE].append(inst_id)
inst_data[constants.INSTANCE_ELEMENT].append(elem_count)
inst_data[constants.PROBABILITY].append(prob)
elem_count += 1
inst_id += 1
return pd.DataFrame(inst_data)
else:
for instance in instances:
prob = 1.0 / traj.groupby(constants.UID).apply(lambda x: self._match(x, instance)).sum()
if prob > risk:
risk = prob
if risk == 1.0:
break
return risk
@abstractmethod
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Abstract function to assess privacy risk for a whole dataframe of trajectories.
An attack must implement an assessing strategy. This could involve some preprocessing, for example
transforming the original data, and calls to the risk function.
If it is not required to compute the risk for the entire data, the targets parameter can be used to select
a portion of users to perform the assessment on.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the user_ids targeted by the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
pass
@abstractmethod
def _match(self, single_traj, instance):
"""
Matching function for the attack. It is used to decide if an instance of background knowledge matches a certain
        trajectory. The internal logic of an attack is represented by this function; therefore, it must be implemented
        depending on the kind of attack.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
pass
class LocationAttack(Attack):
"""
In a location attack the adversary knows the coordinates of the locations visited by an individual and matches them
against trajectories.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of
locations known to the adversary.
"""
def __init__(self, knowledge_length):
super(LocationAttack, self).__init__(knowledge_length)
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the user_ids targeted by the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
traj = traj.sort_values(by=[constants.UID, constants.DATETIME])
return self._all_risks(traj, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a location attack, only the coordinates are used in the matching.
If a trajectory presents the same locations as the ones in the instance, a match is found.
Multiple visits to the same location are also handled.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
locs = single_traj.groupby([constants.LATITUDE, constants.LONGITUDE]).size().reset_index(name=constants.COUNT)
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
inst = inst.astype(dtype=dict(single_traj.dtypes))
inst = inst.groupby([constants.LATITUDE, constants.LONGITUDE]).size().reset_index(name=constants.COUNT + "inst")
locs_inst = pd.merge(locs, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],
right_on=[constants.LATITUDE, constants.LONGITUDE])
if len(locs_inst.index) != len(inst.index):
return 0
else:
condition = locs_inst[constants.COUNT] >= locs_inst[constants.COUNT + "inst"]
if len(locs_inst[condition].index) != len(inst.index):
return 0
else:
return 1
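# Illustrative usage (assumes `tdf` is a skmob TrajDataFrame loaded elsewhere):
#   attack = LocationAttack(knowledge_length=2)
#   risks = attack.assess_risk(tdf)                      # one (uid, risk) row per individual
#   detail = attack.assess_risk(tdf, targets=[1, 2],     # restrict to two users and keep every
#                               force_instances=True)    # background-knowledge instance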
class LocationSequenceAttack(Attack):
"""
In a location sequence attack the adversary knows the coordinates of locations visited by an individual and
the order in which they were visited and matches them against trajectories.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of
locations known to the adversary.
"""
def __init__(self, knowledge_length):
super(LocationSequenceAttack, self).__init__(knowledge_length)
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the user_ids targeted by the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
traj = traj.sort_values(by=[constants.UID, constants.DATETIME])
return self._all_risks(traj, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a location sequence attack, both the coordinates and the order of visit are used in the matching.
If a trajectory presents the same locations in the same order as the ones in the instance, a match is found.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
inst = | pd.DataFrame(data=instance, columns=single_traj.columns) | pandas.DataFrame |
import itertools
import json
import ntpath
import os
import pandas as pd
import re
import spacy
from glob import glob
from pprint import PrettyPrinter
from sentence_transformers import SentenceTransformer
from string import punctuation
from tqdm import tqdm
from src.utils import normalize_punctuations
# Download SpaCy models if needed
spacy_model = 'en_core_web_sm'
try:
nlp = spacy.load(spacy_model)
except OSError:
print("\n\n\n Downloading SpaCy model ...")
spacy.cli.download(spacy_model)
nlp = spacy.load(spacy_model)
# Define useful directories
working_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
root_dir = os.path.dirname(working_dir)
data_dir = os.path.join(root_dir, 'dataset')
# Define useful variables
word_pattern = re.compile(r'\w+')
punct_pattern = re.compile(f"[{punctuation}]")
printer = PrettyPrinter(indent=4)
sentiment_mapper = {"pos": 1, "positive": 1,
"neg": 2, "negative": 2,
"neu": 3, "neutral": 3,
"conflict": 4, }
category_mapper = {"Arrival Experience": 1,
"Room Services": 2,
"Breakfast": 3,
"Dinning": 4,
"Bar & Lounge": 5,
"F&B Services": 6,
"Hotel Services": 7,
"Others": 8, }
attribute_mapper = {"Speed of check-in/out": 1,
"Booking accuracy": 2,
"Room availability": 3,
"Timeliness of service": 4,
"Loyalty Recognition": 5,
"Staff were responsive to my needs": 6,
"Accuracy of delivery of service": 7,
"Staff attitude / Staff anticipation": 8,
"Tools & Equipment": 9,
"Operational Hours": 10,
"Food quality": 11,
"Food variety": 12,
"Food temperature": 13,
"Beverage quality": 14,
"Price / Value": 15,
"Service Culture": 16,
"Problem Identification": 17,
"Service Recovery": 18,
"Hotel Facilities": 19,
"Location & Transportation": 20,
"Parking": 21,
"Disabled-Friendliness": 22,
"Room Cleanliness": 23,
"Room Amenities": 24,
"Room Condition": 25,
"Room Odour": 26,
"Noise Pollution": 27,
"Air-Condition": 28,
"Internet Connectivity": 29,
"Pest": 30,
"Shower / Bath Experience": 31,
"Planning": 32,
"Cleaning Process": 33,
"Others": 34, }
attribute_replacement = {"Queue": "Timeliness of service",
"Knowledge of staff": "Staff were responsive to my needs",
"Food Snack / Menu": "Food variety",
"Food presentation": "Food variety",
"Beverages quality": "Beverage quality",
"Beverage": "Beverage quality",
"Operations Hours": "Operational Hours",
"Primary Issues": "Service Culture",
"Track, Measure & Sustain": "Problem Identification",
"Transportation": "Location & Transportation",
"IHG Way of Clean 5-S Cleaning Process": "Cleaning Process",
"Cleaning tools": "Cleaning Process",
"Audits": "Others",
"PMM": "Others",
"PMM tools": "Others",
"Application of tools": "Others"}
def encode_words_location(text: str) -> dict:
# print(f"\n{text}")
text = normalize_punctuations(text)
# Split sentence into phrases by punctuation
punct_locs = [(p.start(), p.end()) for p in punct_pattern.finditer(text)]
if len(punct_locs)==0:
phrases_dict = {0: [0, len(text), text]}
else:
phrases_dict = dict()
phrase_idx = 0
last_punct_end = 0
for p_i, punct_loc in enumerate(punct_locs):
current_punct_start, current_punct_end = punct_loc
if p_i == 0:
if current_punct_start > 0:
phrases_dict[phrase_idx] = [0, current_punct_start, text[:current_punct_start]]
phrase_idx += 1
elif p_i != 0:
phrases_dict[phrase_idx] = [last_punct_end, current_punct_start, text[last_punct_end:current_punct_start]]
phrase_idx += 1
phrases_dict[phrase_idx] = [current_punct_start, current_punct_end, text[current_punct_start:current_punct_end]]
phrase_idx += 1
if p_i == len(punct_locs)-1:
if current_punct_end < len(text):
phrases_dict[phrase_idx] = [current_punct_end, len(text)-1, text[current_punct_end:]]
last_punct_end = current_punct_end
# printer.pprint(phrases_dict)
# Split phrases into words (offset by sentence, not by current phrase)
words_dict = dict()
word_idx = 0
for phrase_idx in range(len(phrases_dict)):
phrase_start, phrase_end, phrase = phrases_dict[phrase_idx]
if phrase_end-phrase_start == 1: # case of punctuation
words_dict[word_idx] = phrases_dict[phrase_idx]
word_idx += 1
phrase_words_dict = {
w_i+word_idx: [w.start()+phrase_start, w.end()+phrase_start, w.group(0)] \
for w_i, w in enumerate(word_pattern.finditer(phrase))
}
word_idx += len(phrase_words_dict)
words_dict.update(phrase_words_dict)
# printer.pprint(words_dict)
# Convert word dictionary to word dataframe --> easy comparison
words_df = | pd.DataFrame(data=words_dict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""predictive_ads_analytics_test.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/14GDm9abDQhHrnjPU-AX3fHcT1JLmsB1u
This notebook documents my process of experimenting with several supervised and
unsupervised models for a classification problem; the selected model is then saved
to a file so it can be consumed after training.
As this is a proof of concept, the code is neither ordered nor optimized.
"""
import pandas as pd
import os
from os import walk
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, precision_score, accuracy_score, recall_score, f1_score, roc_auc_score, roc_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neural_network import MLPClassifier
import xgboost as xgb
from xgboost import plot_tree
import joblib
import tensorflow as tf
import keras
from keras.wrappers.scikit_learn import KerasClassifier
from keras import Sequential
from keras.layers import Dense
from keras.models import load_model
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'credentials.json'
query_url = """
WITH urls_main_table AS (
SELECT
source,
ad_id,
image_asset_url as url
FROM `omg-latam-prd-adh-datahouse-cl.clients_table.main_url_dh`
where source = 'Facebook ads' and image_asset_url != '0'
group by 1,2,3
UNION ALL
SELECT
source,
ad_id,
promoted_tweet_card_image_url as url
FROM `omg-latam-prd-adh-datahouse-cl.clients_table.main_url_dh`
where source = 'Twitter ads' and promoted_tweet_card_image_url != '0'
group by 1,2,3
),
ad_values AS (
SELECT date,
ad_id, spend, post_engagements as clicks
FROM `main_views_tables.main_ad`
where regexp_contains(account_name, '(?i).*scoti.*')
),
spend_average AS (
select ad_id, avg(spend) as avg_spend, avg(post_engagements) as avg_clicks from `main_views_tables.main_ad`
group by 1
),
categorical_feats AS (
SELECT ad_id, main_topic, main_color, locale, second_topic, second_color, third_topic, third_color
FROM `clients_table.main_categorical_values`
)
SELECT
date, source, url, e.ad_id,
main_topic, main_color, locale, second_topic, second_color, third_topic, third_color,
(CASE
WHEN spend >= avg_spend THEN 1
WHEN spend < avg_spend THEN 0
END) as over_avg_spend,
(CASE
WHEN clicks >= avg_clicks THEN 1
WHEN clicks < avg_clicks THEN 0
END) as over_avg_clicks
FROM
(SELECT
date, source, url, c.ad_id,
main_topic, main_color, locale, second_topic, second_color, third_topic, third_color, spend, clicks
FROM (SELECT date,
a.source, a.url, b.ad_id, b.spend, b.clicks
FROM urls_main_table a
RIGHT JOIN ad_values b
ON a.ad_id = b.ad_id) c
INNER JOIN categorical_feats d
ON c.ad_id = d.ad_id) e
INNER JOIN spend_average f
ON e.ad_id = f.ad_id
"""
dataframe = pd.read_gbq(query_url)
dataframe = dataframe[['main_topic', 'main_color', 'second_topic', 'second_color', 'third_topic', 'third_color', 'locale', 'over_avg_spend', 'over_avg_clicks']]
vars = ['main_topic', 'main_color', 'second_topic', 'second_color', 'third_topic', 'third_color', 'locale']
for var in vars:
cat_list = 'var' + '_' + var
cat_list = pd.get_dummies(dataframe[var], prefix= var)
dataframe1 = dataframe.join(cat_list)
dataframe = dataframe1
dataframe
data_vars = dataframe.columns.values.tolist()
to_keep = [i for i in data_vars if i not in vars]
data_final = dataframe[to_keep]
data_final.columns.values
data_final_vars = data_final.columns.values.tolist()
v_y = ['over_avg_clicks']
v_x = [i for i in data_final_vars if i not in v_y]
X = dataframe[v_x]
y = dataframe[v_y]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 40, stratify = y)
model_xgb = xgb.XGBClassifier(objective = 'binary:logistic', n_estimators = 105, max_depth=75, seed = 10, reg_alpha=7)
model_xgb.fit(X_train.values, y_train.values.ravel())
y_pred = model_xgb.predict(X_test.values)
print('Area debajo de la curva:', roc_auc_score(y_test, y_pred))
print('Recall:', recall_score(y_test, y_pred))
print('Precision:', precision_score(y_test, y_pred))
print('accuracy:', round(accuracy_score(y_test, y_pred), 2))
print(confusion_matrix(y_test, y_pred))
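# The notebook header says the chosen model is saved to a file for later use; a minimal
# sketch with the already-imported joblib (the file name here is an assumption):
joblib.dump(model_xgb, 'model_xgb_over_avg_clicks.joblib')
# model_xgb = joblib.load('model_xgb_over_avg_clicks.joblib')  # reload when serving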
base_frame = dict.fromkeys(list(X_train.columns), [0])
main_topic = 'Clothing' #@param ['Table', 'Smile','Laptop', 'video', 'Arm', 'Hair', 'Jaw', 'Forehead','Head','Chin','Clothing','Sleeve','Plant','Tire', 'Eyelash','Hand', 'Mobilephone', 'Glasses','Shorts']
second_topic = 'Jaw' #@param ['Jaw', 'Product', 'Flashphotography', 'Sleeve', 'Beard', 'Computer', 'Glasses', 'Visioncare', 'Fashion', 'Shirt', 'Jeans', 'Wheel', 'Jersey', 'Smile', 'CommunicationDevice', 'Plant', 'Mobilephone', 'Green', 'Chin', 'Human']
third_topic = 'Font' #@param ['Beard', 'Font', 'Jersey', 'Jaw', 'Chin', 'Eyewear', 'Sleeve', 'Cap', 'Smile', 'Tableware', 'Personalcomputer', 'Eyelash', 'Skin', 'Landvehicle', 'Tabletcomputer', 'Gesture', 'Organism', 'Outerwear', 'Flashphotography', 'Sportsuniform', 'Furniture']
locale = 'en' #@param ['es', 'en']
main_color = 'skyblue' #@param ['black', 'darkslategray', 'darkolivegreen', 'cadetblue', 'dodgerblue', 'mediumpurple', 'hotpink', 'skyblue', 'dimgray', 'linen', 'yellowgreen', 'sienna']
second_color = 'darkgray' #@param ['darkslategray', 'dimgray', 'black', 'darkgray', 'seagreen','skyblue', 'maroon', 'paleturquoise', 'silver', 'crimson','darkgreen', 'slategray', 'mediumpurple', 'gray']
third_color = 'lightgray' #@param ['darkslategray', 'slateblue', 'lightgray', 'mistyrose', 'gray', 'maroon', 'black', 'tan', 'darkgray', 'crimson', 'slategray', 'dimgray', 'silver']
over_avg_spend = 'False' #@param ['True', 'False']
if over_avg_spend == 'False':
over_avg_spend = 0
else:
over_avg_spend = 1
dataframe = pd.DataFrame({'main_topic':[main_topic], 'main_color':[main_color], 'second_topic':[second_topic], 'second_color':[second_color],
'third_topic':[third_topic], 'third_color':[third_color], 'locale':[locale], 'over_avg_spend':[over_avg_spend]})
vars = ['main_topic', 'main_color', 'second_topic', 'second_color', 'third_topic', 'third_color', 'locale']
for var in vars:
cat_list = 'var' + '_' + var
cat_list = pd.get_dummies(dataframe[var], prefix= var)
dataframe1 = dataframe.join(cat_list)
dataframe = dataframe1
data_vars = dataframe.columns.values.tolist()
to_keep = [i for i in data_vars if i not in vars]
data_final = dataframe[to_keep]
my_dict_frame = data_final.to_dict('records')
base_frame.update(my_dict_frame[0])
to_predict_frame = | pd.DataFrame(base_frame) | pandas.DataFrame |
# import all the required files i.e. numpy , pandas and math library
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
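# e.g. atr(ohlc) yields a Series named '14 ATR'; atr(ohlc, period=7) averages TR over 7 bars.
# (tr() is assumed to be defined further down in this alphabetically ordered module.)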
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility Here change and volatility are absolute
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
assert period % 2 == 0, print("FRAMA period must be even")
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: int = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# wma of change in wma where change in wma is 2 * (wma half period) - (wma full period)
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
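# e.g. hma(ohlc, 16) builds WMA(8) and WMA(16), forms 2*WMA(8) - WMA(16) as 'deltawma',
# then smooths it with a final WMA(4) (sqrt of 16), which is what keeps the lag low.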
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
er_ = er(data)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), data[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
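# Worked example of the smoothing constant above: with ER = 1 it collapses to the fast
# alpha squared, (2/3)**2 ~ 0.44, while ER = 0 gives the slow alpha squared,
# (2/31)**2 ~ 0.004, so trending prices are tracked far more tightly than noisy ones.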
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD = pd.Series(EMA_fast - EMA_slow,name='MACD')
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL'
)
DIFF = pd.Series(
MACD - MACD_signal,
name="diff MACD_MSIGNAL"
)
return pd.concat(
[DIFF, MACD, MACD_signal ],
axis=1
)
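# Illustrative usage (assumes `ohlc` is a DataFrame with open/high/low/close/volume columns,
# as every helper in this module expects):
#   macd_df = macd(ohlc)                              # 'diff MACD_MSIGNAL', 'MACD', '9_SIGNAL'
#   bullish = macd_df['diff MACD_MSIGNAL'] > 0        # MACD line above its signal line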
# [0] __ Moving Standard Deviation (MSD)
# Standard deviation of a given period for the column passed as arguement
def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series:
return pd.Series(data[column].rolling(period).std(), name="MSD")
# Momentum Breakout Bands (MOBO)
# TODO
def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8,
column: str = "close",) -> DataFrame:
    BB = bbands(data, period=period, std_multiplier=std_multiplier, column=column)
return BB
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
return pd.Series(data[column].diff(period),
name=f'{period}_MOM'
)
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period:int = 9) -> Series:
data["cv"] =(data["close"] * data["volume"])
return pd.Series(
(sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")),
name="MVWAP."
)
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
    s1 = df_['close']-(1.1*(df_['high']-df_['low'])/12)
s2 = df_['close']-(1.1*(df_['high']-df_['low'])/6)
s3 = df_['close']-(1.1*(df_['high']-df_['low'])/4)
s4 =df_['close']-(1.1*(df_['high']-df_['low'])/2)
r1 = df_['close']+(1.1*(df_['high']-df_['low'])/12)
r2 = df_['close']+(1.1*(df_['high']-df_['low'])/6)
r3 =df_['close']+(1.1*(df_['high']-df_['low'])/4)
r4 = df_['close']+(1.1*(df_['high']-df_['low'])/2)
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Classic
# TODO
def pivot_classic(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = pivot - 2*(df_["high"] - df_["low"])
s4 = pivot - 3*(df_["high"] - df_["low"])
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = pivot + 2*(df_["high"] - df_["low"])
r4 = pivot + 3*(df_["high"] - df_["low"])
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
| pd.Series(r2, name="r2") | pandas.Series |
from django.core.management.base import BaseCommand, CommandError
from etldjango.settings import GCP_PROJECT_ID, BUCKET_NAME, BUCKET_ROOT
from .utils.storage import GetBucketData
from .utils.health_provider import GetHealthFromGoogleMaps
from .utils.extractor import Data_Extractor
from .utils.urllibmod import urlretrieve
from datetime import datetime, timedelta
from etldata.models import DB_farmacias
from .utils.unicodenorm import normalizer_str
from django.db.models import F, Sum, Avg, Count, StdDev, Max, Q
from django.contrib.gis.geos import Point
#from django.utils import timezone
from tqdm import tqdm
import json
import pandas as pd
import numpy as np
from urllib.request import urlopen
import os
import time
import tabula
import re
# datetime.now(tz=timezone.utc) # you can use this value
URL_MINSA_REPORT = "https://www.dge.gob.pe/portalnuevo/covid-19/covid-cajas/situacion-del-covid-19-en-el-peru/"
class Command(BaseCommand):
help = "Command for load the drugstore providers"
bucket = GetBucketData(project_id=GCP_PROJECT_ID)
googleapi = GetHealthFromGoogleMaps()
oxi_pre_loaded = 'farmacias_negocio.csv'
ubigeo = 'ubigeo_gps.csv'
def add_arguments(self, parser):
"""
Example:
"""
parser.add_argument(
'mode', type=str, help="csv/search , csv: load the data from a csv file. search: to search using the googlemaps API")
def print_shell(self, text):
self.stdout.write(self.style.SUCCESS(text))
def download_csv_from_bucket(self, filename):
self.print_shell("Downloading csv from bucket ...")
self.bucket.download_blob(bucket_name=BUCKET_NAME,
source_blob_name="data_source/"+filename,
destination_file_name='temp/'+filename)
def save_table(self, table, db):
records = table.to_dict(orient='records')
records = [db(**record) for record in tqdm(records)]
_ = db.objects.all().delete()
_ = db.objects.bulk_create(records)
def handle(self, *args, **options):
mode = options["mode"]
assert mode in ['search', 'csv'], "Error in --mode argument"
if mode == 'search':
self.download_csv_from_bucket(self.ubigeo)
table = self.search_using_googlemaps_api()
elif mode == 'csv':
# self.download_csv_from_bucket(self.oxi_pre_loaded)
table = self.read_farmacia_preloaded()
table = self.format_table(table)
self.save_table(table, DB_farmacias)
self.print_shell("Work Done!")
def read_ubigeo_gps(self):
ubigeo2 = pd.read_csv('temp/' + self.ubigeo)
ubigeo2['location'] = ubigeo2['location'].apply(
lambda x: json.loads(x.replace("\'", "\"")))
#ubigeo2 = ubigeo2.groupby('NOMBDEP').first().reset_index()
ubigeo2.head()
return ubigeo2
def read_farmacia_preloaded(self):
table = pd.read_csv('temp/'+self.oxi_pre_loaded)
# print(table.info())
# print(table.head())
return table
def search_using_googlemaps_api(self,):
ubigeo = self.read_ubigeo_gps()
whole_places = []
with tqdm(total=len(ubigeo)) as pbar:
for location, departamento in zip(ubigeo.location, ubigeo.NOMBDEP):
places_list = self.googleapi.get_drugstore_places_from_points(
location, 8000, 'Perú')
#_ = [place.update({'departamento': departamento}) for place in places_list]
whole_places = whole_places + places_list
pbar.update(1)
print('Number of records: ', len(whole_places))
df = | pd.DataFrame.from_records(whole_places) | pandas.DataFrame.from_records |
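# A small sketch of the DataFrame <-> records round trip that save_table and the
# search path above rely on: to_dict(orient="records") yields one dict per row
# (ready to unpack into model constructors), and from_records rebuilds a frame
# from such dicts. The column names below are illustrative assumptions only.
import pandas as pd

places = [
    {"name": "Farmacia Central", "lat": -12.05, "lon": -77.03},
    {"name": "Botica Sur", "lat": -12.10, "lon": -77.01},
]
df = pd.DataFrame.from_records(places)
records = df.to_dict(orient="records")       # list of per-row dicts
assert records[0]["name"] == "Farmacia Central"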
from io import StringIO
import geoglows
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import requests
from scipy import interpolate
from scipy import stats
def collect_data(start_id, start_ideam_id, downstream_id, downstream_ideam_id):
# Upstream simulated flow
start_flow = geoglows.streamflow.historic_simulation(start_id)
start_fdc = compute_flow_duration_curve(start_flow.values.flatten())
# Upstream observed flow
start_ideam_flow = get_ideam_flow(start_ideam_id)
start_ideam_flow.dropna(inplace=True)
start_ideam_fdc = compute_flow_duration_curve(start_ideam_flow.values.flatten())
# upstream bias corrected flow
start_bc_flow = geoglows.bias.correct_historical(start_flow, start_ideam_flow)
start_bc_fdc = compute_flow_duration_curve(start_bc_flow.values.flatten())
# Downstream simulated flow
downstream_flow = geoglows.streamflow.historic_simulation(downstream_id)
# downstream_fdc = compute_flow_duration_curve(downstream_flow.values.flatten())
# Downstream observed flow
downstream_ideam_flow = get_ideam_flow(downstream_ideam_id)
downstream_ideam_flow.dropna(inplace=True)
# Downstream bias corrected flow (for comparison to the propagation method)
downstream_bc_flow = geoglows.bias.correct_historical(downstream_flow, downstream_ideam_flow)
# Export all as csv
start_flow.to_csv('start_flow.csv')
start_fdc.to_csv('start_fdc.csv')
start_ideam_flow.to_csv('start_ideam_flow.csv')
start_ideam_fdc.to_csv('start_ideam_fdc.csv')
start_bc_flow.to_csv('start_bc_flow.csv')
start_bc_fdc.to_csv('start_bc_fdc.csv')
downstream_flow.to_csv('downstream_flow.csv')
downstream_ideam_flow.to_csv('downstream_ideam_flow.csv')
downstream_bc_flow.to_csv('downstream_bc_flow.csv')
return
def get_ideam_flow(id):
# get the gauged data
url = f'https://www.hydroshare.org/resource/d222676fbd984a81911761ca1ba936bf/' \
f'data/contents/Discharge_Data/{id}.csv'
df = pd.read_csv(StringIO(requests.get(url).text), index_col=0)
df.index = pd.to_datetime(df.index).tz_localize('UTC')
return df
def find_downstream_ids(df: pd.DataFrame, target_id: int, same_order: bool = True):
downstream_ids = []
stream_row = df[df['COMID'] == target_id]
stream_order = stream_row['order_'].values[0]
if same_order:
while stream_row['NextDownID'].values[0] != -1 and stream_row['order_'].values[0] == stream_order:
downstream_ids.append(stream_row['NextDownID'].values[0])
stream_row = df[df['COMID'] == stream_row['NextDownID'].values[0]]
else:
while stream_row['NextDownID'].values[0] != -1:
downstream_ids.append(stream_row['NextDownID'].values[0])
stream_row = df[df['COMID'] == stream_row['NextDownID'].values[0]]
return tuple(downstream_ids)
def compute_flow_duration_curve(hydro: list or np.array, prob_steps: int = 500, exceedence: bool = True):
percentiles = [round((1 / prob_steps) * i * 100, 5) for i in range(prob_steps + 1)]
flows = np.nanpercentile(hydro, percentiles)
if exceedence:
percentiles.reverse()
columns = ['Exceedence Probability', 'Flow']
else:
columns = ['Non-Exceedence Probability', 'Flow']
return pd.DataFrame(np.transpose([percentiles, flows]), columns=columns)
def compute_scalar_bias_fdc(first_series, second_series):
first_fdc = compute_flow_duration_curve(first_series)
second_fdc = compute_flow_duration_curve(second_series)
ratios = np.divide(first_fdc['Flow'].values.flatten(), second_fdc['Flow'].values.flatten())
scalars_df = pd.DataFrame(np.transpose([first_fdc.values[:, 0], ratios]))
scalars_df.replace(np.inf, np.nan, inplace=True)
scalars_df.dropna(inplace=True)
return scalars_df
# collect_data(9017261, 32037030, 9015333, 32097010)
# collect_data(9012999, 22057070, 9012650, 22057010)
# Read all as csv
start_flow = pd.read_csv('start_flow.csv', index_col=0)
start_fdc = | pd.read_csv('start_fdc.csv', index_col=0) | pandas.read_csv |
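# Hedged sketch of the flow-duration-curve idea implemented above: sample the
# flow record at a grid of percentiles and pair each flow with its exceedance
# probability. The gamma-distributed flows are synthetic, so the numbers are
# illustrative only; the column names mirror compute_flow_duration_curve.
import numpy as np
import pandas as pd

flows = np.random.default_rng(0).gamma(shape=2.0, scale=50.0, size=1000)
percentiles = np.linspace(0, 100, 11)
fdc = pd.DataFrame({
    "Exceedence Probability": percentiles[::-1],   # large flows are rarely exceeded
    "Flow": np.nanpercentile(flows, percentiles),
})
print(fdc)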
# coding: utf-8
# In[5]:
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import Adagrad, Adam, RMSprop
from keras.objectives import mean_squared_error
from keras.regularizers import l2
import seaborn as snb
from utils.GraphUtil import *
from utils.SlidingWindowUtil import SlidingWindow
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import MinMaxScaler
# In[2]:
dat = pd.read_csv('sampling_617685_metric_10min_datetime.csv', index_col=0, parse_dates=True)
# In[3]:
n_sliding_window = 4
scaler = MinMaxScaler()
scale_dat = scaler.fit_transform(dat.cpu_rate)
dat_sliding = np.array(list(SlidingWindow(scale_dat, n_sliding_window)))
X_train_size = int(len(dat_sliding)*0.7)
# sliding = np.array(list(SlidingWindow(dat_sliding, n_sliding_window)))
# sliding = np.array(dat_sliding, dtype=np.int32)
X_train = dat_sliding[:X_train_size]
y_train = scale_dat[n_sliding_window:X_train_size+n_sliding_window].reshape(-1,1)
X_test = dat_sliding[X_train_size:]
y_test = scale_dat[X_train_size+n_sliding_window-1:].reshape(-1,1)
# In[37]:
model = Sequential([
Dense(n_sliding_window+2, input_dim=n_sliding_window, activation='relu',init='uniform'),
Dense(1)
])
np.random.seed(7)
## LSTM neural network
# In[43]:
from keras.layers import LSTM
# In[44]:
len_test = 1200
batch_size = 10
time_steps = 1
Xtrain = np.reshape(X_train, (X_train.shape[0], time_steps, n_sliding_window))
ytrain = np.reshape(y_train, (y_train.shape[0], time_steps, y_train.shape[1]))
Xtest = np.reshape(X_test, (X_test.shape[0], time_steps, n_sliding_window))
# In[55]:
model = Sequential()
model.add(LSTM(6,batch_input_shape=(batch_size,time_steps,n_sliding_window),stateful=True,activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adagrad')
# In[56]:
history = model.fit(Xtrain[:2900],y_train[:2900], nb_epoch=6000,batch_size=batch_size,shuffle=False, verbose=1,
validation_data=(Xtest[:len_test],y_test[:len_test]))
# In[57]:
log = history.history
df = | pd.DataFrame.from_dict(log) | pandas.DataFrame.from_dict |
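# Hedged sketch of the sliding-window construction that SlidingWindow is assumed
# to perform above: each sample is a window of the previous n values and the
# target is the value that follows it. Pure NumPy on a toy series, so the
# resulting shapes are easy to verify by hand.
import numpy as np

series = np.arange(10, dtype=float)
window = 4
X = np.array([series[i:i + window] for i in range(len(series) - window)])
y = series[window:]
print(X.shape, y.shape)   # (6, 4) (6,)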
import streamlit as st
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from nltk.tokenize import sent_tokenize
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
'''
# BERTerReads
---
'''
@st.cache(allow_output_mutation=True)
def load_model():
'''
Function to load (and cache) DistilBERT model
'''
model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
return model
@st.cache(allow_output_mutation=True)
def get_reviews(url):
'''
Function to scrape all the reviews from the first page of a GoodReads book URL
'''
# Download & soupify webpage
r = requests.get(url)
soup = BeautifulSoup(r.content, features='html.parser')
# Find all review text blocks
reviews_src = soup.find_all('div', class_='reviewText stacked')
# Initialize list to store cleaned review text
reviews = []
# Loop through each review text block
for review in reviews_src:
# Extract review text
try:
text = review.find('span', style='display:none').get_text(' ', strip=True)
except:
text = review.get_text(' ', strip=True)
# Remove spoiler tags from review text
text = re.sub(r'\(view spoiler\) \[', '', text)
text = re.sub(r'\(hide spoiler\) \] ', '', text)
# Append review text to list
reviews.append(text)
# Transform review list to dataframe
df = pd.DataFrame(reviews, columns=['review'])
return df
@st.cache
def clean_reviews(df):
'''
Function to clean review text and divide into individual sentences
'''
# Append space to all sentence end characters
df['review'] = df['review'].str.replace('.', '. ', regex=False).str.replace('!', '! ', regex=False).str.replace('?', '? ', regex=False)
# Initialize dataframe to store review sentences
sentences_df = pd.DataFrame()
# Loop through each review
for i in range(len(df)):
# Save review to variable
review = df.iloc[i]['review']
# Tokenize review into sentences
sentences = sent_tokenize(review)
# Transform sentences into dataframe
new_sentences = | pd.DataFrame(sentences, columns=['sentence']) | pandas.DataFrame |
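# A compact pandas alternative to the per-review loop above: split each review
# into sentences and explode to one row per sentence. A naive regex split stands
# in for nltk's sent_tokenize here so the sketch needs no tokenizer download;
# the sample reviews are made up.
import re
import pandas as pd

df = pd.DataFrame({"review": ["Great book. Loved it!", "Too long? Maybe."]})
sentences_df = (
    df["review"]
    .apply(lambda r: re.split(r"(?<=[.!?])\s+", r))
    .explode()
    .to_frame("sentence")
    .reset_index(drop=True)
)
print(sentences_df)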
# -*- coding: utf-8 -*-
'''
Author: <NAME>, <NAME>, <NAME>, <NAME>
Version: 1.3
Server for hosting the FaSta dashboard
Copyright 2018 The Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
'''
import sys
import dash
import dash_auth
import dash_core_components
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import flask
import pandas as pd
import plotly.graph_objs as go
import pymongo
import threading
from dash.dependencies import Input, Output
import os
import collections
from pprint import pprint
from pymongo.command_cursor import CommandCursor
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from types import *
import pandas as pd
import numpy as np
from pandas import DataFrame
sys.path.append('./Clients')
import folium
from geopy.geocoders import Nominatim
#from sqlalchemy import create_engine
import psycopg2
########################################################################## #############################################################################################################################################
########################################################################## Web Application #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Constants
MONGO_URL = os.environ.get('MONGO_URI')
POSTGRESS_URL = os.environ.get('POSTGRES_URL')
HOST_ID = '0.0.0.0'
PORT = '37002'
print('FaSta server initialized!')
def createGraphDataForEscalatorPage(numberOfLastEntries: int):
ergDF = pd.DataFrame(columns=['Datum', 'Anzahl_Ausfälle'])
facilities_collection = facilities.find({})
pandas_facilities = pd.DataFrame(list(facilities_collection))
pandas_facilities = pandas_facilities[['equipmentnumber', 'datetime', 'state']]
facilities_distinct = pandas_facilities
facilities_distinct.columns = ['ID', 'Datum', 'Status']
facilities_distinct['Datum'] = | pd.to_datetime(facilities_distinct['Datum'], format="%Y-%m-%d_%H-%M-%S") | pandas.to_datetime |
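# Hedged sketch of the timestamp parsing used above: the facility records carry
# datetimes as "YYYY-mm-dd_HH-MM-SS" strings, which pd.to_datetime parses when
# given the matching format string. The sample values are made up.
import pandas as pd

stamps = pd.Series(["2018-03-01_14-30-00", "2018-03-01_15-00-00"])
parsed = pd.to_datetime(stamps, format="%Y-%m-%d_%H-%M-%S")
print(parsed.dt.hour.tolist())   # [14, 15]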
from tabnanny import check
import numpy as np
import pandas as pd
import pytest
from epymetheus import Trade
from epymetheus import trade
from epymetheus.benchmarks import RandomStrategy
from epymetheus.datasets import make_randomwalk
from epymetheus.trade import check_trade
class TestTrade:
# handmade universe
universe_hand = pd.DataFrame(
{
"A": [3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0],
"B": [2.0, 7.0, 1.0, 8.0, 2.0, 8.0, 1.0],
}
)
def test_init_array(self):
t = trade("A", lot=1.0)
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
t = trade("A", lot=[1.0])
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
t = trade(["A"], lot=[1.0])
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
t = trade(["A", "B"], lot=1.0)
t = trade(["A", "B"], lot=[1.0, 2.0])
assert isinstance(t.asset, np.ndarray)
assert isinstance(t.lot, np.ndarray)
def test_init_shape(self):
t = trade("A", lot=1.0)
assert t.asset.shape == (1,)
assert t.lot.shape == (1,)
t = trade(["A"], lot=1.0)
assert t.asset.shape == (1,)
assert t.lot.shape == (1,)
t = trade("A", lot=[1.0])
assert t.asset.shape == (1,)
assert t.lot.shape == (1,)
t = trade(["A", "B"], lot=1.0)
assert t.asset.shape == (2,)
assert t.lot.shape == (2,)
t = trade(["A", "B"], lot=[1.0])
assert t.asset.shape == (2,)
assert t.lot.shape == (2,)
t = trade(["A", "B"], lot=[1.0, 2.0])
assert t.asset.shape == (2,)
assert t.lot.shape == (2,)
def test_repr(self):
t = trade("A")
assert repr(t) == "trade(['A'], lot=[1.])"
t = trade("A", lot=2, take=3.0, stop=-3.0, entry="B0", exit="B1")
assert (
repr(t) == "trade(['A'], lot=[2], entry=B0, exit=B1, take=3.0, stop=-3.0)"
)
def test_array_value_value_hand(self):
t = [2.0, -3.0] * trade(["A", "B"], entry=1, exit=3)
result = t.array_value(self.universe_hand)
expect = np.array(
[
[6.0, -6.0],
[2.0, -21.0],
[8.0, -3.0],
[2.0, -24.0],
[10.0, -6.0],
[18.0, -24.0],
[4.0, -3.0],
]
)
assert np.allclose(result, expect)
t = [-3.0, 2.0] * trade(["B", "A"], entry=1, exit=3)
result = t.array_value(universe=self.universe_hand)
expect = expect[:, [1, 0]]
assert np.allclose(result, expect)
def test_exposure(self):
universe = pd.DataFrame(
{"A0": [1, 2, 3, 4, 5], "A1": [2, 3, 4, 5, 6], "A2": [3, 4, 5, 6, 7]}
)
t = [1, -1] * trade(["A0", "A2"], entry=1, exit=3).execute(universe)
result = t.exposure(universe)
expect = pd.DataFrame(
[[0, 0, 0], [0, 0, 0], [3, 0, -5], [4, 0, -6], [0, 0, 0]],
index=universe.index,
columns=universe.columns,
dtype=float,
)
pd.testing.assert_frame_equal(result, expect)
def test_net_exposure(self):
universe = pd.DataFrame(
{"A0": [1, 2, 3, 4, 5], "A1": [2, 3, 4, 5, 6], "A2": [3, 4, 5, 6, 7]}
)
t = [1, -1] * trade(["A0", "A2"], entry=1, exit=3).execute(universe)
result = t.net_exposure(universe)
expect = pd.Series([0, 0, -2, -2, 0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test_abs_exposure(self):
universe = pd.DataFrame(
{"A0": [1, 2, 3, 4, 5], "A1": [2, 3, 4, 5, 6], "A2": [3, 4, 5, 6, 7]}
)
t = [1, -1] * trade(["A0", "A2"], entry=1, exit=3).execute(universe)
result = t.abs_exposure(universe)
expect = | pd.Series([0, 0, 8, 10, 0], dtype=float) | pandas.Series |
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
class TestSeries_ops(TestCase):
def test_series_operators_int(self):
"""Verifies using all various Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_int_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on an integer Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
def test_series_operators_float(self):
"""Verifies using all various Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_float_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on a float Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
@skip_numba_jit('operator.neg for SeriesType is not implemented in yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_operators_comp_numeric(self):
"""Verifies using all various Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
B = pd.Series(data_right, index=index_data)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operators_comp_numeric_scalar(self):
"""Verifies using all various Series comparison binary operators on an integer Series and scalar values"""
S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0])
scalar_values = [2, 2.0, -3, np.inf, -np.inf, np.PZERO, np.NZERO]
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
def test_series_operators_comp_str_scalar(self):
"""Verifies using all various Series comparison binary operators on an string Series and scalar values"""
S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])
scalar_values = ['a', 'aa', 'ab', 'ba', '']
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
@skip_numba_jit
def test_series_operators_inplace_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = self.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 1)
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion2(self):
def test_impl(A, B):
S = B + 2
if A.iat[0] == 0:
S = A + 1
return S + B
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 3)
def test_series_operator_add_numeric_scalar(self):
"""Verifies Series.operator.add implementation for numeric series and scalar second operand"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']}
int_scalar = 24
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
result = hpat_func(A, int_scalar)
result_ref = test_impl(A, int_scalar)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
float_scalar = 24.0
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
ref_result = test_impl(A, float_scalar)
result = hpat_func(A, float_scalar)
pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False)
def test_series_operator_add_numeric_same_index_default(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), dtype=dtype_left)
B = pd.Series(np.arange(n)**2, dtype=dtype_right)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
@skip_numba_jit
def test_series_operator_add_numeric_same_index_numeric(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_numeric_fixme(self):
""" Same as test_series_operator_add_same_index_numeric but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
or np.issubdtype(dtype_left, np.float) and np.issubdtype(dtype_right, np.float)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with the same string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_int(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's']
index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's']
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
@skip_numba_jit('TODO: fix Series.sort_values to handle both None and '' in string series')
def test_series_operator_add_numeric_align_index_str_fixme(self):
"""Same as test_series_operator_add_align_index_str but with None values in string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None]
index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_other_dtype(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with non-equal integer indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64))
B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_diff_series_sizes(self):
"""Verifies implementation of Series.operator.add between two numeric Series with different sizes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
size_A, size_B = 7, 25
A = pd.Series(np.arange(size_A))
B = pd.Series(np.arange(size_B)**2)
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
def test_series_operator_add_align_index_int_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of numeric indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 20000
np.random.seed(0)
index1 = np.random.randint(-30, 30, n)
index2 = np.random.randint(-30, 30, n)
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_align_index_str_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of string indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 2000
np.random.seed(0)
valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd']
index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_align_index_int(self):
"""Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
A = pd.Series(data, index=index_A)
B = pd.Series(data, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_result_name1(self):
"""Verifies name of the Series resulting from appying Series.operator.add to different arguments"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_names = ['A', '', None, 'B']
for left_name, right_name in combinations(series_names, 2):
S1 = pd.Series(np.arange(n), name=left_name)
S2 = pd.Series(np.arange(n, 0, -1), name=right_name)
with self.subTest(left_series_name=left_name, right_series_name=right_name):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
# also verify case when second operator is scalar
scalar = 3.0
with self.subTest(scalar=scalar):
S1 = pd.Series(np.arange(n), name='A')
pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_result_name2(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in returning unnamed Series when both operands are named Series with the same name"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
S1 = pd.Series(np.arange(n), name='A')
S2 = pd.Series(np.arange(n, 0, -1), name='A')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_series_dtype_promotion(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in dtype of resulting Series that is fixed to float64"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.array(np.arange(n), dtype=dtype_left))
B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_add_str_scalar(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [' ', 'wq', '', '23']
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_add_str_unsupported(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
1,
3.0,
pd.Series(np.arange(n)),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator add(). Not supported for not-comparable operands.'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_mul_str_scalar(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', ' ', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [-1, 0, 2, 5]
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series([-1, 2, 0, 5, 3, -5, 4])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_mul_str_align_index_int1(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes containg same unique values (so alignment doesn't produce NaNs) """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
shuffled_data = np.arange(n, dtype=np.int)
np.random.shuffle(shuffled_data)
index_A = shuffled_data
np.random.shuffle(shuffled_data)
index_B = shuffled_data
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
@unittest.expectedFailure # pandas can't calculate this due to adding NaNs to int series during alignment
def test_series_operator_mul_str_align_index_int2(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes that cannot be aligned without NaNs """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_unsupported(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
'abc',
3.0,
pd.Series(series_data),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator mul(). Not supported between operands of types:'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_lt_index_mismatch1(self):
"""Verifies correct exception is raised when comparing Series with non equal integer indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index1 = np.arange(n)
index2 = np.copy(index1)
np.random.shuffle(index2)
A = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0], index=index1)
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1], index=index2)
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
def test_series_operator_lt_index_mismatch2(self):
"""Verifies correct exception is raised when comparing Series of different size with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series([1, 2, -1, 3, 4, 2])
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1])
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
@skip_numba_jit('Numba propagates different exception:\n'
'numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)\n'
'Internal error at <numba.core.typeinfer.IntrinsicCallConstraint ...\n'
'\'Signature\' object is not iterable')
def test_series_operator_lt_index_mismatch3(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
with self.assertRaises(TypingError) as raises:
hpat_func(S1, S2)
msg = 'Operator lt(). Not supported for series with not-comparable indexes.'
self.assertIn(msg, str(raises.exception))
@skip_numba_jit("TODO: find out why pandas aligning series indexes produces Int64Index when common dtype is float\n"
"AssertionError: Series.index are different\n"
"Series.index classes are not equivalent\n"
"[left]: Float64Index([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype='float64')\n"
"[right]: Int64Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype='int64')\n")
def test_series_operator_lt_index_dtype_promotion(self):
"""Verifies implementation of Series.operator.lt between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_lt_index_dtype_promotion_fixme(self):
""" Same as test_series_operator_lt_index_dtype_promotion but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
or np.issubdtype(dtype_left, np.float) and np.issubdtype(dtype_right, np.float)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_lt_unsupported_dtypes(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable dtypes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
with self.assertRaises(TypingError) as raises:
hpat_func(S1, S2)
msg = 'Operator lt(). Not supported for not-comparable operands.'
self.assertIn(msg, str(raises.exception))
def test_series_operator_lt_str(self):
"""Verifies implementation of Series.operator.lt between two string Series with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
def test_series_binops_numeric(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
n = 11
cases_series = [
pd.Series(np.arange(1, n), name='A'),
pd.Series(np.ones(n - 1), name='B'),
pd.Series(np.arange(1, n) / 2, name='C'),
]
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for S1, S2 in combinations(cases_series, 2):
with self.subTest(S1=S1, S2=S2, method=method):
# check_dtype=False because SDC arithmetic methods return only float Series
pd.testing.assert_series_equal(
hpat_func(S1, S2),
test_impl(S1, S2),
check_dtype=False)
def test_series_binops_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
n = 11
cases_series = [
pd.Series(np.arange(1, n)),
pd.Series(np.ones(n - 1)),
pd.Series(np.arange(1, n) / 2),
]
cases_scalars = [0, 5, 0.5]
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for S1, scalar in product(cases_series, cases_scalars):
with self.subTest(S1=S1, scalar=scalar, method=method):
# check_dtype=False because SDC arithmetic methods return only float Series
pd.testing.assert_series_equal(
hpat_func(S1, scalar),
test_impl(S1, scalar),
check_dtype=False)
def test_series_binops_comp_numeric(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float64),
gen_frand_array(n),
np.ones(n, dtype=np.int32),
np.random.randint(-5, 5, n)]
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for data1, data2 in product(data_to_test, repeat=2):
A = pd.Series(data1)
B = pd.Series(data2)
with self.subTest(A=A, B=B):
pd.testing.assert_series_equal(
hpat_func(A, B),
test_impl(A, B),
check_names=False)
def test_series_binops_comp_numeric_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float64),
gen_frand_array(n),
np.ones(n, dtype=np.int32),
np.random.randint(-5, 5, n)]
scalar_values = [1, -1, 0, 3, 7, -5, 4.2]
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for data, scalar in product(data_to_test, scalar_values):
S = pd.Series(data)
with self.subTest(S=S, scalar=scalar, method=method):
pd.testing.assert_series_equal(
hpat_func(S, scalar),
test_impl(S, scalar),
check_names=False)
def test_series_binop_add_numeric(self):
"""Verifies implementation of Series.add method and fill_value param support on two float Series"""
def test_impl(S1, S2, value):
return S1.add(S2, fill_value=value)
sdc_func = self.jit(test_impl)
n = 100
np.random.seed(0)
cases_data = [
np.arange(n, dtype=np.float64),
gen_frand_array(n, nancount=25),
]
cases_index = [
None,
np.arange(n),
np.random.choice(np.arange(n), n, replace=False),
]
cases_value = [
None,
np.nan,
4,
5.5
]
for value, (arr1, arr2), (index1, index2) in product(
cases_value,
combinations_with_replacement(cases_data, 2),
combinations_with_replacement(cases_index, 2)):
S1 = pd.Series(arr1, index1)
S2 = | pd.Series(arr2, index2) | pandas.Series |
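# Hedged sketch of the index-alignment behaviour that many of the tests above
# verify: adding two Series aligns them on the union of their indexes and fills
# labels missing from either side with NaN before the arithmetic is applied.
import pandas as pd

a = pd.Series([1.0, 2.0, 3.0], index=["x", "y", "z"])
b = pd.Series([10.0, 20.0, 30.0], index=["y", "z", "w"])
print(a + b)   # w and x -> NaN; y -> 12.0; z -> 23.0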
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import math
def writecsv(results, filename):
print('writing %s.csv' % filename)
try:
csv_file = open('%s.csv' % filename, 'wb')
results.to_csv(csv_file)
except:
csv_file = open('%s.csv' % filename, 'w')
results.to_csv(csv_file)
csv_file.close()
def calc_realized(last, fut, period, time):
if last != 0:
fut = pd.concat([ | pd.DataFrame([last]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Purpose: Perform automated testing on pdvalidate.
:Platform: Linux/Windows | Python 3.5
:Developer: <NAME>
:Email: <EMAIL>
"""
# pylint: disable=protected-access
# pylint: disable=wrong-import-position
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import datetime
import numpy as np
import pytest
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pdvalidate.validation import ei, \
validate as pdv, \
ValidationWarning
class TestReturnTypes():
strings = pd.Series(['1', '1', 'ab\n', 'a b', 'Ab', 'AB', np.nan])
masks = [pd.Series([False, False, False, True, True, False, False]),
pd.Series([True, True, False, True, True, False, True])]
def test_return_mask_series(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'mask_series'),
pd.Series([True, True, False, True, True, False, True]))
def test_return_mask_frame(self):
assert_frame_equal(pdv._get_return_object(self.masks, self.strings, 'mask_frame'),
pd.concat(self.masks, axis='columns'))
def test_return_values(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'values'),
pd.Series([np.nan, np.nan, 'ab\n', np.nan, np.nan, 'AB', np.nan]))
def test_wrong_return_type(self):
with pytest.raises(ValueError):
pdv._get_return_object(self.masks, self.strings, 'wrong return type')
class TestMaskNonconvertible():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
inconvertible_numeric = pd.Series([False, False, False, True, True, False])
inconvertible_exact_dates = pd.Series([True, True, False, True, True, False])
inconvertible_inexact_dates = pd.Series([True, True, False, True, False, False])
def test_numeric(self):
assert_series_equal(pdv.mask_nonconvertible(self.mixed, 'numeric'),
self.inconvertible_numeric)
def test_datetime_exact_date(self):
assert_series_equal(pdv.mask_nonconvertible(self.mixed,
'datetime',
datetime_format='%Y',
exact_date=True),
self.inconvertible_exact_dates)
assert_series_equal(pdv.mask_nonconvertible(self.mixed,
'datetime',
datetime_format='%Y', exact_date=False),
self.inconvertible_inexact_dates)
class TestToDatetime():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
def test_exact(self):
expected_result1 = [pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT,
pd.Timestamp('2014-01-01 00:00:00')]
assert (pdv.to_datetime(self.mixed,
datetime_format='%Y',
exact=True).tolist() == expected_result1)
expected_result2 = [pd.NaT, pd.NaT, pd.NaT, pd.NaT,
pd.Timestamp('2014-01-07 00:00:00'),
pd.Timestamp('2014-01-01 00:00:00')]
assert (pdv.to_datetime(self.mixed,
datetime_format='%Y/%m/%d',
exact=False).tolist() == expected_result2)
class TestToNumeric():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
def test_conversion(self):
assert (pdv.to_numeric(self.mixed).sum() == 2017.3)
pytest.warns(ValidationWarning, pdv.to_numeric, self.mixed)
class TestToString():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7)])
numeric_as_strings = pd.Series(['1', '2.3', np.nan, 'abc', pd.datetime(2014, 1, 7)])
datetimes_as_strings = pd.Series([1, 2.3, np.nan, 'abc', '2014-01-07'])
all_values_as_strings = | pd.Series(['1', '2.3', np.nan, 'abc', '2014-01-07']) | pandas.Series |
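# Hedged sketch of the coercion behaviour exercised by TestToNumeric above,
# written with plain pandas (the wrapped pdv.to_numeric is assumed to coerce in
# a similar way): errors="coerce" turns non-convertible entries into NaN instead
# of raising, so only the numeric values contribute to the sum.
import numpy as np
import pandas as pd

mixed = pd.Series([1, 2.3, np.nan, "abc", "2014"])
print(pd.to_numeric(mixed, errors="coerce").sum())   # 2017.3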
import requests #FROM: https://www.kaggle.com/abrarhossainhimself/understat-data-for-teams-players-2014-present
import json
import pandas as pd
import os
def scrape_understat(payload):
url = 'https://understat.com/main/getPlayersStats/'
headers = {'content-type':'application/json; charset=utf-8',
'Host': 'understat.com',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:73.0) Gecko/20100101 Firefox/73.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Content-Length': '39',
'Origin': 'https: // understat.com',
'Connection': 'keep - alive',
'Referer': 'https: // understat.com / league / EPL'
}
response = requests.post(url, data=payload, headers = headers, verify=True)
response_json = response.json()
inner_wrapper = response_json['response']
json_player_data = inner_wrapper['players']
return json_player_data
def clean_df(player_df, weeks):
player_df = player_df.rename(columns={'goals':'goals_'+weeks,'xG':'xG_'+weeks,'assists':'assists_'+weeks, 'xA':'xA_'+weeks, 'shots':'shots_'+weeks, 'key_passes':
'key_passes_'+weeks,'npg':'npg_'+weeks,'npxG':'npxG_'+weeks})
return(player_df)
def gw_data(no_of_gw, season = '2020'):
json_player_data = scrape_understat({'league':'EPL', 'season':season, 'n_last_matches': no_of_gw})
gw_table = | pd.DataFrame(json_player_data) | pandas.DataFrame |
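# Hedged sketch of a terser way to build the per-gameweek column suffixes used
# in clean_df above: a dict comprehension over the stat names avoids repeating
# each mapping by hand. The stat list mirrors the columns renamed above.
import pandas as pd

stats_cols = ["goals", "xG", "assists", "xA", "shots", "key_passes", "npg", "npxG"]
weeks = "5"
df = pd.DataFrame(columns=stats_cols)
df = df.rename(columns={c: f"{c}_{weeks}" for c in stats_cols})
print(list(df.columns))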
import json
import os
from imblearn.over_sampling import ADASYN, RandomOverSampler
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import pandas as pd
import numpy as np
import random
from sklearn.model_selection import KFold, GridSearchCV
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, roc_curve, classification_report
from sklearn.model_selection import train_test_split
import sklearn
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.utils import resample
from DecisionTree.MyDecisionTree import MyDecisionTree
from LogisticRegression.MyLogisticRegression import MyLogisticRegression
from NaiveBayes.MyNaiveBayes import MyGaussianNB
pd.set_option('display.max_rows', 1000)  # the exact row/column display limits can be adjusted as needed
pd.set_option('display.max_columns', 1000)
class MyModel():
def __init__(self):
self.train_df = pd.read_csv("VI_train.csv")
self.test_df = pd.read_csv("VI_test.csv")
self.train_df = self.train_df.drop(['Unnamed: 0'], axis=1)
# Standardize the training and test sets
def standardData(self, X_train, X_valid, X_test):
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
X_valid = sc_X.transform(X_valid)
return X_train, X_valid, X_test
def upsampleFeature(self, df):
def upsample(df, u_feature, n_upsampling):
df_temp = df.copy()
ones = df.copy()
# For each feature, randomly generate new values around the original ones
for n in range(n_upsampling):
if u_feature == 'Annual_Premium':
df_temp[u_feature] = ones[u_feature].apply(
lambda x: x + random.randint(-1, 1) * x * 0.05) # change Annual_Premium in the range of 5%
else:
df_temp[u_feature] = ones[u_feature].apply(
lambda x: x + random.randint(-5, 5)) # change Age in the range of 5 years
if n == 0:
df_new = df_temp.copy()
else:
df_new = pd.concat([df_new, df_temp])
return df_new
df_train_up_age = upsample(df.loc[df['Response'] == 1], 'Age', 1)
df_train_up_vintage = upsample(df.loc[df['Response'] == 1], 'Vintage', 1)
df_ext = pd.concat([df, df_train_up_age])
df_ext = pd.concat([df_ext, df_train_up_vintage])
# X_train = df_ext.drop(columns=['Response'])
# y_train = df_ext.Response
print(len(df_ext))
return df_ext
def upsampleData(self, df):
ros = RandomOverSampler(random_state=42, sampling_strategy='minority')
x_train_sampled, y_train_sampled = ros.fit_resample(df.drop('Response', axis=1), df['Response'])
ada = ADASYN(random_state=42)
x_train_sampled, y_train_sampled = ada.fit_resample(df.drop('Response', axis=1), df['Response'])
x_train_sampled['Response'] = y_train_sampled
print(len(x_train_sampled))
return x_train_sampled
def downsample(self, df):
df_no_response = df[df['Response'] == 0]
df_response = df[df['Response'] == 1]
df_no_response_downsampled = resample(df_no_response,
replace=False,
n_samples=int(len(df_response)*2),
random_state=42)
df_downsample = pd.concat([df_no_response_downsampled, df_response])
print(len(df_downsample))
return df_downsample
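    # Example usage (illustrative; assumes VI_train.csv contains a binary 'Response' column):
    #   model = MyModel()
    #   balanced = model.downsample(model.train_df)      # keep all positives plus 2x negatives
    #   balanced = model.upsampleData(model.train_df)    # or oversample the minority class instead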
def featureEngineer(self, df_train, df_test):
        # Define the feature name lists
df_train_response = df_train.loc[df_train.Response == 1].copy()
categorical_features = ['Gender', 'Driving_License', 'Region_Code', 'Previously_Insured', 'Vehicle_Age',
'Vehicle_Damage', 'Policy_Sales_Channel']
text_features = ['Gender', 'Vehicle_Age', 'Vehicle_Damage']
        # Label-encode the text features
labelEncoder = preprocessing.LabelEncoder()
for f in text_features:
df_train[f] = labelEncoder.fit_transform(df_train[f])
df_test[f] = labelEncoder.fit_transform(df_test[f])
        # Change the data types
df_train.Region_Code = df_train.Region_Code.astype('int32')
df_train.Policy_Sales_Channel = df_train.Policy_Sales_Channel.astype('int32')
df_test.Region_Code = df_test.Region_Code.astype('int32')
df_test.Policy_Sales_Channel = df_test.Policy_Sales_Channel.astype('int32')
        # Encode age by age bracket
bins = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        df_train['bin_age'] = pd.cut(df_train['Age'], bins)
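        # NOTE (assumption): the source is truncated here; a plausible continuation is to
        # convert the interval bins to integer codes and mirror the binning on the test set.
        df_train['bin_age'] = labelEncoder.fit_transform(df_train['bin_age'].astype(str))
        df_test['bin_age'] = pd.cut(df_test['Age'], bins)
        df_test['bin_age'] = labelEncoder.fit_transform(df_test['bin_age'].astype(str))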
import os.path
import pandas as pd
import numpy as np
import exporter
c_ids = []
c_types = []
c_names = []
c_comments = []
c_likes = []
c_replies = []
c_key_words = []
c_themes = []
stats = {}
PATH_TO_DATA_FILE = r'/src/data/instaComments.xlsx'
PATH_TO_GROUPINGS_FILE = r'/src/data/groupings.xlsx'
def initialize():
global i_df, g_df, g_df_columns
i_df = pd.read_excel(os.getcwd() + PATH_TO_DATA_FILE)
g_df = pd.read_excel(os.getcwd() + PATH_TO_GROUPINGS_FILE)
    g_df = g_df.replace(r'\s+|^$', np.nan, regex=True)  # re-assign: replace() is not in-place
g_df_columns = g_df.columns.values
for i in range(len(g_df_columns)):
stats.update({g_df_columns[i]: [0, 0, 0]})
def iterate_sheet():
for i_index, i_row in i_df.iterrows():
key_words = None
themes = None
likes = i_row['likes']
replies = i_row['replies']
comment = i_row["comment"]
for g_index, g_row in g_df.iterrows():
for i in range(len(g_df_columns)):
current_column_theme = g_df_columns[i]
current_column_word = g_row[g_df_columns[i]]
                if pd.isna(current_column_word):
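                    # NOTE (assumption): the source is truncated here; empty cells in the
                    # groupings sheet carry no keyword, so a plausible continuation is to skip them.
                    continue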
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
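        Example (illustrative sketch; mirrors pandas semantics, not output captured
        from this implementation):
            >>> df = DataFrame({"a": [1, 1, 2], "b": [10, 20, 30]})
            >>> df.groupby("a").sum()   # sums "b" within each group of "a"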
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
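        Example (illustrative; mirrors pandas semantics):
            >>> df = DataFrame({"a": [1, 2, 3]})
            >>> df.isin([1, 3])   # True where the cell equals 1 or 3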
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
            other: What to add to this DataFrame.
            axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
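        Example (illustrative; mirrors pandas semantics):
            >>> df = DataFrame({"a": [1, 2], "b": [3, 4]})
            >>> df.apply(sum)           # column-wise sums, returned as a Series
            >>> df.apply(sum, axis=1)   # row-wise sums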
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
            if not is_list_like(exclude):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
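        Example (illustrative; mirrors pandas semantics):
            >>> df = DataFrame({"a": [1, 2], "b": [3, 4]})
            >>> df.drop(columns=["b"])   # new DataFrame without column "b"
            >>> df.drop(index=[0])       # new DataFrame without row 0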
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
non_existant = [obj for obj in axes["index"] if obj not in self.index]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
non_existant = [
obj for obj in axes["columns"] if obj not in self.columns
]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Ray DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
method: Method to use for filling holes in reindexed Series pad.
ffill: propagate last valid observation forward to next valid
backfill.
bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
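        Example (illustrative; mirrors pandas semantics):
            >>> df = DataFrame({"a": [1.0, None, 3.0]})
            >>> df.fillna(0)                  # missing values become 0
            >>> df.fillna(method="ffill")     # or carry the last valid value forward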
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__)
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
new_query_compiler = self._query_compiler.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def filter(self, items=None, like=None, regex=None, axis=None):
"""Subset rows or columns based on their labels
Args:
items (list): list of labels to subset
like (string): retain labels where `arg in label == True`
regex (string): retain labels matching regex input
axis: axis to filter on
Returns:
A new DataFrame with the filter applied.
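        Example (illustrative; mirrors pandas semantics):
            >>> df = DataFrame({"alpha": [1], "beta": [2]})
            >>> df.filter(like="alp")    # keep columns whose name contains "alp"
            >>> df.filter(regex="^b")    # keep columns matching the regex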
"""
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = pandas.DataFrame()._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in to_str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
def first(self, offset):
return self._default_to_pandas(pandas.DataFrame.first, offset)
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.first_valid_index()
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.floordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.floordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
@classmethod
def from_csv(
cls,
path,
header=0,
sep=", ",
index_col=0,
parse_dates=True,
encoding=None,
tupleize_cols=None,
infer_datetime_format=False,
):
from .io import read_csv
return read_csv(
path,
header=header,
sep=sep,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None):
ErrorMessage.default_to_pandas()
return from_pandas(pandas.DataFrame.from_dict(data, orient=orient, dtype=dtype))
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_items(items, columns=columns, orient=orient)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ge, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.ge(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return self.ftypes.value_counts().sort_index()
def get_value(self, index, col, takeable=False):
return self._default_to_pandas(
pandas.DataFrame.get_value, index, col, takeable=takeable
)
def get_values(self):
return self._default_to_pandas(pandas.DataFrame.get_values)
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.gt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.gt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def head(self, n=5):
"""Get the first n rows of the DataFrame.
Args:
n (int): The number of rows to return.
Returns:
A new DataFrame with the first n rows of the DataFrame.
"""
if n >= len(self.index):
return self.copy()
return DataFrame(query_compiler=self._query_compiler.head(n))
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwargs
)
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmax(axis=axis, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmin(axis=axis, skipna=skipna)
def infer_objects(self):
return self._default_to_pandas(pandas.DataFrame.infer_objects)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
"""Print a concise summary of a DataFrame, which includes the index
dtype and column dtypes, non-null values and memory usage.
Args:
verbose (bool, optional): Whether to print the full summary. Defaults
to true
buf (writable buffer): Where to send output. Defaults to sys.stdout
max_cols (int, optional): When to switch from verbose to truncated
                output. By default, this is 100.
memory_usage (bool, str, optional): Specifies whether the total memory
usage of the DataFrame elements (including index) should be displayed.
True always show memory usage. False never shows memory usage. A value
of 'deep' is equivalent to "True with deep introspection". Memory usage
is shown in human-readable units (base-2 representation). Without deep
introspection a memory estimation is made based in column dtype and
number of rows assuming values consume the same memory amount for
corresponding dtypes. With deep memory introspection, a real memory
usage calculation is performed at the cost of computational resources.
Defaults to True.
            null_counts (bool, optional): Whether to show the non-null counts. By
default, this is shown only when the frame is smaller than 100 columns
and 1690785 rows. A value of True always shows the counts and False
never shows the counts.
Returns:
Prints the summary of a DataFrame and returns None.
"""
# We will default to pandas because it will be faster than doing two passes
# over the data
buf = sys.stdout if not buf else buf
import io
with io.StringIO() as tmp_buf:
self._default_to_pandas(
pandas.DataFrame.info,
verbose=verbose,
buf=tmp_buf,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts,
)
result = tmp_buf.getvalue()
result = result.replace(
"pandas.core.frame.DataFrame", "modin.pandas.dataframe.DataFrame"
)
buf.write(result)
return None
index = self.index
columns = self.columns
dtypes = self.dtypes
# Set up default values
verbose = True if verbose is None else verbose
buf = sys.stdout if not buf else buf
max_cols = 100 if not max_cols else max_cols
memory_usage = True if memory_usage is None else memory_usage
if not null_counts:
if len(columns) < 100 and len(index) < 1690785:
null_counts = True
else:
null_counts = False
# Determine if actually verbose
actually_verbose = True if verbose and max_cols > len(columns) else False
if type(memory_usage) == str and memory_usage == "deep":
memory_usage_deep = True
else:
memory_usage_deep = False
# Start putting together output
# Class denoted in info() output
class_string = "<class 'modin.pandas.dataframe.DataFrame'>\n"
# Create the Index info() string by parsing self.index
index_string = index.summary() + "\n"
if null_counts:
counts = self._query_compiler.count()
if memory_usage:
memory_usage_data = self._query_compiler.memory_usage(
deep=memory_usage_deep, index=True
)
if actually_verbose:
# Create string for verbose output
col_string = "Data columns (total {0} columns):\n".format(len(columns))
for col, dtype in zip(columns, dtypes):
col_string += "{0}\t".format(col)
if null_counts:
col_string += "{0} not-null ".format(counts[col])
col_string += "{0}\n".format(dtype)
else:
# Create string for not verbose output
col_string = "Columns: {0} entries, {1} to {2}\n".format(
len(columns), columns[0], columns[-1]
)
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + "\n"
# Create memory usage string
memory_string = ""
if memory_usage:
if memory_usage_deep:
memory_string = "memory usage: {0} bytes".format(memory_usage_data)
else:
memory_string = "memory usage: {0}+ bytes".format(memory_usage_data)
# Combine all the components of the info() output
result = "".join(
[class_string, index_string, col_string, dtypes_string, memory_string]
)
# Write to specified output buffer
buf.write(result)
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
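        Example (illustrative; mirrors pandas semantics):
            >>> df = DataFrame({"a": [1, 2]})
            >>> df.insert(0, "b", [3, 4])   # in place; "b" becomes the first column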
"""
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
else:
if not is_list_like(value):
value = np.full(len(self.index), value)
if not isinstance(value, pandas.Series) and len(value) != len(self.index):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
downcast=None,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
downcast=downcast,
**kwargs
)
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
index_iter = iter(self.index)
def iterrow_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.iterrows()
partition_iterator = PartitionIterator(self._query_compiler, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled, so the remote function expands the
generator into a list before returning it. This is not ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
col_iter = iter(self.columns)
def items_builder(df):
df.columns = [next(col_iter)]
df.index = self.index
return df.items()
partition_iterator = PartitionIterator(self._query_compiler, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name="Pandas"):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled, so the remote function expands the
generator into a list before returning it. This is not ideal.
Returns:
A generator that yields a namedtuple for each row; see the args for how the tuples vary.
"""
index_iter = iter(self.index)
def itertuples_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.itertuples(index=index, name=name)
partition_iterator = PartitionIterator(
self._query_compiler, 0, itertuples_builder
)
for v in partition_iterator:
yield v
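# Illustrative sketch (same assumed `pd` import as the insert() example above;
# the column name "a" is hypothetical):
#   for row in df.itertuples(index=True, name="Pandas"):
#       print(row.Index, row.a)  # each row is a namedtuple with per-column attributes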
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""Join two or more DataFrames, or a DataFrame with a collection.
Args:
other: What to join this DataFrame with.
on: A column name to use from the left for the join.
how: What type of join to conduct.
lsuffix: The suffix to add to column names that match on left.
rsuffix: The suffix to add to column names that match on right.
sort: Whether or not to sort.
Returns:
The joined DataFrame.
"""
if on is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.join,
other,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
if isinstance(other, pandas.Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
pandas.DataFrame(columns=self.columns).join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
other._query_compiler,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
else:
# This constraint is carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
# See note above about error checking with an empty join.
pandas.DataFrame(columns=self.columns).join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
[obj._query_compiler for obj in other],
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
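# Illustrative sketch of the index-on-index path (assumed `pd` import as above):
#   left = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
#   right = pd.DataFrame({"b": [3, 4]}, index=["x", "y"])
#   joined = left.join(right, how="left")  # columns "a" and "b", index ["x", "y"]
#   # passing `on=...` instead would take the fallback-to-pandas branch above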
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurt,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurtosis,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def last(self, offset):
return self._default_to_pandas(pandas.DataFrame.last, offset)
def last_valid_index(self):
"""Return index for last non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.last_valid_index()
def le(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.le, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.le(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the lt over.
level: The Multilevel index level to apply lt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.lt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.lt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mad(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.mad, axis=axis, skipna=skipna, level=level
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mask,
cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
raise_on_error=raise_on_error,
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.max(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes mean across the DataFrame.
Args:
axis (int): The axis to take the mean on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The mean of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.mean(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.median(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
return self._default_to_pandas(
pandas.DataFrame.melt,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
def memory_usage(self, index=True, deep=False):
"""Returns the memory usage of each column in bytes
Args:
index (bool): Whether to include the memory usage of the DataFrame's
index in returned Series. Defaults to True
deep (bool): If True, introspect the data deeply by interrogating
objects dtypes for system-level memory consumption. Defaults to False
Returns:
A Series where the index are the column names and the values are
the memory usage of each of the columns in bytes. If `index=True`,
then the first value of the Series will be 'Index' with its memory usage.
"""
result = self._query_compiler.memory_usage(index=index, deep=deep)
result.index = self.columns
if index:
index_value = self.index.memory_usage(deep=deep)
return pandas.Series(index_value, index=["Index"]).append(result)
return result
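# Illustrative sketch (assumed `pd` import as above):
#   usage = df.memory_usage(index=True, deep=False)
#   # `usage` is a Series in bytes: first entry "Index", then one entry per column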
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
right: The DataFrame to merge against.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
A merged Dataframe
"""
if not isinstance(right, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type "
"{}".format(type(right))
)
if left_index is False or right_index is False:
if isinstance(right, DataFrame):
right = right._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.merge,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
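# Illustrative sketch (assumed `pd` import as above): only the index-on-index case
# is handled natively here; any column-key merge takes the pandas fallback above.
#   inner = left.merge(right, left_index=True, right_index=True, how="inner")
#   keyed = left.merge(right, on="key")  # hypothetical "key" column; falls back to pandas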
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.min(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mod(self, other, axis="columns", level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mode(self, axis=0, numeric_only=False):
"""Perform mode across the DataFrame.
Args:
axis (int): The axis to take the mode on.
numeric_only (bool): if True, only apply to numeric columns.
Returns:
DataFrame: The mode of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only
)
)
def mul(self, other, axis="columns", level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mul,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mul(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def multiply(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for mul.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self.mul(other, axis, level, fill_value)
def ne(self, other, axis="columns", level=None):
"""Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ne, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.ne(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def nlargest(self, n, columns, keep="first"):
return self._default_to_pandas(pandas.DataFrame.nlargest, n, columns, keep=keep)
def notna(self):
"""Perform notna across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notnull())
def nsmallest(self, n, columns, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.nsmallest, n, columns, keep=keep
)
def nunique(self, axis=0, dropna=True):
"""Return Series with number of distinct
observations over requested axis.
Args:
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Returns:
nunique : Series
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.nunique(axis=axis, dropna=dropna)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.pct_change,
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
**kwargs
)
def pipe(self, func, *args, **kwargs):
"""Apply func(self, *args, **kwargs)
Args:
func: function to apply to the df.
args: positional arguments passed into ``func``.
kwargs: a dictionary of keyword arguments passed into ``func``.
Returns:
object: the return type of ``func``.
"""
return com._pipe(self, func, *args, **kwargs)
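# Illustrative sketch: pipe() simply forwards to the shared `_pipe` helper.
#   def add_k(frame, k):
#       return frame + k
#   out = df.pipe(add_k, k=1)  # equivalent to add_k(df, k=1)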
def pivot(self, index=None, columns=None, values=None):
return self._default_to_pandas(
pandas.DataFrame.pivot, index=index, columns=columns, values=values
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
return self._default_to_pandas(
pandas.DataFrame.pivot_table,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
)
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs
):
return to_pandas(self).plot
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
result = self[item]
del self[item]
return result
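# Illustrative sketch (column name "b" is hypothetical):
#   b = df.pop("b")  # returns column "b" as a Series and removes it from df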
def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.pow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.pow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
prod : Series or DataFrame (if level specified)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
return self._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def product(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
product : Series or DataFrame (if level specified)
"""
return self.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""Return values at the given quantile over requested axis,
a la numpy.percentile.
Args:
q (float): 0 <= q <= 1, the quantile(s) to compute
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specifies which interpolation method to use
Returns:
quantiles : Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the
values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values
are the quantiles.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
def check_dtype(t):
return is_numeric_dtype(t) or is_datetime_or_timedelta_dtype(t)
if not numeric_only:
# If not numeric_only and columns, then check all columns are either
# numeric, timestamp, or timedelta
if not axis and not all(check_dtype(t) for t in self.dtypes):
raise TypeError("can't multiply sequence by non-int of type 'float'")
# If over rows, then make sure that all dtypes are equal for not
# numeric_only
elif axis:
for i in range(1, len(self.dtypes)):
pre_dtype = self.dtypes[i - 1]
curr_dtype = self.dtypes[i]
if not is_dtype_equal(pre_dtype, curr_dtype):
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".format(
pre_dtype, curr_dtype
)
)
else:
# Normally pandas returns this near the end of the quantile, but we
# can't afford the overhead of running the entire operation before
# we error.
if not any(is_numeric_dtype(t) for t in self.dtypes):
raise ValueError("need at least one array to concatenate")
# check that all qs are between 0 and 1
pandas.DataFrame()._check_percentile(q)
axis = pandas.DataFrame()._get_axis_number(axis)
if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)):
return DataFrame(
query_compiler=self._query_compiler.quantile_for_list_of_values(
q=q,
axis=axis,
numeric_only=numeric_only,
interpolation=interpolation,
)
)
else:
return self._query_compiler.quantile_for_single_value(
q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation
)
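# Illustrative sketch:
#   df.quantile(0.5)           # Series: one quantile value per numeric column
#   df.quantile([0.25, 0.75])  # DataFrame: one row per requested quantile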
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
ErrorMessage.non_verified_udf()
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.query(expr, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
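# Illustrative sketch (column names "a" and "b" are hypothetical):
#   out = df.query("a > 1 and b < 30")  # keeps rows where the expression is True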
def radd(self, other, axis="columns", level=None, fill_value=None):
return self.add(other, axis, level, fill_value)
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
Equal values are assigned a rank that is the [method] of
the ranks of those values.
Args:
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
method: {'average', 'min', 'max', 'first', 'dense'}
Specifies which method to use for equal vals
numeric_only (boolean)
Include only float, int, boolean data.
na_option: {'keep', 'top', 'bottom'}
Specifies how to handle NA options
ascending (boolean):
Decides ranking order
pct (boolean):
Computes percentage ranking of data
Returns:
A new DataFrame
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.rank(
axis=axis,
method=method,
numeric_only=numeric_only,
na_option=na_option,
ascending=ascending,
pct=pct,
)
)
def rdiv(self, other, axis="columns", level=None, fill_value=None):
"""Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rdiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rdiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.reindex,
labels=labels,
index=index,
columns=columns,
axis=axis,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
if index is not None:
new_query_compiler = self._query_compiler.reindex(
0,
index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
new_query_compiler = self._query_compiler
if columns is not None:
final_query_compiler = new_query_compiler.reindex(
1,
columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
final_query_compiler = new_query_compiler
return self._create_dataframe_from_compiler(final_query_compiler, not copy)
def reindex_axis(
self,
labels,
axis=0,
method=None,
level=None,
copy=True,
limit=None,
fill_value=np.nan,
):
return self._default_to_pandas(
pandas.DataFrame.reindex_axis,
labels,
axis=axis,
method=method,
level=level,
copy=copy,
limit=limit,
fill_value=fill_value,
)
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.reindex_like,
other,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
):
"""Alters axes labels.
Args:
mapper, index, columns: Transformations to apply to the axis's
values.
axis: Axis to target with mapper.
copy: Also copy underlying data.
inplace: Whether to return a new DataFrame.
level: Only rename a specific level of a MultiIndex.
Returns:
If inplace is False, a new DataFrame with the updated axes.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# We have to do this with the args because of how rename handles
# kwargs. It doesn't ignore None values passed in, so we have to filter
# them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items() if v is not None and k != "self"}
# inplace should always be true because this is just a copy, and we
# will use the results after.
kwargs["inplace"] = True
df_to_rename = pandas.DataFrame(index=self.index, columns=self.columns)
df_to_rename.rename(**kwargs)
if inplace:
obj = self
else:
obj = self.copy()
obj.index = df_to_rename.index
obj.columns = df_to_rename.columns
if not inplace:
return obj
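# Illustrative sketch:
#   renamed = df.rename(columns={"a": "alpha"})   # returns a new frame
#   df.rename(index={0: "first"}, inplace=True)   # modifies df in place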
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.name = mapper
else:
renamed.index.name = mapper
if not inplace:
return renamed
def _set_axis_name(self, name, axis=0, inplace=False):
"""Alter the name or names of the axis.
Args:
name: Name for the Index, or list of names for the MultiIndex
axis: 0 or 'index' for the index; 1 or 'columns' for the columns
inplace: Whether to modify `self` directly or return a copy
Returns:
Type of caller or None if inplace=True.
"""
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.set_names(name)
else:
renamed.index.set_names(name)
if not inplace:
return renamed
def reorder_levels(self, order, axis=0):
return self._default_to_pandas(
pandas.DataFrame.reorder_levels, order, axis=axis
)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return self._default_to_pandas(
pandas.DataFrame.replace,
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def resample(
self,
rule,
how=None,
axis=0,
fill_method=None,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
limit=None,
base=0,
on=None,
level=None,
):
return self._default_to_pandas(
pandas.DataFrame.resample,
rule,
how=how,
axis=axis,
fill_method=fill_method,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
limit=limit,
base=base,
on=on,
level=level,
)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into DataFrame columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO Implement level
if level is not None:
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.reset_index,
level=level,
drop=drop,
inplace=inplace,
col_level=col_level,
col_fill=col_fill,
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
# Error checking for matching Pandas. Pandas does not allow you to
# insert a dropped index into a DataFrame if these columns already
# exist.
if (
not drop
and not isinstance(self.index, pandas.MultiIndex)
and all(n in self.columns for n in ["level_0", "index"])
):
raise ValueError("cannot insert level_0, already exists")
new_query_compiler = self._query_compiler.reset_index(drop=drop, level=level)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def rfloordiv(self, other, axis="columns", level=None, fill_value=None):
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rfloordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rfloordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmod(self, other, axis="columns", level=None, fill_value=None):
"""Mod this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rmod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rmod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmul(self, other, axis="columns", level=None, fill_value=None):
return self.mul(other, axis, level, fill_value)
def rolling(
self,
window,
min_periods=None,
freq=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
return self._default_to_pandas(
pandas.DataFrame.rolling,
window,
min_periods=min_periods,
freq=freq,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
def round(self, decimals=0, *args, **kwargs):
"""Round each element in the DataFrame.
Args:
decimals: The number of decimals to round to.
Returns:
A new DataFrame.
"""
return DataFrame(
query_compiler=self._query_compiler.round(decimals=decimals, **kwargs)
)
def rpow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rpow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
# Check to make sure integers are not raised to negative integer powers
if (
is_integer_dtype(type(other))
and other < 0
and all(is_integer_dtype(t) for t in self.dtypes)
):
raise ValueError("Integers to negative integer powers are not allowed.")
new_query_compiler = self._query_compiler.rpow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rsub(self, other, axis="columns", level=None, fill_value=None):
"""Subtract a DataFrame/Series/scalar from this DataFrame.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: Mutlilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraciont applied.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rsub,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_time_only=True)
new_query_compiler = self._query_compiler.rsub(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
return self.truediv(other, axis, level, fill_value)
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new Dataframe
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self.index))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, pandas.Series):
weights = weights.reindex(self.axes[axis])
# If weights arg is a string, the weights used for sampling will
# be the values in the column corresponding to that string
if isinstance(weights, string_types):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
# If passed in weights are not equal to 1, renormalize them
# otherwise numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# Pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError("Please enter a value for `frac` OR `n`, not both")
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
if n == 0:
# An Empty DataFrame is returned if the number of samples is 0.
# The Empty Dataframe should have either columns or index specified
# depending on which axis is passed in.
return DataFrame(
columns=[] if axis == 1 else self.columns,
index=self.index if axis == 1 else [],
)
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError(
"Please enter an `int` OR a "
"np.random.RandomState for random_state"
)
# choose random numbers and then get corresponding labels from
# chosen axis
sample_indices = random_num_gen.choice(
np.arange(0, axis_length), size=n, replace=replace
)
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(
a=axis_labels, size=n, replace=replace, p=weights
)
if axis:
query_compiler = self._query_compiler.getitem_column_array(samples)
return DataFrame(query_compiler=query_compiler)
else:
query_compiler = self._query_compiler.getitem_row_array(samples)
return DataFrame(query_compiler=query_compiler)
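# Illustrative sketch:
#   df.sample(n=2, random_state=42)  # two rows, reproducible via the seeded generator
#   df.sample(frac=0.5, axis=1)      # roughly half of the columns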
def select(self, crit, axis=0):
return self._default_to_pandas(pandas.DataFrame.select, crit, axis=axis)
def select_dtypes(self, include=None, exclude=None):
# Validates arguments for whether both include and exclude are None or
# if they are disjoint. Also invalidates string dtypes.
pandas.DataFrame().select_dtypes(include, exclude)
if include and not is_list_like(include):
include = [include]
elif not include:
include = []
if exclude and not is_list_like(exclude):
exclude = [exclude]
elif not exclude:
exclude = []
sel = tuple(map(set, (include, exclude)))
include, exclude = map(lambda x: set(map(_get_dtype_from_object, x)), sel)
include_these = pandas.Series(not bool(include), index=self.columns)
exclude_these = pandas.Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(
is_dtype_instance_mapper, self.dtypes.iteritems()
):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
indicate = [
i for i in range(len(dtype_indexer.values)) if not dtype_indexer.values[i]
]
return self.drop(columns=self.columns[indicate], inplace=False)
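# Illustrative sketch:
#   numeric_part = df.select_dtypes(include=["number"])
#   no_objects = df.select_dtypes(exclude=["object"])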
def sem(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._default_to_pandas(
pandas.DataFrame.sem,
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs
)
def set_axis(self, labels, axis=0, inplace=None):
"""Assign desired index to given axis.
Args:
labels (pandas.Index or list-like): The Index to assign.
axis (string or int): The axis to reassign.
inplace (bool): Whether to make these modifications inplace.
Returns:
If inplace is False, returns a new DataFrame, otherwise None.
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and "labels" as second, is still supported '
"but will be deprecated in a future version of pandas.",
FutureWarning,
stacklevel=2,
)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
"set_axis currently defaults to operating inplace.\nThis "
"will change in a future version of pandas, use "
"inplace=True to avoid this warning.",
FutureWarning,
stacklevel=2,
)
inplace = True
if inplace:
setattr(self, pandas.DataFrame()._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""Set the DataFrame index using one or more existing columns.
Args:
keys: column label or list of column labels / arrays.
drop (boolean): Delete columns to be used as the new index.
append (boolean): Whether to append columns to existing index.
inplace (boolean): Modify the DataFrame in place.
verify_integrity (boolean): Check the new index for duplicates.
Otherwise defer the check until necessary. Setting to False
will improve the performance of this method
Returns:
If inplace is set to false returns a new DataFrame, otherwise None.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, pandas.MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, pandas.MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, pandas.Series):
level = col._values
names.append(col.name)
elif isinstance(col, pandas.Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, pandas.Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError("Index has duplicate keys: %s" % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
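# Illustrative sketch (column names are hypothetical):
#   indexed = df.set_index("a")       # "a" becomes the index and leaves the columns
#   multi = df.set_index(["a", "b"])  # MultiIndex built from two columns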
def set_value(self, index, col, value, takeable=False):
return self._default_to_pandas(
pandas.DataFrame.set_value, index, col, value, takeable=takeable
)
def shift(self, periods=1, freq=None, axis=0):
return self._default_to_pandas(
pandas.DataFrame.shift, periods=periods, freq=freq, axis=axis
)
def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Return unbiased skew over requested axis Normalized by N-1
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
numeric_only : boolean, default None
Returns:
skew : Series or DataFrame (if level specified)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.skew(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def slice_shift(self, periods=1, axis=0):
return self._default_to_pandas(
pandas.DataFrame.slice_shift, periods=periods, axis=axis
)
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
by=None,
):
"""Sort a DataFrame by one of the indices (columns or index).
Args:
axis: The axis to sort over.
level: The MultiIndex level to sort over.
ascending: Ascending or descending
inplace: Whether or not to update this DataFrame inplace.
kind: How to perform the sort.
na_position: Where to position NA on the sort.
sort_remaining: On Multilevel Index sort based on all levels.
by: (Deprecated) argument to pass to sort_values.
Returns:
A sorted DataFrame
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.sort_index,
axis=axis,
level=level,
ascending=ascending,
inplace=False,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
if by is not None:
warnings.warn(
"by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning,
stacklevel=2,
)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
new_query_compiler = self._query_compiler.sort_index(
axis=axis, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_inplace(new_query_compiler=new_query_compiler)
else:
return DataFrame(query_compiler=new_query_compiler)
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
):
"""Sorts by a column/row or list of columns/rows.
Args:
by: A list of labels for the axis to sort over.
axis: The axis to sort.
ascending: Sort in ascending or descending order.
inplace: If true, do the operation inplace.
kind: How to sort.
na_position: Where to put np.nan values.
Returns:
A sorted DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if not is_list_like(by):
by = [by]
# Currently, sort_values will just reindex based on the sorted values.
# TODO create a more efficient way to sort
if axis == 0:
broadcast_value_dict = {col: self[col] for col in by}
broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)
new_index = broadcast_values.sort_values(
by=by,
axis=axis,
ascending=ascending,
kind=kind,
na_position=na_position,
).index
return self.reindex(index=new_index, copy=not inplace)
else:
broadcast_value_list = [
to_pandas(self[row :: len(self.index)]) for row in by
]
index_builder = list(zip(broadcast_value_list, by))
broadcast_values = pandas.concat(
[row for row, idx in index_builder], copy=False
)
broadcast_values.columns = self.columns
new_columns = broadcast_values.sort_values(
by=by,
axis=axis,
ascending=ascending,
kind=kind,
na_position=na_position,
).columns
return self.reindex(columns=new_columns, copy=not inplace)
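# Illustrative sketch (column names "a" and "b" are hypothetical):
#   df.sort_values(by="a", ascending=False)
#   df.sort_values(by=["a", "b"], na_position="first")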
def sortlevel(
self, level=0, axis=0, ascending=True, inplace=False, sort_remaining=True
):
return self._default_to_pandas(
pandas.DataFrame.sortlevel,
level=level,
axis=axis,
ascending=ascending,
inplace=inplace,
sort_remaining=sort_remaining,
)
def squeeze(self, axis=None):
# Checks for 1x1 DF, passes into squeeze with appropriate ndim
if (
self._query_compiler.data.shape[0] == 1
and self._query_compiler.data.shape[1] == 1
):
return self._query_compiler.squeeze(0, axis)
# Checks for 1xN or Nx1 DF, passes into squeeze with appropriate ndim
elif 1 in self._query_compiler.data.shape:
return self._query_compiler.squeeze(1, axis)
# NxN DF, don't need to pass into squeeze
else:
return self.copy()
def stack(self, level=-1, dropna=True):
return self._default_to_pandas(
pandas.DataFrame.stack, level=level, dropna=dropna
)
def std(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
"""Computes standard deviation across the DataFrame.
Args:
axis (int): The axis to take the std on.
skipna (bool): True to skip NA values, false otherwise.
ddof (int): degrees of freedom
Returns:
The std of the DataFrame (Pandas Series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.std(
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs
)
def sub(self, other, axis="columns", level=None, fill_value=None):
"""Subtract a DataFrame/Series/scalar from this DataFrame.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: The Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.sub,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_time_only=True)
new_query_compiler = self._query_compiler.sub(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def subtract(self, other, axis="columns", level=None, fill_value=None):
"""Alias for sub.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: The Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied.
"""
return self.sub(other, axis, level, fill_value)
def swapaxes(self, axis1, axis2, copy=True):
return self._default_to_pandas(
pandas.DataFrame.swapaxes, axis1, axis2, copy=copy
)
def swaplevel(self, i=-2, j=-1, axis=0):
return self._default_to_pandas(pandas.DataFrame.swaplevel, i=i, j=j, axis=axis)
def tail(self, n=5):
"""Get the last n rows of the DataFrame.
Args:
n (int): The number of rows to return.
Returns:
A new DataFrame with the last n rows of this DataFrame.
"""
if n >= len(self.index):
return self.copy()
return DataFrame(query_compiler=self._query_compiler.tail(n))
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.take,
indices,
axis=axis,
convert=convert,
is_copy=is_copy,
**kwargs
)
def to_clipboard(self, excel=None, sep=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.to_clipboard, excel=excel, sep=sep, **kwargs
)
def to_csv(
self,
path_or_buf=None,
sep=",",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
mode="w",
encoding=None,
compression=None,
quoting=None,
quotechar='"',
line_terminator="\n",
chunksize=None,
tupleize_cols=None,
date_format=None,
doublequote=True,
escapechar=None,
decimal=".",
):
kwargs = {
"path_or_buf": path_or_buf,
"sep": sep,
"na_rep": na_rep,
"float_format": float_format,
"columns": columns,
"header": header,
"index": index,
"index_label": index_label,
"mode": mode,
"encoding": encoding,
"compression": compression,
"quoting": quoting,
"quotechar": quotechar,
"line_terminator": line_terminator,
"chunksize": chunksize,
"tupleize_cols": tupleize_cols,
"date_format": date_format,
"doublequote": doublequote,
"escapechar": escapechar,
"decimal": decimal,
}
return self._default_to_pandas(pandas.DataFrame.to_csv, **kwargs)
def to_dense(self):
return self._default_to_pandas(pandas.DataFrame.to_dense)
def to_dict(self, orient="dict", into=dict):
return self._default_to_pandas(
pandas.DataFrame.to_dict, orient=orient, into=into
)
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_excel,
excel_writer,
sheet_name,
na_rep,
float_format,
columns,
header,
index,
index_label,
startrow,
startcol,
engine,
merge_cells,
encoding,
inf_rep,
verbose,
freeze_panes,
)
def to_feather(self, fname):
return self._default_to_pandas(pandas.DataFrame.to_feather, fname)
def to_gbq(
self,
destination_table,
project_id,
chunksize=10000,
verbose=True,
reauth=False,
if_exists="fail",
private_key=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_gbq,
destination_table,
project_id,
chunksize=chunksize,
verbose=verbose,
reauth=reauth,
if_exists=if_exists,
private_key=private_key,
)
def to_hdf(self, path_or_buf, key, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.to_hdf, path_or_buf, key, **kwargs
)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="np.NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
bold_rows=True,
classes=None,
escape=True,
max_rows=None,
max_cols=None,
show_dimensions=False,
notebook=False,
decimal=".",
border=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_html,
buf,
columns,
col_space,
header,
index,
na_rep,
formatters,
float_format,
sparsify,
index_names,
justify,
bold_rows,
classes,
escape,
max_rows,
max_cols,
show_dimensions,
notebook,
decimal,
border,
)
def to_json(
self,
path_or_buf=None,
orient=None,
date_format=None,
double_precision=10,
force_ascii=True,
date_unit="ms",
default_handler=None,
lines=False,
compression=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_json,
path_or_buf,
orient,
date_format,
double_precision,
force_ascii,
date_unit,
default_handler,
lines,
compression,
)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="np.NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_latex,
buf=buf,
columns=columns,
col_space=col_space,
header=header,
index=index,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
index_names=index_names,
bold_rows=bold_rows,
column_format=column_format,
longtable=longtable,
escape=escape,
encoding=encoding,
decimal=decimal,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
)
def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
return self._default_to_pandas(
pandas.DataFrame.to_msgpack,
path_or_buf=path_or_buf,
encoding=encoding,
**kwargs
)
def to_panel(self):
return self._default_to_pandas(pandas.DataFrame.to_panel)
def to_parquet(self, fname, engine="auto", compression="snappy", **kwargs):
return self._default_to_pandas(
pandas.DataFrame.to_parquet,
fname,
engine=engine,
compression=compression,
**kwargs
)
def to_period(self, freq=None, axis=0, copy=True):
return self._default_to_pandas(
pandas.DataFrame.to_period, freq=freq, axis=axis, copy=copy
)
def to_pickle(self, path, compression="infer", protocol=pkl.HIGHEST_PROTOCOL):
return self._default_to_pandas(
pandas.DataFrame.to_pickle, path, compression=compression, protocol=protocol
)
def to_records(self, index=True, convert_datetime64=True):
return self._default_to_pandas(
pandas.DataFrame.to_records,
index=index,
convert_datetime64=convert_datetime64,
)
def to_sparse(self, fill_value=None, kind="block"):
return self._default_to_pandas(
pandas.DataFrame.to_sparse, fill_value=fill_value, kind=kind
)
def to_sql(
self,
name,
con,
flavor=None,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_sql,
name,
con,
flavor,
schema,
if_exists,
index,
index_label,
chunksize,
dtype,
)
def to_stata(
self,
fname,
convert_dates=None,
write_index=True,
encoding="latin-1",
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
):
return self._default_to_pandas(
pandas.DataFrame.to_stata,
fname,
convert_dates,
write_index,
encoding,
byteorder,
time_stamp,
data_label,
variable_labels,
)
def to_string(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="np.NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
line_width=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
):
return self._default_to_pandas(
pandas.DataFrame.to_string,
buf=buf,
columns=columns,
col_space=col_space,
header=header,
index=index,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
index_names=index_names,
justify=justify,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
def to_timestamp(self, freq=None, how="start", axis=0, copy=True):
return self._default_to_pandas(
pandas.DataFrame.to_timestamp, freq=freq, how=how, axis=axis, copy=copy
)
def to_xarray(self):
return self._default_to_pandas(pandas.DataFrame.to_xarray)
def transform(self, func, *args, **kwargs):
kwargs["is_transform"] = True
result = self.agg(func, *args, **kwargs)
try:
result.columns = self.columns
result.index = self.index
except ValueError:
raise ValueError("transforms cannot produce aggregated results")
return result
def truediv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
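Example (illustrative sketch, not from the original source; df is a hypothetical frame):
>>> df = DataFrame({"a": [2.0, 4.0], "b": [8.0, 16.0]})
>>> df.truediv(2)               # every element divided by 2
>>> df.truediv([1, 2], axis=0)  # divide each row element-wise by a list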
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.truediv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.truediv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def truncate(self, before=None, after=None, axis=None, copy=True):
return self._default_to_pandas(
pandas.DataFrame.truncate, before=before, after=after, axis=axis, copy=copy
)
def tshift(self, periods=1, freq=None, axis=0):
return self._default_to_pandas(
pandas.DataFrame.tshift, periods=periods, freq=freq, axis=axis
)
def tz_convert(self, tz, axis=0, level=None, copy=True):
return self._default_to_pandas(
pandas.DataFrame.tz_convert, tz, axis=axis, level=level, copy=copy
)
def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous="raise"):
return self._default_to_pandas(
pandas.DataFrame.tz_localize,
tz,
axis=axis,
level=level,
copy=copy,
ambiguous=ambiguous,
)
def unstack(self, level=-1, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.unstack, level=level, fill_value=fill_value
)
def update(
self, other, join="left", overwrite=True, filter_func=None, raise_conflict=False
):
"""Modify DataFrame in place using non-NA values from other.
Args:
other: DataFrame, or object coercible into a DataFrame
join: {'left'}, default 'left'
overwrite: If True then overwrite values for common keys in frame
filter_func: Can choose to replace values other than NA.
raise_conflict: If True, will raise an error if the DataFrame and
other both contain data in the same place.
Returns:
None
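Example (hedged sketch of the in-place semantics; values are hypothetical):
>>> df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})
>>> other = DataFrame({"B": [4, np.nan, 6]})
>>> df.update(other)   # df["B"] becomes [4, 500, 6]; NaN in `other` never overwrites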
"""
if raise_conflict:
return self._default_to_pandas(
pandas.DataFrame.update,
other,
join=join,
overwrite=overwrite,
filter_func=filter_func,
raise_conflict=raise_conflict,
)
if not isinstance(other, DataFrame):
other = DataFrame(other)
query_compiler = self._query_compiler.update(
other._query_compiler,
join=join,
overwrite=overwrite,
filter_func=filter_func,
raise_conflict=raise_conflict,
)
self._update_inplace(new_query_compiler=query_compiler)
def var(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
"""Computes variance across the DataFrame.
Args:
axis (int): The axis to take the variance on.
skipna (bool): True to skip NA values, false otherwise.
ddof (int): degrees of freedom
Returns:
The variance of the DataFrame.
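Example (illustrative sketch of the ddof behaviour; frame is hypothetical):
>>> df = DataFrame({"x": [1.0, 2.0, 3.0, 4.0]})
>>> df.var()        # sample variance, ddof=1 -> 1.666...
>>> df.var(ddof=0)  # population variance -> 1.25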
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.var(
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
"""Replaces values not meeting condition with values in other.
Args:
cond: A condition to be met, can be callable, array-like or a
DataFrame.
other: A value or DataFrame of values to use for setting this.
inplace: Whether or not to operate inplace.
axis: The axis to apply over. Only valid when a Series is passed
as other.
level: The MultiLevel index level to apply over.
errors: Whether or not to raise errors. Does nothing in Pandas.
try_cast: Try to cast the result back to the input type.
raise_on_error: Whether to raise invalid datatypes (deprecated).
Returns:
A new DataFrame with the replaced values.
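Example (illustrative sketch, not from the original source):
>>> df = DataFrame({"a": [1, -2, 3]})
>>> df.where(df > 0)           # negative entries become NaN
>>> df.where(df > 0, other=0)  # negative entries become 0
>>> df.where(lambda d: d > 0)  # callable conditions are also accepted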
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(other, pandas.Series) and axis is None:
raise ValueError("Must specify axis=0 or 1")
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
if isinstance(cond, DataFrame):
cond = cond._query_compiler.to_pandas()
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.where,
cond,
other=other,
inplace=False,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
raise_on_error=raise_on_error,
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
cond = cond(self) if callable(cond) else cond
if not isinstance(cond, DataFrame):
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = DataFrame(cond, index=self.index, columns=self.columns)
if isinstance(other, DataFrame):
other = other._query_compiler
elif isinstance(other, pandas.Series):
other = other.reindex(self.index if not axis else self.columns)
else:
index = self.index if not axis else self.columns
other = pandas.Series(other, index=index)
query_compiler = self._query_compiler.where(
cond._query_compiler, other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(query_compiler, inplace)
def xs(self, key, axis=0, level=None, drop_level=True):
return self._default_to_pandas(
pandas.DataFrame.xs, key, axis=axis, level=level, drop_level=drop_level
)
def __getitem__(self, key):
"""Get the column specified by key for this DataFrame.
Args:
key : The column name.
Returns:
A Pandas Series representing the value for the column.
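Example (hedged sketch of the key types handled below; df is hypothetical):
>>> df["col"]               # single column -> Series view
>>> df[["col_a", "col_b"]]  # list of column names -> DataFrame
>>> df[df["col"] > 0]       # boolean row mask -> filtered DataFrame
>>> df[1:3]                 # row slice -> filtered DataFrame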
"""
key = com._apply_if_callable(key, self)
# Shortcut if key is an actual column
is_mi_columns = isinstance(self.columns, pandas.MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except (KeyError, ValueError, TypeError):
pass
# see if we can slice the rows
# This lets us reuse code in Pandas to error check
indexer = convert_to_index_sliceable(pandas.DataFrame(index=self.index), key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (pandas.Series, np.ndarray, pandas.Index, list)):
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self.where(key)
elif is_mi_columns:
return self._default_to_pandas(pandas.DataFrame.__getitem__, key)
# return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
return SeriesView(
self._query_compiler.getitem_single_key(key), self, (slice(None), key)
)
def _getitem_array(self, key):
if com.is_bool_indexer(key):
if isinstance(key, pandas.Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
PendingDeprecationWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
"Item wrong length {} instead of {}.".format(
len(key), len(self.index)
)
)
key = check_bool_indexer(self.index, key)
# We convert to a RangeIndex because getitem_row_array is expecting a list
# of indices, and RangeIndex will give us the exact indices of each boolean
# requested.
key = pandas.RangeIndex(len(self.index))[key]
return DataFrame(query_compiler=self._query_compiler.getitem_row_array(key))
else:
if any(k not in self.columns for k in key):
raise KeyError(
"{} not index".format(
str([k for k in key if k not in self.columns]).replace(",", "")
)
)
return DataFrame(
query_compiler=self._query_compiler.getitem_column_array(key)
)
def _getitem_slice(self, key):
# We convert to a RangeIndex because getitem_row_array is expecting a list
# of indices, and RangeIndex will give us the exact indices of each boolean
# requested.
key = pandas.RangeIndex(len(self.index))[key]
return DataFrame(query_compiler=self._query_compiler.getitem_row_array(key))
def __getattr__(self, key):
"""After regular attribute access, looks up the name in the columns
Args:
key (str): Attribute name.
Returns:
The value of the attribute.
"""
try:
return object.__getattribute__(self, key)
except AttributeError as e:
if key in self.columns:
return self[key]
raise e
def __setitem__(self, key, value):
if not isinstance(key, str):
return self._default_to_pandas(pandas.DataFrame.__setitem__, key, value)
if key not in self.columns:
self.insert(loc=len(self.columns), column=key, value=value)
else:
loc = self.columns.get_loc(key)
self.__delitem__(key)
self.insert(loc=loc, column=key, value=value)
def __len__(self):
"""Gets the length of the DataFrame.
Returns:
Returns an integer length of the DataFrame object.
"""
return len(self.index)
def __unicode__(self):
return self._default_to_pandas(pandas.DataFrame.__unicode__)
def __invert__(self):
return self._default_to_pandas(pandas.DataFrame.__invert__)
def __hash__(self):
return self._default_to_pandas(pandas.DataFrame.__hash__)
def __iter__(self):
"""Iterate over the columns
Returns:
An Iterator over the columns of the DataFrame.
"""
return iter(self.columns)
def __contains__(self, key):
"""Searches columns for specific key
Args:
key : The column name
Returns:
Returns a boolean if the specified key exists as a column name
"""
return self.columns.__contains__(key)
def __nonzero__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
self.__class__.__name__
)
)
__bool__ = __nonzero__
def __abs__(self):
"""Creates a modified DataFrame by taking the absolute value.
Returns:
A modified DataFrame
"""
return self.abs()
def __round__(self, decimals=0):
return self._default_to_pandas(pandas.DataFrame.__round__, decimals=decimals)
def __array__(self, dtype=None):
# TODO: This is very inefficient and needs fix, also see as_matrix
return to_pandas(self).__array__(dtype=dtype)
def __array_wrap__(self, result, context=None):
# TODO: This is very inefficient, see also __array__ and as_matrix
return to_pandas(self).__array_wrap__(result, context=context)
def __getstate__(self):
return self._default_to_pandas(pandas.DataFrame.__getstate__)
def __setstate__(self, state):
return self._default_to_pandas(pandas.DataFrame.__setstate__, state)
def __delitem__(self, key):
"""Delete a column by key. `del a[key]` for example.
Operation happens in place.
Notes: This operation happens on the row and column partitions
simultaneously. No rebuild is required.
Args:
key: key to delete
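Example (illustrative; assumes a frame df that has a column "a"):
>>> del df["a"]
>>> "a" in df
False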
"""
if key not in self:
raise KeyError(key)
self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
def __finalize__(self, other, method=None, **kwargs):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.__finalize__, other, method=method, **kwargs
)
def __copy__(self, deep=True):
"""Make a copy using modin.DataFrame.copy method
Args:
deep: Boolean, deep copy or not.
Currently we do not support deep copy.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""Make a -deep- copy using modin.DataFrame.copy method
This is equivalent to copy(deep=True).
Args:
memo: No effect. Just to comply with Pandas API.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=True)
def __and__(self, other):
return self.__bool__() and other
def __or__(self, other):
return self.__bool__() or other
def __xor__(self, other):
return self.__bool__() ^ other
def __lt__(self, other):
return self.lt(other)
def __le__(self, other):
return self.le(other)
def __gt__(self, other):
return self.gt(other)
def __ge__(self, other):
return self.ge(other)
def __eq__(self, other):
return self.eq(other)
def __ne__(self, other):
return self.ne(other)
def __add__(self, other):
return self.add(other)
def __iadd__(self, other):
return self.add(other)
def __radd__(self, other, axis="columns", level=None, fill_value=None):
return self.radd(other, axis, level, fill_value)
def __mul__(self, other):
return self.mul(other)
def __imul__(self, other):
return self.mul(other)
def __rmul__(self, other, axis="columns", level=None, fill_value=None):
return self.rmul(other, axis, level, fill_value)
def __pow__(self, other):
return self.pow(other)
def __ipow__(self, other):
return self.pow(other)
def __rpow__(self, other, axis="columns", level=None, fill_value=None):
return self.rpow(other, axis, level, fill_value)
def __sub__(self, other):
return self.sub(other)
def __isub__(self, other):
return self.sub(other)
def __rsub__(self, other, axis="columns", level=None, fill_value=None):
return self.rsub(other, axis, level, fill_value)
def __floordiv__(self, other):
return self.floordiv(other)
def __ifloordiv__(self, other):
return self.floordiv(other)
def __rfloordiv__(self, other, axis="columns", level=None, fill_value=None):
return self.rfloordiv(other, axis, level, fill_value)
def __truediv__(self, other):
return self.truediv(other)
def __itruediv__(self, other):
return self.truediv(other)
def __rtruediv__(self, other, axis="columns", level=None, fill_value=None):
return self.rtruediv(other, axis, level, fill_value)
def __mod__(self, other):
return self.mod(other)
def __imod__(self, other):
return self.mod(other)
def __rmod__(self, other, axis="columns", level=None, fill_value=None):
return self.rmod(other, axis, level, fill_value)
def __div__(self, other, axis="columns", level=None, fill_value=None):
return self.div(other, axis, level, fill_value)
def __rdiv__(self, other, axis="columns", level=None, fill_value=None):
return self.rdiv(other, axis, level, fill_value)
def __neg__(self):
"""Computes an element wise negative DataFrame
Returns:
A new DataFrame where every element is the negation of the original value
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.negative())
def __sizeof__(self):
return self._default_to_pandas(pandas.DataFrame.__sizeof__)
@property
def __doc__(self):
return self._query_compiler.to_pandas().__doc__
@property
def blocks(self):
return self._query_compiler.to_pandas().blocks
@property
def style(self):
return self._query_compiler.to_pandas().style
@property
def iat(self, axis=None):
from .indexing import _iLocIndexer
return _iLocIndexer(self)
@property
def loc(self):
"""Purely label-location based indexer for selection by label.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
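Example (hedged sketch; labels are hypothetical):
>>> df.loc["row_label"]              # single label
>>> df.loc[["r1", "r2"], ["col_a"]]  # lists of row and column labels
>>> df.loc["r1":"r3"]                # label-based slice, inclusive of both ends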
"""
from .indexing import _LocIndexer
return _LocIndexer(self)
@property
def is_copy(self):
return self._query_compiler.to_pandas().is_copy
@property
def at(self, axis=None):
from .indexing import _LocIndexer
return _LocIndexer(self)
@property
def ix(self, axis=None):
raise ErrorMessage.not_implemented("ix is not implemented.")
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
from .indexing import _iLocIndexer
return _iLocIndexer(self)
def _create_dataframe_from_compiler(self, new_query_compiler, inplace=False):
"""Returns or updates a DataFrame given new query_compiler"""
assert (
isinstance(new_query_compiler, type(self._query_compiler))
or type(new_query_compiler) in self._query_compiler.__class__.__bases__
), "Invalid Query Compiler object: {}".format(type(new_query_compiler))
if not inplace:
return DataFrame(query_compiler=new_query_compiler)
else:
self._update_inplace(new_query_compiler=new_query_compiler)
def _validate_other(
self,
other,
axis,
numeric_only=False,
numeric_or_time_only=False,
numeric_or_object_only=False,
comparison_dtypes_only=False,
):
"""Helper method to check validity of other in inter-df operations"""
axis = pandas.DataFrame()._get_axis_number(axis)
result = other
if isinstance(other, DataFrame):
return other._query_compiler
elif is_list_like(other):
other_dtypes = [type(x) for x in other]
if axis == 0:
if len(other) != len(self.index):
raise ValueError(
"Unable to coerce to Series, length must be {0}: "
"given {1}".format(len(self.index), len(other))
)
else:
if len(other) != len(self.columns):
raise ValueError(
"Unable to coerce to Series, length must be {0}: "
"given {1}".format(len(self.columns), len(other))
)
else:
other_dtypes = [
type(other)
for _ in range(len(self.index) if axis else len(self.columns))
]
# Do dtype checking
if numeric_only:
if not all(
is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)
for self_dtype, other_dtype in zip(self.dtypes, other_dtypes)
):
raise TypeError("Cannot do operation on non-numeric dtypes")
elif numeric_or_object_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype))
for self_dtype, other_dtype in zip(self.dtypes, other_dtypes)
):
raise TypeError("Cannot do operation non-numeric dtypes")
elif comparison_dtypes_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
or is_dtype_equal(self_dtype, other_dtype)
for self_dtype, other_dtype in zip(self.dtypes, other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
elif numeric_or_time_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
for self_dtype, other_dtype in zip(self.dtypes, other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
return result
def _validate_dtypes(self, numeric_only=False):
"""Helper method to check that all the dtypes are the same"""
dtype = self.dtypes[0]
for t in self.dtypes:
if numeric_only and not is_numeric_dtype(t):
raise TypeError("{0} is not a numeric data type".format(t))
elif not numeric_only and t != dtype:
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".format(t, dtype)
)
def _validate_dtypes_min_max(self, axis, numeric_only):
# If our DataFrame has both numeric and non-numeric dtypes then
# comparisons between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring any non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
axis
and numeric_only is False
and np.unique([ | is_numeric_dtype(dtype) | pandas.core.dtypes.common.is_numeric_dtype |
# Copyright (c) 2021, <NAME>, All rights reserved.
import copy
from pprint import pprint
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from data.based.transformers_enums import TransformersType
np.seterr(all='warn')
class BasedAnalyzer:
def __init__(self, dataset, cfg):
self._dataset = dataset
self._df = dataset.df
def head(self):
self.df.head()
def description(self, col=None):
if self.dataset.dataset_description_file is not None and col is None:
print("--------------- about dataset -----------------")
print(self.dataset.about)
print('\n')
print("--------------- dataframe info ----------------")
pprint(self.info())
print('\n')
print("---------- dataframe description --------------")
pprint(self.describe_dataframe())
print('\n')
if col is None:
print("--------------- dataframe info ----------------")
pprint(self.info())
print('\n')
print("---------- dataframe description --------------")
pprint(self.describe_dataframe())
print('\n')
print("--------------- dataframe columns -----------------")
pprint(self.df.columns)
print('\n')
print("--------------- nan Values -----------------")
print(self.missing_values().head(20))
print('\n')
else:
print("--------------- nan Values of {} -----------------".format(col))
print(self.missing_values(col=col))
print('\n')
if col is None:
print("--------------- duplicates -----------------")
else:
print("--------------- duplicates of {} -----------------".format(col))
print('Total number of duplicates: ', self.duplicates(col))
print('\n')
if col is None:
print("------ Numerical/Categorical Features ------")
print('Numerical Features: {}'.format(self.dataset.numerical_features()))
print('number of Numerical Features: {}'.format(self.dataset.numerical_features().__len__()))
print('Categorical Features: {}'.format(self.dataset.categorical_features()))
print('number of Categorical Features: {}'.format(self.dataset.categorical_features().__len__()))
print('\n')
if col is None:
print("--------------- skew & kurt -----------------")
else:
print("--------------- skew & kurt of {} -----------------".format(col))
print('calculate skewness and kurtosis of numerical features')
print(self.skew_kurt(col=col))
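# Illustrative reminder of what these figures mean (sketch on a throwaway
# pandas Series, not on this dataset):
#   s = pd.Series([1, 2, 2, 3, 10])
#   s.skew()   # > 0 -> long right tail
#   s.kurt()   # excess kurtosis; > 0 -> heavier tails than a normal distribution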
# Note: skewness is a measure of the asymmetry of the probability distribution of a
# real-valued random variable about its mean. Negative skew commonly indicates that the
# tail is on the left side of the distribution, and positive skew indicates that the
# tail is on the right.
# Note: kurtosis is a measure of the "tailedness" of the probability distribution of a
# real-valued random variable. Like skewness, kurtosis describes the shape of a
# probability distribution, and there are different ways of quantifying it for a
# theoretical distribution and corresponding ways of estimating it from a sample of a
# population.
print('\n')
if col is None:
print("----------------- quantiles -----------------")
else:
print("--------------- quantiles of {} -----------------".format(col))
print(self.quantiles(col=col))
print('\n')
if col is None:
print("----------------- is target balanced? -----------------")
print(self.count_by(col=self.target_col))
print('\n')
else:
print("----------------- Top 15 values in column of {} -----------------".format(col))
print(self.count_by(col=col).head(15))
print('\n')
def count_by(self, col):
new_df = self.df[col].value_counts().sort_values(ascending=False).reset_index()
new_df.columns = ['value', 'counts']
return new_df
def missing_values(self, col=None):
if col is None:
total = self.df.isnull().sum().sort_values(ascending=False)
percent = (self.df.isnull().sum() / self.df.isnull().count()).sort_values(ascending=False)
missing_data = | pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) | pandas.concat |
import json
import numpy
import pandas
from flair.embeddings import WordEmbeddings, FlairEmbeddings
from flair.embeddings import DocumentPoolEmbeddings, DocumentRNNEmbeddings, Sentence
with open('./data/params.json', 'r') as param_:
param = json.load(param_)
# in_data = pandas.read_excel(param['data']['opened'])
in_data = pandas.read_csv(param['data']['opened'], sep=';')
array = in_data[param['data']['text']].values
layers = param['model']['models']
#layers_source = param['model']['sources']
word_models = ['glove', 'turian', 'extvec', 'crawl', 'news', 'twitter', 'en-wiki', 'en-crawl']
flair_models = ['en-forward', 'en-backward', 'news-forward', 'news-backward', 'mix-forward', 'mix-backward']
for j in range(len(layers)):
if (layers[j] not in word_models) and (layers[j] not in flair_models):
raise KeyError("Not Enough Minerals")
if layers[j] in word_models:
layers[j] = WordEmbeddings(layers[j])
if layers[j] in flair_models:
layers[j] = FlairEmbeddings(layers[j])
embedding = None
if param['model']['agg'] == 'pooling':
# TODO: check if kwargs work
embedding = DocumentPoolEmbeddings(layers, **param['model']['options'])
if param['model']['agg'] == 'rnn':
# TODO: check if kwargs work
embedding = DocumentRNNEmbeddings(layers, **param['model']['options'])
if embedding is None:
raise KeyError("Insufficient vespine gas")
result = []
for x in array:
sentence = Sentence(x)
embedding.embed(sentence)
result.append(sentence.embedding.detach().numpy().reshape(1, -1))
result = numpy.concatenate(result, axis=0)
print('saving')
# pandas.DataFrame(result).to_excel('./data/gained.xlsx', index=False)
if 'code' in param:
code_ = param['model']['code'] + '_'
else:
code_ = ''
columns = ['E_FLR_{}{}'.format(code_, j) for j in range(result.shape[1])]
# pandas.DataFrame(data=result, columns=columns).to_excel(param['data']['closed'], index=False)
| pandas.DataFrame(data=result, columns=columns) | pandas.DataFrame |
from __init__ import OUTPUT
print("run _chemprop.sh in chemprop conda environment")
print("run _grover.sh in grover conda environment")
import pandas as pd
import numpy as np
import os
ROOT = os.path.dirname(os.path.abspath(__file__))
print("DEEP LEARNING PREDICTIONS")
df = pd.read_csv(os.path.join(OUTPUT, "data_12.csv"))
print(df.shape)
cp = pd.read_csv(os.path.join(ROOT, "_pred_chemprop.csv"))
gr = pd.read_csv(os.path.join(ROOT, "_pred_grover.csv"))
df = | pd.concat([df, cp, gr], axis=1) | pandas.concat |
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
from datetime import datetime, timedelta
import unittest
import numpy as np
import pandas as pd
from validation.scenario_generator import generate_scenario, get_raw_data, NPI_COLUMNS, MIN_NPIS, MAX_NPIS
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_PATH = os.path.join(ROOT_DIR, 'fixtures')
DATA_FILE = os.path.join(FIXTURES_PATH, "OxCGRT_latest.csv")
# Sets each NPI to level 1
ONE_NPIS = [1] * len(NPI_COLUMNS)
DATE_FORMAT = "%Y-%m-%d"
INCEPTION_DATE = "2020-01-01"
class TestScenarioGenerator(unittest.TestCase):
"""
Tests generating different NPI scenarios.
Definitions:
I = inception date = 2020-01-01 (earliest available data)
LK = last known date (for each country) in the latest available data
S = start date of the scenario
E = end date of the scenario
Time wise, the following kind of scenarios can be applied:
1. Counterfactuals: I____S_____E____LK where E can equal LK
2. Future: I____S_____LK___E where S can equal LK
3. Mind the gap: I____LK S____E
For each case, we check each type of scenario: freeze, MIN, MAX, custom
Scenarios can be applied to: 1 country, several countries, all countries
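Example (illustrative sketch of the call exercised below; names mirror this class):
>>> scenario_df = generate_scenario("2020-07-01", "2020-12-31", latest_df, ["Italy"], scenario="Freeze")
A custom scenario is instead a list with one row of NPI levels per day in the window.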
"""
@classmethod
def setUpClass(cls):
# Load the csv data only once
cls.latest_df = get_raw_data(DATA_FILE, latest=True)
def test_generate_scenario_counterfactual_freeze(self):
# Simulate Italy did not enter full lockdown on Mar 20, but instead waited 1 week before changing its NPIs
before_day = pd.to_datetime("2020-03-19", format=DATE_FORMAT)
frozen_npis_df = self.latest_df[(self.latest_df.CountryName == "Italy") &
(self.latest_df.Date == before_day)][NPI_COLUMNS].reset_index(drop=True)
frozen_npis = list(frozen_npis_df.values[0])
self._check_counterfactual("Freeze", frozen_npis)
def test_generate_scenario_counterfactual_min(self):
# Simulate Italy lifted all NPIs for a period
self._check_counterfactual("MIN", MIN_NPIS)
def test_generate_scenario_counterfactual_max(self):
# Simulate Italy maxed out all NPIs for a period
self._check_counterfactual("MAX", MAX_NPIS)
def test_generate_scenario_counterfactual_custom(self):
# Simulate Italy used custom NPIs for a period: each NPI set to 1 for 7 consecutive days
scenario = [ONE_NPIS] * 7
self._check_counterfactual(scenario, scenario[0])
def _check_counterfactual(self, scenario, scenario_npis):
# Simulate Italy lifted all NPI for this period
start_date_str = "2020-03-20"
end_date_str = "2020-03-26"
countries = ["Italy"]
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
self.assertIsNotNone(scenario_df)
# Misleading name but checks the elements, regardless of order
self.assertCountEqual(countries, scenario_df.CountryName.unique(), "Not the requested countries")
self.assertFalse(scenario_df["Date"].duplicated().any(), "Expected 1 row per date only")
start_date = pd.to_datetime(start_date_str, format=DATE_FORMAT)
end_date = pd.to_datetime(end_date_str, format=DATE_FORMAT)
before_day = start_date - np.timedelta64(1, 'D')
before_day_npis = scenario_df[scenario_df.Date == before_day][NPI_COLUMNS].reset_index(drop=True)
before_day_npis_truth = self.latest_df[(self.latest_df.CountryName == "Italy") &
(self.latest_df.Date == before_day)][NPI_COLUMNS].reset_index(drop=True)
# Check the day before the scenario is correct
pd.testing.assert_frame_equal(before_day_npis_truth, before_day_npis, "Not the expected frozen NPIs")
# For the right period (+1 to include start and end date)
nb_days = (end_date - start_date).days + 1
for i in range(nb_days):
check_day = start_date + np.timedelta64(i, 'D')
check_day_npis_df = scenario_df[scenario_df.Date == check_day][NPI_COLUMNS].reset_index(drop=True)
check_day_npis = list(check_day_npis_df.values[0])
self.assertListEqual(scenario_npis, check_day_npis)
# Check Mar 27 is different from frozen day
after_day = end_date + np.timedelta64(1, 'D')
after_day_npis_df = scenario_df[scenario_df.Date == after_day][NPI_COLUMNS].reset_index(drop=True)
self.assertTrue((scenario_npis - after_day_npis_df.values[0]).any(),
"Expected NPIs to be different")
# Check 27 is indeed equal to truth
after_day_npis_truth = self.latest_df[(self.latest_df.CountryName == "Italy") &
(self.latest_df.Date == after_day)
][NPI_COLUMNS].reset_index(drop=True)
pd.testing.assert_frame_equal(after_day_npis_truth, after_day_npis_df, "Not the expected unfrozen NPIs")
def test_generate_scenario_future_freeze(self):
# Simulate Italy froze its NPIs for the second half of the year
countries = ["Italy"]
start_date_str = "2020-07-01"
end_date_str = "2020-12-31"
scenario = "Freeze"
before_day = pd.to_datetime("2020-06-30", format=DATE_FORMAT)
frozen_npis_df = self.latest_df[(self.latest_df.CountryName == "Italy") &
(self.latest_df.Date == before_day)][NPI_COLUMNS].reset_index(drop=True)
scenario_npis = list(frozen_npis_df.values[0])
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario_npis,
country=countries[0])
def test_generate_scenario_future_min(self):
# Simulate Italy lifted all NPIs for a period
countries = ["Italy"]
start_date_str = "2020-07-01"
end_date_str = "2020-12-31"
scenario = "MIN"
scenario_npis = MIN_NPIS
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario_npis,
country=countries[0])
def test_generate_scenario_future_max(self):
# Simulate Italy maxed out all NPIs for a period
countries = ["Italy"]
start_date_str = "2020-07-01"
end_date_str = "2020-12-31"
scenario = "MAX"
scenario_npis = MAX_NPIS
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario_npis,
country=countries[0])
def test_generate_scenario_future_custom(self):
# Simulate Italy used custom NPIs for a period: each NPI set to 1 for 7 consecutive days
countries = ["Italy"]
start_date_str = "2020-07-01"
end_date_str = "2020-12-31"
start_date = pd.to_datetime(start_date_str, format=DATE_FORMAT)
end_date = pd.to_datetime(end_date_str, format=DATE_FORMAT)
nb_days = (end_date - start_date).days + 1 # +1 to include start date
scenario = [ONE_NPIS] * nb_days
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario[0],
country=countries[0])
def test_generate_scenario_future_from_last_known_date_freeze(self):
# Simulate Italy freezes its NPIs for the rest of the year
countries = ["Italy"]
start_date_str = None
end_date_str = "2020-12-31"
scenario = "Freeze"
last_known_date = self.latest_df[self.latest_df.CountryName == "Italy"].Date.max()
frozen_npis_df = self.latest_df[(self.latest_df.CountryName == "Italy") &
(self.latest_df.Date == last_known_date)][NPI_COLUMNS].reset_index(drop=True)
scenario_npis = list(frozen_npis_df.values[0])
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario_npis,
country=countries[0])
def test_generate_scenario_future_from_last_known_date_min(self):
# Simulate Italy lifts all NPIs for the rest of the year
countries = ["Italy"]
start_date_str = None
end_date_str = "2020-12-31"
scenario = "MIN"
scenario_npis = MIN_NPIS
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario_npis,
country=countries[0])
def test_generate_scenario_future_from_last_known_date_max(self):
# Simulate Italy maxes out NPIs for the rest of the year
countries = ["Italy"]
start_date_str = None
end_date_str = "2020-12-31"
scenario = "MAX"
scenario_npis = MAX_NPIS
# Generate the scenario
scenario_df = generate_scenario(start_date_str, end_date_str, self.latest_df, countries, scenario=scenario)
# Check it
self._check_future(start_date_str=start_date_str,
end_date_str=end_date_str,
scenario_df=scenario_df[scenario_df.CountryName == countries[0]],
scenario_npis=scenario_npis,
country=countries[0])
def test_generate_scenario_future_from_last_known_date_custom(self):
# Simulate Italy uses custom NPIs for the rest of the year
countries = ["Italy"]
last_known_date = self.latest_df[self.latest_df.CountryName == "Italy"].Date.max()
start_date = last_known_date + np.timedelta64(1, 'D')
end_date_str = "2020-12-31"
end_date = | pd.to_datetime(end_date_str, format=DATE_FORMAT) | pandas.to_datetime |
import glob
import os
import pandas as pd
def _load_accumulated_info(root_folder_path,
dataset_folder_name="PZA confidential/crops",
image_folder_name="crops"):
dataset_folder_path = os.path.join(root_folder_path, dataset_folder_name)
image_folder_path = os.path.join(dataset_folder_path, image_folder_name)
accumulated_info_list = []
for subdir in os.listdir(image_folder_path):
subdir_path = os.path.join(image_folder_path, subdir)
image_file_paths = glob.glob(os.path.join(subdir_path, "*.jpg"))
splits = subdir.split('_')
identity_id = splits[1]
class_id = splits[2]
if int(class_id) == 0:
# TODO: better scheme to select image
mid = int(len(image_file_paths) / 2)
for image_file_path in image_file_paths[mid-2:mid+2]:
camera_id = int(os.path.basename(image_folder_path)[:4])
# Append the records
accumulated_info = {
"image_file_path": image_file_path,
"identity_ID": identity_id,
"camera_ID": camera_id
}
accumulated_info_list.append(accumulated_info)
# Convert list to data frame
accumulated_info_dataframe = | pd.DataFrame(accumulated_info_list) | pandas.DataFrame |
from hw2 import cluster
from hw2 import io
import random
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def test_partition_clustering():
#to test, I created a data set of two features with 4 clusterings
x = np.append(np.random.binomial(10, 0.5, 50) + 50, np.random.binomial(10, 0.5, 50))
y1 = np.append(np.random.binomial(10, 0.5, 25), np.random.binomial(10, 0.5, 25) + 50)
y = np.append(y1, y1)
print(x,y)
dat = | pd.DataFrame({'x':x, 'y':y}) | pandas.DataFrame |
import json
import random
import numpy as np
import os
import glob
import pandas as pd
high = ['State']
med = ['AreaCode', 'HasChild', 'SingleExemp', 'Zip']
low = ['MaritalStatus', 'ChildExemp', 'City']
def holoclean_test_gen(dataset_path, report_path):
# read tax dataset
dataset = | pd.read_csv(dataset_path) | pandas.read_csv |
""" input/output """
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
def from_phy(path_to_data='/Users/myroshnychenkm2/Downloads/dataset/', sampling_frequency=30000):
"""
Get spikes from a kilosort/phy result folder
:param path_to_data:
:param sampling_frequency:
:return:
:id: neuron id, 1xN
:ts: corresponding spiketime, 1xN
"""
groupfname = os.path.join(path_to_data, 'cluster_groups.csv')
groups = | pd.read_csv(groupfname, delimiter='\t') | pandas.read_csv |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
| tm.assert_frame_equal(res, exp) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.testing as pdt
from tests.fixtures import DataTestCase
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters
from tsfresh.utilities.dataframe_functions import impute
class RelevantFeatureExtractionDataTestCase(DataTestCase):
"""
Test case for the relevant_feature_extraction function
"""
def test_functional_equality(self):
"""
`extract_relevant_features` should be equivalent to running first `extract_features` with impute and
`select_features` afterwards.
Meaning it should produce the same relevant features and the values of these features should be identical.
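Illustrative equivalence (pseudo-call using the names from this test):
extract_relevant_features(df, y, ...) should match
select_features(extract_features(df, ..., impute_function=impute), y)
restricted to the selected columns.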
:return:
"""
df, y = self.create_test_data_sample_with_target()
relevant_features = extract_relevant_features(
df,
y,
column_id="id",
column_value="val",
column_kind="kind",
column_sort="sort",
)
extracted_features = extract_features(
df,
column_id="id",
column_value="val",
column_kind="kind",
column_sort="sort",
impute_function=impute,
)
selected_features = select_features(extracted_features, y)
self.assertEqual(
set(relevant_features.columns),
set(selected_features.columns),
"Should select the same columns:\n\t{}\n\nvs.\n\n\t{}".format(
relevant_features.columns, selected_features.columns
),
)
relevant_columns = relevant_features.columns
relevant_index = relevant_features.index
self.assertTrue(
relevant_features.equals(
selected_features.loc[relevant_index][relevant_columns]
),
"Should calculate the same feature values",
)
class RelevantFeatureExtractionTestCase(TestCase):
def setUp(self):
np.random.seed(42)
y = pd.Series(np.random.binomial(1, 0.5, 20), index=range(20))
df = pd.DataFrame(index=range(100))
df["a"] = np.random.normal(0, 1, 100)
df["b"] = np.random.normal(0, 1, 100)
df["id"] = np.repeat(range(20), 5)
X = pd.DataFrame(index=range(20))
X["f1"] = np.random.normal(0, 1, 20)
X["f2"] = np.random.normal(0, 1, 20)
self.df = df
self.X = X
self.y = y
def test_extracted_features_contain_X_features(self):
X = extract_relevant_features(self.df, self.y, self.X, column_id="id")
self.assertIn("f1", X.columns)
self.assertIn("f2", X.columns)
pdt.assert_series_equal(self.X["f1"], X["f1"])
pdt.assert_series_equal(self.X["f2"], X["f2"])
pdt.assert_index_equal(self.X["f1"].index, X["f1"].index)
| pdt.assert_index_equal(self.X["f2"].index, X["f2"].index) | pandas.testing.assert_index_equal |
import pandas as pd
import jieba
import numpy
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import matplotlib
def drop_stopwords(content,stopwords):
content_clean=[]
all_words=[]
for line in content:
line_clean=[]
for word in line:
if word in stopwords:
continue
line_clean.append(word)
all_words.append(str(word))
content_clean.append(line_clean)
return content_clean,all_words
pd.set_option('display.width',None)
df_news=pd.read_csv('./data/Preliminary-finals.csv',names=['category','theme','URL','content'],encoding='utf-8',sep='\t')
df_news=df_news.dropna()
# print(df_news.head())
content=df_news.content.values.tolist()
# print(content[1000])
content_S=[]
for line in content:
current_segment=jieba.lcut(line)
if len(current_segment)>1 and current_segment!='\r\n':
content_S.append(current_segment)
# print(content_S[1000])
df_content=pd.DataFrame({'content_S':content_S})
stopwords=pd.read_csv('./data/stopwords.txt',index_col=False,sep='\t',quoting=3,names=['stopword'],encoding='utf-8')
contents=df_content.content_S.values.tolist()
stopwords=stopwords.stopword.values.tolist()
content_clean,all_words=drop_stopwords(contents,stopwords)
df_content=pd.DataFrame({'contentes_clean':content_clean})
df_all_words= | pd.DataFrame({'all_words':all_words}) | pandas.DataFrame |
"""
This is a module that allows you to connect to the Comscore library
developed by Annalect and obtain synthesized information.
"""
__author__ = "<NAME>, Data analytics Manager"
__email__ = "<EMAIL>"
__status__ = "planning"
from sqlalchemy import create_engine
from pandas import read_sql, DataFrame
import numpy as np
import pandas as pd
from collections import Counter
class comscore_omnicom_database:
"""Class to generate a connection to the OMG comscore database.
Example:
>>> comscore = comscore_omnicom_database(user = 'user_name', password = '<PASSWORD>')
>>> dataframe_time = comscore.domain_by_time(country = 'cl')
>>> dataframe_time.head(5)
"""
def __init__(self, user, password, endpoint = None):
"""Login into our database.
Note:
All users and passwords must have been provided by the annalect team.
Additionally, you must be connected to the Annalect VPN to reach the database; otherwise, you will get a connection error.
Args:
user (str): User name that will connect to the database. Delivered by the annalect team.
password (str): User password that will connect to the database. Delivered by the annalect team.
endpoint (:obj:`str`, optional): Database endpoint. By default the Redshift endpoint is used,
but you can provide a different one. This argument is optional.
"""
self.user = user
self.password = password
if isinstance(endpoint, type(None)):
self.engine_str = 'postgres://' + str(self.user) + ':' + str(self.password) + '@dsdk-v0p1-annalect.clf6bikxcquu.us-east-1.redshift.amazonaws.com:5439/dsdk'
else:
self.engine_str = 'postgres://' + str(self.user) + ':' + str(self.password) + str(endpoint)
self.engine = create_engine(self.engine_str)
self.connection = self.engine.connect()
def domain_by_time(self, country = None, start_date = None, end_date = None, event_like = False, domain = None, saved = False):
"""Function to generate a query to the database and obtain the total number of visits and reach in a period of time.
Note:
To use this method, you must have previously logged in.
Args:
country (str, required): Select the country code on which you want to create a query.
Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']
start_date (str, optional): Select from which date you want to see your data.
The default is 2019-01-01
end_date (str, optional): Select the maximum date to consult.
The default value is 2019-06-01
event_like (str, optional): Adds a SQL LIKE filter on event_detail; every word you pass is matched.
If you use event_like, your query will look like the following.
Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail LIKE '% event_like_value %'
domain (str, required): The specified domain you want to know about. This value is required.
saved (bool, optional): If saved is true then you must indicate the path where you want to save
the file and it will be saved on your local machine.
This is optional.
Returns:
DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.
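Example (hedged sketch; domain and dates are placeholders, not real data):
>>> df_time = comscore.domain_by_time(country='cl', start_date='2019-01-01', end_date='2019-03-31', domain='example.com', event_like='checkout')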
Raises:
TypeError: Raised if any argument has the wrong data type or if a mandatory value is missing.
"""
query = """
SELECT calendar_date as date, domain, event,
count(distinct guid) as total_reach,
count(guid) as total_visits, sum(time_spent) as total_time, total_time / total_visits as avg_time_per_visit
FROM spectrum_comscore.clickstream_{country_in}
where calendar_date between '{start_date_in}' and '{end_date_in}'
and domain = '{domain_in}'
{statement_like}
group by calendar_date, domain, event
"""
if isinstance(country, type(None)):
error = 'Define your country'
raise TypeError(error)
if isinstance(start_date, type(None)):
start_date = '2019-01-01'
print('Using the default start date of January 1, 2019')
if isinstance(end_date, type(None)):
end_date = '2019-06-01'
print('Using the default end date of June 1, 2019')
if isinstance(domain, type(None)):
error = 'Define your specific domain'
raise TypeError(error)
if not isinstance(event_like, type(False)):
if not isinstance(event_like, str):
error = 'Verify your event_like value: it must be a string'
raise TypeError(error)
else:
ilike = "and event_detail like '%%{}%%'".format(event_like)
else:
ilike = ''
print('You did not define an event_like statement')
setence = query.format(start_date_in = start_date, country_in = country, end_date_in = end_date, domain_in = domain, statement_like = ilike)
print(setence + '\n')
dataframe = read_sql(setence, con = self.connection)
if saved:
path_file = input('define your personal path to save the file: ')
dataframe.to_csv(path_file + '{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))
return dataframe
def custom_query_statement(self, query):
"""A function that allows you to create a custom query on the comscore database.
Note:
To use this method, you must have previously logged in.
Args:
query (str, required): Full statement of the query.
Returns:
DataFrame: If the process works correctly, you will get a dataframe with all the required data.
"""
return read_sql(query, con = self.connection)
def demographics_by_web(self, country = None, start_date = None, end_date = None,
event_like = False, domain = None, saved = False, age_group = False,
ages_between = None, gender_between = None):
"""A function that displays all demographic values for a specific domain.
Note:
To use this method, you must have previously logged in.
Args:
country (str, required): Select the country code on which you want to create a query.
Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']
start_date (str, optional): Select from which date you want to see your data.
The default is 2019-01-01
end_date (str, optional): Select the maximum date to consult.
The default value is 2019-06-01
event_like (str, optional): Adds a SQL SIMILAR TO filter on event_detail; every word you pass is matched.
If you use event_like, your query will look like the following.
Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %'
domain (str, required): The specified domain you want to know about. This value is required.
saved (bool, optional): If saved is true then you must indicate the path where you want to save
the file and it will be saved on your local machine.
This is optional.
age_group (bool, optional): If True, results are grouped into age ranges;
if False, the raw age value is returned.
ages_between (tuple, optional): A tuple value, which must have two integer values (min age, max age).
gender_between (tuple, optional): A tuple that contains the strings of all the genders that you want to examine.
example: ('Male', 'Female', 'other')
Returns:
DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.
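Example (hedged sketch; values are placeholders, not real data):
>>> df_demo = comscore.demographics_by_web(country='cl', domain='example.com', age_group=True, ages_between=(18, 54), gender_between=('Male', 'Female'))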
Raises:
TypeError: Raised if any argument has the wrong data type or if a mandatory value is missing.
"""
query = """
SELECT main_base.domain, main_base.device, user_base.gender,
{age_group_statement},
user_base.children_present,
count(distinct main_base.guid) as reach, count(main_base.guid) as visits, count(main_base.machine_id) as number_of_devices,
sum(main_base.time_spent) as total_time, total_time / visits as avg_time_per_visit
FROM spectrum_comscore.clickstream_{country_in} main_base
LEFT JOIN
(select person_id, gender, age, children_present
from spectrum_comscore.person_demographics_{country_in}
where date >= '{start_date_in}'
group by person_id, gender, age, children_present) user_base
ON main_base.guid = user_base.person_id
where calendar_date between '{start_date_in}' and '{end_date_in}'
and domain = '{domain_in}'
{statement_like}
{statement_ages}
group by 1,2,3,4,5
"""
if isinstance(country, type(None)):
error = 'Define your country'
raise TypeError(error)
if isinstance(start_date, type(None)):
start_date = '2019-01-01'
print('No start_date was provided; the default of January 1, 2019 will be used')
if isinstance(end_date, type(None)):
end_date = '2019-06-01'
print('No end_date was provided; the default of June 1, 2019 will be used')
if isinstance(domain, type(None)):
error = 'Define your specific domain'
raise TypeError(error)
if isinstance(ages_between, type(None)):
ages_state = ''
else:
if not isinstance(ages_between, tuple):
error = 'Verify your ages_between value; it must be a tuple'
raise TypeError(error)
else:
ages_state = 'and user_base.age between ' + str(ages_between[0]) + ' and ' + str(ages_between[1])
if not isinstance(event_like, type(False)):
if not isinstance(event_like, str):
error = 'Verify your event_like value; it must be a string'
raise TypeError(error)
else:
ilike = "and event_detail SIMILAR TO '%%{}%%'".format(event_like)
else:
ilike = ''
print('No event_like statement was defined')
if age_group:
age_group_value = """
CASE
WHEN user_base.age > 13 AND user_base.age <= 17 THEN '13-17'
WHEN user_base.age > 17 AND user_base.age <= 34 THEN '18-34'
WHEN user_base.age > 34 AND user_base.age <= 54 THEN '35-54'
WHEN user_base.age > 54 THEN '55+'
ELSE
'Undetermined'
END
AS age_group
"""
else:
age_group_value = """ user_base.age """
sentence = query.format(start_date_in = start_date, end_date_in = end_date, domain_in = domain, statement_like = ilike,
country_in = country, age_group_statement = age_group_value,
statement_ages = ages_state)
print(sentence + '\n')
dataframe = read_sql(sentence, con = self.connection)
dataframe.gender = dataframe.gender.astype(str).replace('nan', 'Undetermined')
dataframe.children_present = dataframe.children_present.astype(str).replace('nan', 'Undetermined')
if isinstance(gender_between, type(None)):
pass
else:
if not isinstance(gender_between, tuple):
error = 'Gender is not a tuple'
raise TypeError(error)
else:
dataframe = dataframe.query('gender in {}'.format(gender_between))
if saved:
path_file = input('define your personal path to save the file: ')
dataframe.to_csv(path_file + 'demographics_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))
return dataframe
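# A minimal usage sketch for demographics_by_web, with hypothetical argument
# values (`client` is an already logged-in instance of this class and
# 'example.com' is a placeholder domain):
#
#   demo = client.demographics_by_web(country = 'cl', domain = 'example.com',
#                                     start_date = '2019-01-01', end_date = '2019-03-01',
#                                     age_group = True, ages_between = (18, 54),
#                                     gender_between = ('Male', 'Female'))
#   demo.groupby('age_group').reach.sum()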
def correlation_between_domains(self, country = None, start_date = None, end_date = None, url_site = None,
domain_name_like = False, reach_greater_than = 8, corr_greater = 0.75, saved = False):
"""Correlation between a specific domain and others.
Note:
To use this method, you must have previously logged in.
Args:
country (str, required): Select the country code on which you want to create a query.
Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']
start_date (str, optional): Select from which date you want to see your data.
The default is 2019-01-01
end_date (str, optional): Select the maximum date to consult.
The default value is 2019-06-01
url_site (str, required): The main domain whose daily reach is correlated against the other domains its visitors browse.
domain_name_like (str, optional): An additional WHERE condition that applies a SQL pattern match on event_detail.
If you use it, the query will include a clause like the following.
Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %'
reach_greater_than (int, optional): This indicator by default has a value of eight and represents
that all the pages that are studied for the correlation must have a scope greater than 8 people.
You can adjust this number, using this argument.
corr_greater (float, optional): It is a filter to show only those pages that have a
correlation with the main domain greater than 0.75, which is the default value.
You can adjust this number, using this argument.
saved (bool, optional): If saved is true then you must indicate the path where you want to save
the file and it will be saved on your local machine.
This is optional.
Returns:
DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.
Raises:
TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained,
also if some mandatory value is not delivered to the function.
"""
query = """
select calendar_date as date, domain, count(distinct guid) as reach, count(guid) as visits
from spectrum_comscore.clickstream_{country_in}
where domain = '{url_site_in}'
{domain_like_statement}
and calendar_date between '{start_date_in}' and '{end_date_in}'
group by date, domain
having reach >= {reach_greater_than_in}
UNION ALL
select calendar_date as date, domain, count(distinct guid) as reach, count(guid) as visits
from spectrum_comscore.clickstream_{country_in}
where guid in (select guid from spectrum_comscore.clickstream_{country_in}
where domain = '{url_site_in}'
{domain_like_statement}
group by guid)
and calendar_date between '{start_date_in}' and '{end_date_in}'
and domain not in ('facebook.com', 'netflix.com', 'google.com', 'gmail.com', 'twitter.com', 'google.cl', 'instagram.com', 'youtube.com')
group by date, domain
having count(distinct guid) >= {reach_greater_than_in}
"""
if isinstance(country, type(None)):
error = 'Define your country'
raise TypeError(error)
if not isinstance(domain_name_like, type(False)):
if not isinstance(domain_name_like, str):
error = 'Verify your domain_name_like value; it must be a string'
raise TypeError(error)
else:
domainlike = "or event_detail SIMILAR TO '%%{}%%' ".format(domain_name_like)
else:
domainlike = ''
print('No domain_name_like statement was defined')
if isinstance(url_site, type(None)):
error = 'Define the url of the site'
raise TypeError(error)
if isinstance(start_date, type(None)):
start_date = '2019-01-01'
print('No start_date was provided; the default of January 1, 2019 will be used')
if isinstance(end_date, type(None)):
end_date = '2019-05-01'
print('No end_date was provided; the default of May 1, 2019 will be used')
sentence = query.format(country_in = country, start_date_in = start_date, end_date_in = end_date,
url_site_in = url_site, domain_like_statement = domainlike,
reach_greater_than_in = reach_greater_than)
print(sentence + '\n')
dataframe = read_sql(sentence,
con = self.connection)
dataframe = dataframe.drop_duplicates().pivot(index='date', columns='domain', values='reach')
dataframe_corr = dataframe.corr(method='pearson')
if domain_name_like == False:
dataframe_uniq_matrix = dataframe_corr[[url_site]]
final_corr = dataframe_uniq_matrix[dataframe_uniq_matrix[url_site] >= corr_greater]
else:
dataframe_uniq_matrix = dataframe_corr[dataframe_corr.index.str.contains(domain_name_like)]
mask = ~(dataframe_uniq_matrix.mask(np.eye(len(dataframe_uniq_matrix), dtype=bool)).abs() > corr_greater).any()
final_corr = dataframe_uniq_matrix.loc[mask, mask]
if saved:
path_file = input('define your personal path to save the file: ')
final_corr.to_csv(path_file + 'corr_{}_{}_{}.csv'.format(url_site.replace('.', '-'), start_date, end_date))
return dataframe, dataframe_corr, dataframe_uniq_matrix, final_corr
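# A minimal usage sketch for correlation_between_domains. The method returns four
# frames: the daily reach pivot, the full Pearson correlation matrix, the slice
# for the requested site, and the correlations filtered by `corr_greater`.
# The values below are hypothetical (`client` is an already logged-in instance):
#
#   pivot, corr, site_corr, top_corr = client.correlation_between_domains(
#       country = 'cl', url_site = 'example.com',
#       start_date = '2019-01-01', end_date = '2019-05-01',
#       reach_greater_than = 8, corr_greater = 0.75)
#   top_corr.sort_values('example.com', ascending = False)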
def overlaps_between_pages(self, country = None, start_date = None, end_date = None,
domain = None, competitors = None, like_domain = None,
like_competitors = None, saved = False):
"""A function to obtain the interdomain overexposure.
Note:
To use this method, you must have previously logged in.
Args:
country (str, required): Select the country code on which you want to create a query.
Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']
start_date (str, optional): Select from which date you want to see your data.
The default is 2019-01-01
end_date (str, optional): Select the maximum date to consult.
The default value is 2019-06-01
domain (str, required): The main domain on which we want to compute the overlap.
competitors (tuple, required): All competitors stored in a tuple, where the inner values are strings
like_domain (str, optional): An additional WHERE condition that applies a SQL pattern match on event_detail for the main domain.
If you use it, the query will include a clause like the following.
Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %'
like_competitors (str, optional): An additional WHERE condition that applies a SQL pattern match on event_detail for the competitors.
If you use it, the query will include a clause like the following.
Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %'
saved (bool, optional): If saved is true then you must indicate the path where you want to save
the file and it will be saved on your local machine.
This is optional.
Returns:
DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.
Raises:
TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained,
also if some mandatory value is not delivered to the function.
"""
testing = """
select guid, domain
from spectrum_comscore.clickstream_{country_in}
where domain = '{domain_in}'
{main_domain_statement_like}
and calendar_date between '{start_date_in}' and '{end_date_in}'
group by guid, domain
UNION ALL
select guid, domain
from spectrum_comscore.clickstream_{country_in}
where domain in {competidors_in}
{competidors_statement_like}
and calendar_date between '{start_date_in}' and '{end_date_in}'
group by guid, domain
"""
if isinstance(country, type(None)):
error = 'Define your country'
raise TypeError(error)
if isinstance(domain, type(None)):
error = 'Define your domain'
raise TypeError(error)
if isinstance(competitors, type(None)):
error = 'Define your competitors'
raise TypeError(error)
else:
if not isinstance(competitors, tuple):
error = 'Competitors must be provided as a tuple'
raise TypeError(error)
else:
pass
if isinstance(start_date, type(None)):
start_date = '2019-01-01'
print('No start_date was provided; the default of January 1, 2019 will be used')
if isinstance(end_date, type(None)):
end_date = '2019-06-01'
print('No end_date was provided; the default of June 1, 2019 will be used')
if isinstance(like_domain, type(None)):
domainlike = ''
else:
if not isinstance(like_domain, str):
error = 'like_domain is not a string'
raise TypeError(error)
else:
domainlike = "and event_detail SIMILAR TO '%%{}%%'".format(like_domain)
if isinstance(like_competitors, type(None)):
competitorslike = ''
else:
if not isinstance(like_competitors, str):
error = 'like_competitors is not a string'
raise TypeError(error)
else:
competitorslike = "and event_detail SIMILAR TO '%%{}%%'".format(like_competitors)
sentence = testing.format(country_in = country, start_date_in = start_date, end_date_in = end_date,
domain_in = domain, competidors_in = competitors,
main_domain_statement_like = domainlike,
competidors_statement_like = competitorslike)
print(sentence, '\n')
tests = read_sql(sentence,
con = self.connection)
#create unique list of names
uniqueNames = tests.domain.unique()
print(uniqueNames)
#create a data frame dictionary to store your data frames
DataFrameDict = {elem : pd.DataFrame for elem in uniqueNames}
my_list = []
for key in DataFrameDict.keys():
DataFrameDict[key] = tests[:][tests.domain == key].reset_index(drop = True)
my_list.append(list(tests[:][tests.domain == key].reset_index(drop = True).guid))
frame = pd.DataFrame()
for index in range(len(my_list)):
lista_final = [list(filter(lambda x: x in my_list[index], sublist)) for sublist in my_list]
mt = [len(x) / len(my_list[index]) for x in lista_final]
frame = pd.concat([frame, DataFrame(mt)], axis = 1)
frame.columns = list(uniqueNames)
frame.index = list(uniqueNames)
if saved:
path_file = input('define your personal path to save the file: ')
frame.to_csv(path_file + 'overlap_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))
return DataFrameDict, frame
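# A minimal usage sketch for overlaps_between_pages, with hypothetical domains
# (`client` is an already logged-in instance of this class). As computed above,
# frame.loc[row_domain, col_domain] is the share of col_domain's visitors who
# also visited row_domain during the selected period:
#
#   per_domain, overlap = client.overlaps_between_pages(
#       country = 'cl', domain = 'example.com',
#       competitors = ('competitor-a.com', 'competitor-b.com'),
#       start_date = '2019-01-01', end_date = '2019-06-01')
#   overlap['example.com']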
def bayesian_inference_over_sites(self, country = None, domain = None,
time_spent = None, start_date = None, end_date = None, saved = False):
"""A function that uses the Bayes theorem to calculate which sites are most likely to be visited by a user who visits our site.
Note:
To use this method, you must have previously logged in.
Args:
country (str, required): Select the country code on which you want to create a query.
Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']
start_date (str, optional): Select from which date you want to see your data.
The default is 2019-01-01
end_date (str, optional): Select the maximum date to consult.
The default value is 2019-06-01
domain (str, required): The main domain whose visitors are used as the reference group.
time_spent (int, optional): The minimum amount of time, in seconds, a user has to spend on the site and on the other sites to be examined.
The default value is 300.
Returns:
DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.
Raises:
TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained,
also if some mandatory value is not delivered to the function.
"""
query = """
WITH my_table_3 as (
select domain
from spectrum_comscore.clickstream_{country_in}
where guid in (select guid from spectrum_comscore.clickstream_{country_in}
where domain = '{domain_in}'
and calendar_date between '{start_date_in}' and '{end_date_in}'
and time_spent >= {time_spent_in}
group by guid)
and time_spent >= {time_spent_in}
group by domain
),
my_table_4 as (
select guid
from spectrum_comscore.clickstream_{country_in}
where guid in (select guid from spectrum_comscore.clickstream_{country_in}
where domain = '{domain_in}'
and calendar_date between '{start_date_in}' and '{end_date_in}'
and time_spent >= {time_spent_in}
group by guid)
group by guid
)
SELECT domain, 'visitors' as type, count(distinct guid) as reach
from spectrum_comscore.clickstream_{country_in}
where domain in (select domain from my_table_3) and guid in (select guid from my_table_4)
and domain not in ('facebook.com', 'netflix.com', 'google.com', 'gmail.com', 'twitter.com', 'google.cl', 'instagram.com', 'youtube.com', 'bing.com', 'whatsapp.com', 'msn.com', 'live.com', '{domain_in}')
group by 1,2
UNION ALL
SELECT domain, 'outsiders' as type, count(distinct guid) as reach
from spectrum_comscore.clickstream_{country_in}
where domain in (select domain from my_table_3) and guid not in (select guid from my_table_4)
and domain not in ('facebook.com', 'netflix.com', 'google.com', 'gmail.com', 'twitter.com', 'google.cl', 'instagram.com', 'youtube.com', 'bing.com', 'whatsapp.com', 'msn.com', 'live.com')
group by 1,2
"""
if isinstance(country, type(None)):
error = 'Define your country'
raise TypeError(error)
if isinstance(domain, type(None)):
error = 'Define your domain'
raise TypeError(error)
if isinstance(time_spent, type(None)):
time_spent = 300
print('No time_spent was provided; the default of 300 seconds will be used')
if isinstance(start_date, type(None)):
start_date = '2019-01-01'
print('No start_date was provided; the default of January 1, 2019 will be used')
if isinstance(end_date, type(None)):
end_date = '2019-06-01'
print('No end_date was provided; the default of June 1, 2019 will be used')
sentence = query.format(country_in = country, start_date_in = start_date, end_date_in = end_date,
domain_in = domain, time_spent_in = time_spent)
print(sentence, '\n')
table3 = read_sql(sentence, con = self.connection)
pivot_table_3 = pd.pivot_table(table3, values = 'reach', index = 'type', columns = 'domain')
dataframe_probs_a = pd.DataFrame(columns=['domain', 'p(a)', 'p(x | a)'])
totals = pivot_table_3.iloc[0].sum() + pivot_table_3.iloc[1].sum()
for indexs in range(pivot_table_3.shape[1]):
dataframe_probs_a.loc[indexs] = [str(pivot_table_3.iloc[:,indexs].name),
pivot_table_3.iloc[:,indexs].sum() / totals,
pivot_table_3.iloc[:,indexs].visitors / pivot_table_3.iloc[:,indexs].sum()]
dataframe_probs_a['p(a)*p(x | a)'] = dataframe_probs_a['p(a)'] * dataframe_probs_a['p(x | a)']
dataframe_probs_a['bayes'] = dataframe_probs_a['p(a)*p(x | a)'] / dataframe_probs_a['p(a)*p(x | a)'].sum()
dataframe_probs_a['bayes %'] = dataframe_probs_a['bayes'] * 100
dataframe_probs_a.sort_values('bayes %', ascending = False,inplace=True)
dataframe_probs_a.reset_index(drop = True, inplace = True)
short_frame = dataframe_probs_a[dataframe_probs_a['bayes %'] > 0.4].reset_index(drop = True)
if saved:
path_file = input('define your personal path to save the file: ')
short_frame.to_csv(path_file + 'bayesianinference_sites_prob_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))
return dataframe_probs_a, short_frame
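# A sketch of the probability bookkeeping used above, with made-up numbers.
# For each candidate domain a, given the pivot of 'visitors' (people who also
# visit our domain) and 'outsiders' (people who do not):
#
#   p(a)     = (visitors_a + outsiders_a) / total over all candidate domains
#   p(x | a) = visitors_a / (visitors_a + outsiders_a)
#   bayes_a  = p(a) * p(x | a) / sum over a' of p(a') * p(x | a')
#
# For example, if a domain has 30 overlapping visitors and 70 outsiders out of a
# grand total of 1000, then p(a) = 0.1, p(x | a) = 0.3, p(a) * p(x | a) = 0.03,
# and that product is normalised against every other candidate domain.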
def bayesian_site_predictor(self, country = None,
domain = None, time_spent = None,
start_date = None, end_date = None, saved = False):
"""A function to know which is the next most likely site to visit after a user visits our site.
Note:
To use this method, you must have previously logged in.
Args:
country (str, required): Select the country code on which you want to create a query.
Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']
start_date (str, optional): Select from which date you want to see your data.
The default is 2019-01-01
end_date (str, optional): Select the maximum date to consult.
The default value is 2019-06-01
domain (str, required): The main domain whose visitors' subsequent visits are analyzed.
time_spent (int, optional): The minimum amount of time, in seconds, a user has to spend on the site and on the other sites to be examined.
The default value is 300.
Returns:
DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.
Raises:
TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained,
also if some mandatory value is not delivered to the function.
"""
query_1 = """
SELECT a.guid, concat(concat(b.domain,'_'),a.domain) AS trans_domain, b.positive_interval_time, b.timestamp_date_min,
min(a.visit_date_event) as min_visit
FROM
(select guid, domain, CONVERT(datetime,dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' visit_date_event
from spectrum_comscore.clickstream_{country_in}
where guid in (select guid from spectrum_comscore.clickstream_{country_in}
where domain = '{domain_in}' and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}
group by guid) and time_spent >= 30
group by guid, domain, event_time) a
LEFT JOIN
(select guid, domain, min(timestamp_date) as timestamp_date_min,
timestamp_date_min + INTERVAL '1 hour' as positive_interval_time
from (
select guid, domain,
CONVERT(datetime, dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' timestamp_date
from spectrum_comscore.clickstream_{country_in}
where domain = '{domain_in}' and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}
) group by guid, domain) b
ON a.guid = b.guid
where a.visit_date_event <= b.positive_interval_time and a.visit_date_event > b.timestamp_date_min
group by 1,2,3,4
having trans_domain not in ('{domain_in}_{domain_in}')
"""
if isinstance(country, type(None)):
error = 'Define your country'
raise TypeError(error)
if isinstance(domain, type(None)):
error = 'Define your domain'
raise TypeError(error)
if isinstance(time_spent, type(None)):
time_spent = 300
print('No time_spent was provided; the default of 300 seconds will be used')
if isinstance(start_date, type(None)):
start_date = '2019-01-01'
print('No start_date was provided; the default of January 1, 2019 will be used')
if isinstance(end_date, type(None)):
end_date = '2019-06-01'
print('No end_date was provided; the default of June 1, 2019 will be used')
sentence1 = query_1.format(country_in = country, start_date_in = start_date,
end_date_in = end_date,
domain_in = domain, time_spent_in = time_spent)
print(sentence1, '\n')
tests = read_sql(sentence1, con = self.connection)
new = tests.sort_values(by = ['guid', 'min_visit'])
new.drop_duplicates(subset = 'guid', keep = 'first', inplace = True)
new['list_domains'] = new.trans_domain.str.split('_')
list_of_domains = list(new['list_domains'])
list_a = map(tuple, list_of_domains) #must convert to tuple because list is an unhashable type
final_count = Counter(list_a)
dataframe_of_visits = pd.DataFrame.from_dict(final_count, orient = 'index')
dataframe_of_visits['other_index'] = dataframe_of_visits.index
dataframe_of_visits.reset_index(inplace = True, drop = True)
dataframe_of_visits['list'] = dataframe_of_visits.other_index.apply(list)
dataframe_of_visits['list_str'] = dataframe_of_visits['list'].apply(','.join)
dataframe_of_visits[['orig', 'desti']] = [sub.split(",") for sub in dataframe_of_visits.list_str]
dataframe_of_visits['type'] = 'from_me_to_destiny'
dataframe_of_visits.columns = ['reach', 'indexs', 'list_index', 'list_index_str', 'orig', 'domain', 'type']
final_a_frame = dataframe_of_visits[['domain', 'type', 'reach']]
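# final_a_frame summarises the 'from_me_to_destiny' transitions: for each
# destination domain, the number of users whose first visit within one hour of
# hitting the main domain landed on that destination.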
second_query = """
SELECT a.guid, b.domain as orig, a.domain as desti, b.positive_interval_time, b.timestamp_date_min, min(a.visit_date_event) as min_visit
FROM
(select guid, domain, CONVERT(datetime,dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' visit_date_event
from spectrum_comscore.clickstream_{country_in}
where guid in (select guid from spectrum_comscore.clickstream_{country_in}
where domain in {domain_in} and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}
group by guid) and time_spent >= 30
group by guid, domain, event_time) a
LEFT JOIN
(select guid, domain, min(timestamp_date) as timestamp_date_min,
timestamp_date_min + INTERVAL '1 hour' as positive_interval_time
from (
select guid, domain,
CONVERT(datetime, dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' timestamp_date
from spectrum_comscore.clickstream_{country_in}
where domain in {domain_in} and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}
) group by guid, domain) b
ON a.guid = b.guid
where a.visit_date_event <= b.positive_interval_time and a.visit_date_event > b.timestamp_date_min
and a.domain != b.domain
group by 1,2,3,4,5
"""
optilist = list(dataframe_of_visits.domain)
strings = ','.join(optilist)
my_tuple = tuple(strings.split(','))
sentence2 = second_query.format(country_in = country, start_date_in = start_date,
domain_in = my_tuple, time_spent_in = time_spent,
end_date_in = end_date)
print(sentence2, '\n')
second_frame_test = read_sql(sentence2, con = self.connection)
to_my_domain = second_frame_test[second_frame_test.desti == domain]
new = to_my_domain.sort_values(by = ['guid', 'min_visit']).reset_index(drop = True)
new.drop_duplicates(subset = 'guid', keep = 'first', inplace = True)
new['trans_domain'] = new.orig + '_' + new.desti
new['list_domains'] = new.trans_domain.str.split('_')
list_of_domains = list(new['list_domains'])
list_a = map(tuple, list_of_domains) #must convert to tuple because list is an unhashable type
final_count = Counter(list_a)
dataframe_of_visits_to_me = pd.DataFrame.from_dict(final_count, orient = 'index')
dataframe_of_visits_to_me['other_index'] = dataframe_of_visits_to_me.index
dataframe_of_visits_to_me.reset_index(inplace = True, drop = True)
dataframe_of_visits_to_me['list'] = dataframe_of_visits_to_me.other_index.apply(list)
dataframe_of_visits_to_me['list_str'] = dataframe_of_visits_to_me['list'].apply(','.join)
dataframe_of_visits_to_me[['orig', 'desti']] = [sub.split(",") for sub in dataframe_of_visits_to_me.list_str]
dataframe_of_visits_to_me['type'] = 'from_domains_to_me'
dataframe_of_visits_to_me.columns = ['reach', 'indexs', 'list_index', 'list_index_str', 'domain', 'desti', 'type']
final_b_frame = dataframe_of_visits_to_me[['domain', 'type', 'reach']]
to_others_domain = second_frame_test[second_frame_test.desti != domain]
new = to_others_domain.sort_values(by = ['guid', 'min_visit']).reset_index(drop = True)
new.drop_duplicates(subset = 'guid', keep = 'first', inplace = True)
new['trans_domain'] = new.orig + '_' + new.desti
new['list_domains'] = new.trans_domain.str.split('_')
list_of_domains = list(new['list_domains'])
list_a = map(tuple, list_of_domains) #must convert to tuple because list is an unhashable type
final_count = Counter(list_a)
dataframe_of_visits_to_others = pd.DataFrame.from_dict(final_count, orient = 'index')
dataframe_of_visits_to_others['other_index'] = dataframe_of_visits_to_others.index
dataframe_of_visits_to_others.reset_index(inplace = True, drop = True)
dataframe_of_visits_to_others['list'] = dataframe_of_visits_to_others.other_index.apply(list)
dataframe_of_visits_to_others['list_str'] = dataframe_of_visits_to_others['list'].apply(','.join)
dataframe_of_visits_to_others[['orig', 'desti']] = [sub.split(",") for sub in dataframe_of_visits_to_others.list_str]
dataframe_of_visits_to_others['type'] = 'from_domains_to_others'
dataframe_of_visits_to_others.columns = ['reach', 'indexs', 'list_index', 'list_index_str', 'orig', 'domain', 'type']
final_c_frame = dataframe_of_visits_to_others[['domain', 'type', 'reach']]
final_frame = pd.concat([final_a_frame, final_b_frame, final_c_frame])
pivot_table_3 = pd.pivot_table(final_frame, values = 'reach', index = 'type', columns = 'domain').fillna(0)
dataframe_probs_a = pd.DataFrame(columns=['domain', 'p(a)', 'split'])
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
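# Note: to_hdf forwards min_itemsize to the table writer, so the same
# index-sizing behaviour as store.put/store.append applies via the
# top-level API as well.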
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
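# Note: data_columns stores the listed columns as their own queryable
# (and indexable) columns in the table, which is what allows where-clause
# selection such as "B>0" or "string='foo'" in the tests below.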
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
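# Note: PyTables column indexes are created for the indexable axis and any
# data_columns by default; passing index= restricts which columns are
# indexed, and only table-format stores can be indexed at all.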
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
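# Note: timedelta data columns accept Timedelta-like strings in where
# clauses (e.g. "C<'-3D'"), which are parsed the same way as pd.Timedelta.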
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failure on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a float NaN not in the first position is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
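# Note: iterator=True and chunksize=N both return the selection in pieces
# via a TableIterator; this only works for table-format storers, which is
# why the fixed-format reads below raise TypeError.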
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom this should return [];
# e.g. `for e in []: print(True)` never prints True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
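# Note: index attributes (freq, tz, name) are stored as table metadata;
# appending data whose attributes conflict drops the stored value and emits
# an AttributeConflictWarning, which the filterwarnings marks below ignore.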
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
dtype = {
'b': CategoricalDtype([pd.Timestamp("2014")])
}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_timedelta(self):
dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
data = "b\n1H\n2H\n3H"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_unexpected_categories(self):
dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = pd.DataFrame({"b": Categorical(list('dacd'),
dtype=dtype['b'])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ['a', 'b', 'c']
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'],
categories=cats)}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'],
categories=cats)},
index=[2, 3])]
dtype = CategoricalDtype(cats)
actuals = self.read_csv(StringIO(data), dtype={'b': dtype},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
# see gh-9424
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one.1', dtype='f')], axis=1)
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
data = ''
result = self.read_csv(StringIO(data), names=['one', 'one'],
dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_raise_on_passed_int_dtype_with_nas(self):
# see gh-2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
sep=",", skipinitialspace=True,
dtype={'DOY': np.int64})
def test_dtype_with_converter(self):
data = """a,b
1.1,2.2
1.2,2.3"""
# dtype spec is ignored if a converter is specified
with tm.assert_produces_warning(ParserWarning):
result = self.read_csv(StringIO(data), dtype={'a': 'i8'},
converters={'a': lambda x: str(x)})
expected = DataFrame({'a': ['1.1', '1.2'], 'b': [2.2, 2.3]})
tm.assert_frame_equal(result, expected)
def test_empty_dtype(self):
# see gh-14712
data = 'a,b'
expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'a': pd.Categorical([]),
'b': pd.Categorical([])},
index=[])
result = self.read_csv(StringIO(data), header=0,
dtype='category')
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), header=0,
dtype={'a': 'category', 'b': 'category'})
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
result = self.read_csv(StringIO(data), header=0,
dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'a': pd.Series([], dtype='timedelta64[ns]'),
'b': pd.Series([], dtype='timedelta64[ns]')},
index=[])
result = self.read_csv(StringIO(data), header=0,
dtype='timedelta64[ns]')
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.float64)
result = self.read_csv(StringIO(data), header=0,
dtype={'a': np.float64})
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.float64)
result = self.read_csv(StringIO(data), header=0,
dtype={0: np.float64})
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'])
#!/usr/bin/python3
import sys
import pandas as pd
import numpy as np
import os
import concurrent.futures
import functools, itertools
import sofa_time
import statistics
import multiprocessing as mp
import socket
import ipaddress
# sys.path.insert(0, '/home/st9540808/Desktop/sofa/bin')
import sofa_models, sofa_preprocess
import sofa_config
import sofa_print
colors_send = ['#14f2e0', '#41c8e5', '#6e9eeb']
colors_recv = ['#9a75f0', '#c74bf6', '#f320fa', '#fe2bcc']
color_send = itertools.cycle(colors_send)
color_recv = itertools.cycle(colors_recv)
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit", # 13
"msg_id"] # 14
# @profile
def extract_individual_rosmsg(df_send_, df_recv_, *df_others_):
""" Return a dictionary with topic name as key and
a list of ros message as value.
Structure of return value: {topic_name: {(guid, seqnum): log}}
where (guid, seqnum) is a msg_id
"""
# Convert timestamp to unix time
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, *df_others):
# df['ts'] = df['ts'] + unix_time_off
df_send_[1]['ts'] = df_send_[1]['ts'] + df_send_[0].cpu_time_offset + df_send_[0].unix_time_off
df_recv_[1]['ts'] = df_recv_[1]['ts'] + df_recv_[0].cpu_time_offset + df_recv_[0].unix_time_off
df_others = []
for cfg_to_pass, df_other in df_others_:
df_other['ts'] = df_other['ts'] + cfg_to_pass.cpu_time_offset + cfg_to_pass.unix_time_off
df_others.append(df_other)
df_send = df_send_[1]
df_recv = df_recv_[1]
# sort by timestamp
df_send.sort_values(by=['ts'], ignore_index=True)
df_recv.sort_values(by=['ts'], ignore_index=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# other logs (assume there are no happen-before relations that need to be resolved)
# every dataframe is a dictionary in `other_log_list`
gb_others = [df_other.groupby('guid') for df_other in df_others]
other_log_list = [{guid:log for guid, log in gb_other} for gb_other in gb_others]
# find guids that are in both the subscription and publisher logs
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid]
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
print(pubaddr)
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
# print(add_data_calls)
all_RTPSMsg_idx = ((df_send['func'] == '~RTPSMessageGroup') & (df_send['publisher'] == pubaddr))
all_RTPSMsgret_idx = ((df_send['func'] == '~RTPSMessageGroup exit') & (df_send['publisher'] == pubaddr))
all_sendSync_idx = ((df_send['func'] == 'sendSync') & (df_send['publisher'] == pubaddr))
all_nn_xpack_idx = (df['func'] == 'nn_xpack_send1')
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# For grouping RTPSMessageGroup function
try:
ts_gt = (df_send['ts'] > ts) # ts greater than that of add_data_call
RTPSMsg_idx = df_send.loc[ts_gt & all_RTPSMsg_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsg_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
RTPSMsgret_idx = df_send.loc[ts_gt & all_RTPSMsgret_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsgret_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
sendSync_idx = df_send.loc[ts_gt & (df_send['ts'] < df_send.loc[RTPSMsgret_idx, 'ts']) & all_sendSync_idx]
sendSync = sendSync_idx.copy()
sendSync['seqnum'] = add_data_call.loc['seqnum']
modified_rows.extend(row for _, row in sendSync.iterrows())
except ValueError as e:
pass
if 'rmw_cyclonedds_cpp' in df['implementation'].values:
try:
df_cls = other_log_list[0][guid]
seqnum = add_data_call.loc['seqnum']
max_ts = df_cls[(df_cls['layer'] == 'cls_egress') & (df_cls['seqnum'] == seqnum)]['ts'].max()
index = df.loc[(ts < df['ts']) & (df['ts'] < max_ts) & all_nn_xpack_idx].index
df_send_partial.loc[index, 'seqnum'] = seqnum
except ValueError as e:
pass
df_send_partial = pd.concat([df_send_partial, pd.DataFrame(modified_rows)])
# get a subscription from log
df = all_subscriptions_log[guid]
df_recv_partial = all_subscriptions_log[guid].copy()
add_recvchange_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
if 'cyclonedds' in df['layer'].unique():
add_recvchange_calls = df[df['func'] == 'ddsi_udp_conn_read exit']
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = pd.unique(df_recv['pid'])
pid_maps = {pid: (df_recv['pid'] == pid) &
(df_recv['func'] == "rmw_wait exit") for pid in all_pid}
modified_rows = []
for idx, add_recvchange_call in add_recvchange_calls.iterrows():
ts = add_recvchange_call['ts']
subaddr = add_recvchange_call.at['subscriber']
seqnum = add_recvchange_call.at['seqnum']
# Consider missing `rmw_take_with_info exit` here
try:
rmw_take_idx = df.loc[(df['ts'] > ts) & subs_map[subaddr]]['ts'].idxmin()
if 'cyclonedds' in df['layer'].unique():
free_sample = df.loc[(df['func'] == 'free_sample') & (df['seqnum'] == seqnum)]
if len(free_sample) == 0:
continue
free_sample = free_sample.iloc[0]
if free_sample['ts'] > df.at[rmw_take_idx, 'ts']:
rmw_take_idx = df.loc[(df['ts'] > free_sample['ts']) & subs_map[subaddr]]['ts'].idxmin()
# if 'cyclonedds' in df['layer'].unique():
# free_sample = df_recv.loc[(df_recv['ts'] > ts) &
# (df_recv['func'] == 'free_sample') &
# (df_recv['pid'] == df.at[rmw_take_idx, 'pid']) &
# (df_recv['seqnum'] == seqnum)]
# free_sample_idx = free_sample.idxmax()
# if len(free_sample) == 0:
# rmw_take_idx = df.loc[(df['ts'] > free_sample['ts']) & subs_map[subaddr]]['ts'].idxmin()
# print(df.loc[rmw_take_idx])
# free_sample() should be called in rmw_take, therefore
# free_sample() happens before rmw_take_with_info returns
df_recv_partial.at[rmw_take_idx, 'seqnum'] = seqnum
# TODO: Group by ip port in cls_ingress
UDPResourceReceive_idx = df.loc[(df['ts'] < ts) &
(df['func'] == 'UDPResourceReceive exit') &
(df['pid'] == add_recvchange_call.at['pid'])]['ts'].idxmax()
df_recv_partial.at[UDPResourceReceive_idx, 'seqnum'] = seqnum
except ValueError as e:
pass
try:
# Group rmw_wait exit
pid = df_recv_partial.at[rmw_take_idx, 'pid']
rmw_wait_idx = df_recv.loc[(df_recv['ts'] < df_recv_partial.at[rmw_take_idx,'ts']) &
pid_maps[pid]]['ts'].idxmax()
modified_row = df_recv.loc[rmw_wait_idx]
modified_row.at['seqnum'] = add_recvchange_call.at['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
except ValueError as e:
pass
# No need to remove duplicates for the modified rows here, e.g.:
# a = pd.DataFrame(modified_rows)
# print(a[~a.index.duplicated(keep='first')])
df_recv_partial = pd.concat([df_recv_partial, pd.DataFrame(modified_rows)])
# Merge all modified dataframes
df_merged = df_send_partial.append(df_recv_partial, ignore_index=True, sort=False)
# handle other log files
for other_log in other_log_list:
df_other = other_log[guid]
df_merged = df_merged.append(df_other, ignore_index=True, sort=False)
# Avoid `TypeError: boolean value of NA is ambiguous` when calling groupby()
df_merged['subscriber'] = df_merged['subscriber'].fillna(np.nan)
df_merged['guid'] = df_merged['guid'].fillna(np.nan)
df_merged['seqnum'] = df_merged['seqnum'].fillna(np.nan)
df_merged.sort_values(by=['ts'], inplace=True)
gb_merged = df_merged.groupby(['guid', 'seqnum'])
ros_msgs = {msg_id:log for msg_id, log in gb_merged} # msg_id: (guid, seqnum)
# get topic name from log
topic_name = df_merged['topic_name'].dropna().unique()
if len(topic_name) > 1:
raise Exception("More than one topic in a log file")
topic_name = topic_name[0]
if topic_name in res:
res[topic_name] = {**res[topic_name], **ros_msgs}
else:
res[topic_name] = ros_msgs
print('finished parsing ' + topic_name)
return res
def extract_individual_rosmsg2(df_send, df_recv, df_cls):
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, df_cls):
# df['ts'] = df['ts'] + unix_time_off
df_send.sort_values(by=['ts'], ignore_index=True)
df_recv.sort_values(by=['ts'], ignore_index=True)
df_cls.sort_values(by=['ts'], ignore_index=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# in kernel (probably not need it)
gb_cls = df_cls.groupby('guid')
all_cls_log = {guid:log for guid, log in gb_cls}
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid].copy()
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
print(pubaddr)
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
seqnum = add_data_call['seqnum']
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# Use the two timestamps to get a slice of dataframe
# Here we drop "~RTPSMessageGroup exit"
ts_cls = df_cls[(df_cls['guid'] == guid) &
(df_cls['seqnum'] == seqnum) &
(df_cls['layer'] == 'cls_egress')]['ts'].max() # Get ts upper bound
df_send_tgt = df_send[(ts <= df_send['ts']) &
(df_send['ts'] <= ts_cls) &
(df_send['publisher'] == pubaddr)]
modified_row = df_send_tgt.copy()
modified_row['guid'] = guid
modified_row['seqnum'] = seqnum
modified_rows.append(modified_row)
df_send_partial = df_send_partial.combine_first(pd.concat(modified_rows))
# get a subscription from log
df = all_subscriptions_log[guid].copy()
df_recv_partial = all_subscriptions_log[guid].copy()
add_recvchange_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = pd.unique(df_recv['pid'])
pid_maps = {pid: (df_recv['pid'] == pid) &
(df_recv['func'] == "rmw_wait exit") for pid in all_pid}
modified_rows = []
for idx, add_recvchange_call in add_recvchange_calls.iterrows():
ts = add_recvchange_call.at['ts']
subaddr = add_recvchange_call.at['subscriber']
seqnum = add_recvchange_call.at['seqnum']
# Use the two timestamps to get a slice of dataframe
ts_cls = df_cls[(df_cls['guid'] == guid) &
(df_cls['seqnum'] == seqnum) &
(df_cls['layer'] == 'cls_ingress')]['ts'].min()
df_recv_tgt = df_recv[(ts_cls < df_recv['ts']) & (df_recv['ts'] < ts)].copy()
# Consider missing `rmw_take_with_info exit` here
try:
rmw_take_idx = df.loc[(df['ts'] > ts) & subs_map[subaddr]]['ts'].idxmin()
df_recv_partial.at[rmw_take_idx, 'seqnum'] = seqnum
# TODO: Group by ip port in cls_ingress
UDPResourceReceive_idx = df_recv_tgt.loc[(df_recv_tgt['func'] == 'UDPResourceReceive exit') &
(df_recv_tgt['pid'] == add_recvchange_call.at['pid'])]['ts'].idxmax()
df_recv_partial.at[UDPResourceReceive_idx, 'seqnum'] = seqnum
# Group rmw_wait exit
pid = df_recv_partial.at[rmw_take_idx, 'pid']
rmw_wait_idx = df_recv.loc[(df_recv['ts'] < df_recv_partial.at[rmw_take_idx,'ts']) &
pid_maps[pid]]['ts'].idxmax()
modified_row = df_recv.loc[rmw_wait_idx]
modified_row.at['seqnum'] = add_recvchange_call.at['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
except ValueError as e:
pass
df_recv_partial = pd.concat([df_recv_partial, pd.DataFrame(modified_rows)])
# Merge all modified dataframes
df_merged = df_send_partial.append(df_recv_partial, ignore_index=True, sort=False)
# handle other log files
df_merged = df_merged.append(df_cls, ignore_index=True, sort=False)
# Avoid `TypeError: boolean value of NA is ambiguous` when calling groupby()
df_merged['subscriber'] = df_merged['subscriber'].fillna(np.nan)
df_merged['guid'] = df_merged['guid'].fillna(np.nan)
df_merged['seqnum'] = df_merged['seqnum'].fillna(np.nan)
df_merged.sort_values(by=['ts'], inplace=True)
gb_merged = df_merged.groupby(['guid', 'seqnum'])
ros_msgs = {msg_id:log for msg_id, log in gb_merged} # msg_id: (guid, seqnum)
# get topic name from log
topic_name = df_merged['topic_name'].dropna().unique()
if len(topic_name) > 1:
raise Exception("More than one topic in a log file")
topic_name = topic_name[0]
if res.get(topic_name) is None:
res[topic_name] = ros_msgs
else:
res[topic_name].update(ros_msgs)
print(type(res[topic_name]))
print('finished parsing ' + topic_name)
return res
# print(df_recv_partial[['layer', 'ts', 'func', 'guid', 'seqnum']])
def print_all_msgs(res):
for topic_name, all_msgs_log in res.items():
print('topic: ' + topic_name)
for (guid, seqnum), msg_log in all_msgs_log.items():
print('msg_id: ', (guid, seqnum))
print(msg_log)
print('')
def get_rcl_publish(df):
try:
rcl = df.loc[df['func'] == 'rcl_publish'].iloc[0] # shuold be unique
except ValueError as e:
print(e)
return pd.Series('false', index=['layer']) # return a dummy for easy checkup
return rcl
# @profile
def ros_msgs_trace_read(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
# msg_log['subscriber'] = msg_log['subscriber'].apply(lambda x: np.nan if x is pd.NA else x)
gb_sub = msg_log.groupby('subscriber') # How many subscribers received this ros message?
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
for sub_addr, sub_log in gb_sub:
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
end = sub_log.iloc[-1]
if end.at['layer'] != 'rmw': # skip when the last function call is not from rmw (eg. rosbag2)
continue
time = start['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = start['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (end['ts'] - start['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Transmission: %s -> %s<br>Seqnum: %d" % \
(start['layer'], start['func'], end['layer'], end['func'],
start['topic_name'],
start['comm'], end['comm'],
int(start['seqnum']))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
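# Minimal driver sketch (hypothetical; not part of the original pipeline).
# The (cfg, df) input tuples and passing cfg=None are assumptions based only on
# the signatures of extract_individual_rosmsg() and ros_msgs_trace_read() above:
# res = extract_individual_rosmsg((cfg_send, df_send), (cfg_recv, df_recv))
# for items in res.items():  # items is a (topic_name, all_msgs_log) pair
#     traces = ros_msgs_trace_read(items, cfg=None)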
def ros_msgs_trace_read_ros_lat_send(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
start = get_rcl_publish(msg_log)
end = msg_log.loc[msg_log['func'] == 'write_sample_gc'].iloc[0]
time = end['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = end['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (end['ts'] - start['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Transmission: %s -> %s<br>Seqnum: %d" % \
(start['layer'], start['func'], end['layer'], end['func'],
start['topic_name'],
start['comm'], end['comm'],
int(start['seqnum']))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_os_lat_send(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
all_sendSync = msg_log.loc[(msg_log['func'] == 'sendSync') | (msg_log['func'] == 'nn_xpack_send1')].copy()
all_egress = msg_log.loc[msg_log['layer'] == 'cls_egress']
for _, sendSync in all_sendSync.iterrows():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
# addr = sendSync['daddr']
port = sendSync['dport']
egress = all_egress.loc[(all_egress['dport'] == port)].iloc[0]
time = sendSync['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = sendSync['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (egress['ts'] - sendSync['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Destination address: %s:%d" % \
(sendSync['layer'], sendSync['func'], egress['layer'], '',
start['topic_name'],
str(ipaddress.IPv4Address(socket.ntohl(int(all_egress['daddr'].unique())))),
socket.ntohs(int(port)))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_os_lat_recv(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
all_recv = msg_log.loc[(msg_log['func'] == 'UDPResourceReceive exit') |
(msg_log['func'] == 'ddsi_udp_conn_read exit')].copy()
all_ingress = msg_log.loc[msg_log['layer'] == 'cls_ingress'].copy()
for _, ingress in all_ingress.iterrows():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
addr = ingress['daddr']
port = ingress['dport']
try:
recv = all_recv.loc[(all_recv['dport'] == port)].iloc[0]
except Exception as e:
print(str(msg_id) + " missing " + str(port))
print(e)
continue
time = ingress['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = ingress['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (recv['ts'] - ingress['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Source address: %s:%d, Destination address: %s:%d<br>Seqnum: %d" % \
(ingress['layer'], '', recv['layer'], recv['func'], start['topic_name'],
str(ipaddress.IPv4Address(socket.ntohl(int(ingress['saddr'])))), socket.ntohs(int(ingress['sport'])),
str(ipaddress.IPv4Address(socket.ntohl(int(addr)))), socket.ntohs(int(port)),
int(ingress['seqnum']))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from pkganalysis.submission import (SamplSubmission, IgnoredSubmissionError)
from pkganalysis.RMSD_calculator_HG import RMSD_calculator, RMSD_calculator_protein
# =============================================================================
# CONSTANTS
# =============================================================================
# Paths to input data.
from Analysis.Scripts.pkganalysis.submission import load_submissions
STAGE_2_SUBMISSIONS_DIR_PATH = '../Submissions-stage2/'
EXPERIMENTAL_DATA_FILE_PATH = '../../experimental-data/stage-2/'
USER_MAP_FILE_PATH = '../Analysis-outputs-stage2/SAMPL7-user-map-PL-stage2.csv'
class Stage2Submission(SamplSubmission):
"""A submission for the main host-guest challenge.
Parameters
----------
file_path : str
The path to the submission file.
Raises
------
IgnoredSubmission
If the submission ID is among the ignored submissions.
"""
# The IDs of the submissions used for testing the validation. Should be strings of submission IDs
TEST_SUBMISSION_SIDS = {}
# The IDs of submissions for reference calculations. Should be strings of submission IDs
REF_SUBMISSION_SIDS = []
# Section of the submission file.
SECTIONS = {'Participant name', 'Participant organization', 'Name', 'Software', 'Method', 'Category', 'Ranked'}
# Sections in CSV format with kwargs to pass to pandas.read_csv().
CSV_SECTIONS = {}
RENAME_METHODS = {}
def __init__(self, file_path, user_map):
super().__init__(file_path, user_map)
file_name = os.path.splitext(os.path.basename(file_path))[0]
self.file_name = file_name
output_directory = file_path.replace('.tar', '').replace('.gz', '')
subdir = os.listdir(output_directory)[0]
new_path = str(output_directory + '/' + subdir + '/PHIP2_2-description.txt')
self.file_path = new_path
#print('updated path is:' + str(self.file_path))
#TO DO: Not sure if I'm going to use the immediately following for anything
#file_name_simple = file_name.replace('_','-')
#file_data = file_name_simple.split('-')
#self.host_name = file_data[0]
# Load predictions.
sections = self._load_sections(self.file_path) # From parent-class.
# No prediction section yet (will have to load file paths to sdf and pdb files)
#self.data = sections['Predictions'] # This is a list
#self.data = pd.DataFrame(data=self.data) # Now a DataFrame
try:
self.name = self.RENAME_METHODS[sections['Name'][0]]
except KeyError:
self.name = sections['Name'][0]
# Store participant name, organization, method category
self.participant = sections['Participant name'][0].strip()
self.category = sections['Category'][0].strip()
self.organization = sections['Participant organization'][0].strip()
self.ranked = sections['Ranked'][0].strip() == 'True'
# Check if this is a test submission.
if self.sid in self.TEST_SUBMISSION_SIDS:
raise IgnoredSubmissionError('This submission has been used for tests.')
# Check if this is a reference submission
self.reference_submission = False
if self.sid in self.REF_SUBMISSION_SIDS:
self.reference_submission = True
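# Hypothetical usage sketch: build Stage2Submission objects from the extracted
# archives and inspect their metadata. The load_submissions call signature is
# an assumption (only the path constants above are defined in this file):
# submissions = load_submissions(Stage2Submission, STAGE_2_SUBMISSIONS_DIR_PATH, USER_MAP_FILE_PATH)
# for submission in submissions:
#     print(submission.sid, submission.participant, submission.name, submission.ranked)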
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
class Stage2SubmissionCollection:
"""A collection of Stage 2 submissions."""
def __init__(self, submissions, input_data, apo_structure, pdbs_directory, stage2_submission_collection_file_path):
self.apo_structure = apo_structure
if os.path.isfile(stage2_submission_collection_file_path):
self.data = pd.read_csv(stage2_submission_collection_file_path)
else:
#Generate submission collection from scratch
collection = pd.DataFrame(columns=['Site', 'Fragment', 'Smile', 'PDB', 'RMSD exp-apo',
'Pose 1 Ligand','RMSD pose 1 ligand', 'Pose 1 Protein', 'RMSD pose 1 exp-pred', 'RMSD pose 1 apo-pred',
'Pose 2 Ligand','RMSD pose 2 ligand', 'Pose 2 Protein', 'RMSD pose 2 exp-pred', 'RMSD pose 2 apo-pred',
'Pose 3 Ligand','RMSD pose 3 ligand', 'Pose 3 Protein', 'RMSD pose 3 exp-pred', 'RMSD pose 3 apo-pred',
'Pose 4 Ligand','RMSD pose 4 ligand', 'Pose 4 Protein', 'RMSD pose 4 exp-pred', 'RMSD pose 4 apo-pred',
'Pose 5 Ligand','RMSD pose 5 ligand', 'Pose 5 Protein', 'RMSD pose 5 exp-pred', 'RMSD pose 5 apo-pred'])
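            # One row per (site, fragment): the experimental PDB, its RMSD to the apo
            # structure, and file-path/RMSD columns for up to five predicted poses
            # (ligand and protein files for each), presumably filled in later by the analysis.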
for file in os.listdir(input_data):
if file.endswith('.csv'):
csv_file_path = os.path.join(input_data, file)
df = pd.read_csv(csv_file_path, names=['Fragment', 'Smile'])
df.insert(0, 'Site', file[0:6])
for pdb in os.listdir(pdbs_directory):
if pdb.endswith('.pdb'):
fragment = pdb.split('-')[0]
pdb_file_path = os.path.join(pdbs_directory, pdb)
df.loc[df['Fragment'] == fragment, 'PDB'] = pdb_file_path
collection = collection.append(df, ignore_index=True, sort=False)
submission_collection_list = []
for submission in submissions:
submission_collection_df = collection.copy()
submission_collection_df.insert(0, 'Ranked', submission.ranked)
submission_collection_df.insert(0, 'Name', submission.name)
submission_collection_df.insert(0, 'Category', submission.category)
submission_collection_df.insert(0, 'Organization', submission.organization)
submission_collection_df.insert(0, 'Participant', submission.participant)
submission_collection_df.insert(0, 'SID', submission.sid)
dir_path = submission.file_path.replace('PHIP2_2-description.txt', '')
for file in os.listdir(dir_path):
file_path = os.path.join(dir_path, file)
identifier = file.replace('.sdf', '').replace('.mol2', '').replace('.pdb', '')
split = identifier.split('-')
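                    # File names are assumed to follow '<prefix>-<fragment>-<pose id>.<extension>',
                    # so split[1] is the fragment ID and split[2] is the pose number.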
if file.endswith('.sdf') or file.endswith('.mol2'):
fragment = split[1]
pose_id = int(split[2])
if pose_id == 1:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 1 Ligand'] = file_path
if pose_id == 2:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 2 Ligand'] = file_path
if pose_id == 3:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 3 Ligand'] = file_path
if pose_id == 4:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 4 Ligand'] = file_path
if pose_id == 5:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 5 Ligand'] = file_path
if file.endswith('.pdb'):
fragment = split[1]
pose_id = int(split[2])
if pose_id == 1:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 1 Protein'] = file_path
if pose_id == 2:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 2 Protein'] = file_path
if pose_id == 3:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 3 Protein'] = file_path
if pose_id == 4:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 4 Protein'] = file_path
if pose_id == 5:
submission_collection_df.loc[submission_collection_df['Fragment'] == fragment, 'Pose 5 Protein'] = file_path
submission_collection_list.append(submission_collection_df)
            submission_collection = pd.concat(submission_collection_list, ignore_index=True, sort=False)
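            # Likely continuation, sketched here as an assumption: cache the freshly built
            # collection so that later runs can take the CSV fast path above.
            # self.data = submission_collection
            # self.data.to_csv(stage2_submission_collection_file_path, index=False)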
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
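    # 0b00101101 sets bits 0, 2, 3 and 5, so those rows stay valid and rows 1 and 4 are null.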
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
    # Test for copying the dataframe using the Python copy package
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
    # Test for copying the dataframe using the .copy() method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's string hash can return different values across runs, which sometimes
    # makes enc_with_name_arr and enc_arr identical, and there is no better way to
    # force a stable hash. So an integer name is used to get a constant value back.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
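
# Illustrative helper -- not collected by pytest and never called by the tests.
# Series.digitize mirrors np.digitize: each value is mapped to the index of the
# bin it falls into, with `right` controlling which bin edge is inclusive.
def _digitize_semantics_sketch():
    import numpy as np

    bins = np.array([2, 50, 90])
    data = np.array([1, 2, 51, 95])
    # right=False: bins[i - 1] <= x < bins[i]
    assert np.digitize(data, bins, right=False).tolist() == [0, 1, 2, 3]
    # right=True: bins[i - 1] < x <= bins[i]
    assert np.digitize(data, bins, right=True).tolist() == [0, 0, 2, 3]
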
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas Series to float64 because a list of Nones would
    # otherwise be inferred as `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
    # TODO (ptaylor): pandas changes `int` dtype to `float64` when reindexing
    # and filling new label indices with NaN (see the sketch after this test)
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
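
# Illustrative helper -- not collected by pytest and never called by the tests.
# It demonstrates the pandas behaviour noted in the TODO above: reindexing an
# integer column with previously unseen labels fills the new rows with NaN,
# which silently upcasts the column to float64.
def _reindex_int_upcast_sketch():
    import pandas as pd

    s = pd.Series([1, 2, 3], dtype="int64")
    reindexed = s.reindex([0, 1, 2, 3])  # label 3 is new -> filled with NaN
    assert reindexed.dtype == "float64"
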
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # sort_index only gained ignore_index in pandas 1.0, so it is not passed
    # to pandas here; the reset_index below emulates it on the expected result
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
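
# Illustrative helper -- not collected by pytest and never called by the tests.
# `from_pandas=True` makes pyarrow treat NaN as a null, which is the behaviour
# the test above requests from cuDF via `nan_as_null=True`; with
# `from_pandas=False` the NaN stays an ordinary float and only `None` is null.
def _arrow_from_pandas_null_sketch():
    import pyarrow as pa

    data = [1.0, float("nan"), None]
    assert pa.array(data, from_pandas=True).null_count == 2
    assert pa.array(data, from_pandas=False).null_count == 1
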
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
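
# Illustrative helper -- not collected by pytest and never called by the tests.
# A small pandas-only sketch of the per-column `decimals` semantics that the
# cuDF result above is compared against: a dict (or Series) of decimals rounds
# each listed column to its own precision and leaves unlisted columns as-is.
def _round_per_column_sketch():
    import pandas as pd

    df = pd.DataFrame({"w": [1.234, 5.678], "x": [1.234, 5.678]})
    out = df.round({"w": 1})
    assert out["w"].tolist() == [1.2, 5.7]
    assert out["x"].tolist() == [1.234, 5.678]
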
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas effectively treats `None` in object-dtype columns as True when
    # reducing, so replace it with `False` first (see the sketch after this
    # test).
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
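
# Illustrative helper -- not collected by pytest and never called by the tests.
# It shows the pandas quirk referenced in test_all: in an object-dtype Series a
# missing value is skipped by all() (effectively treated as True), which is why
# the test replaces None with False before comparing against cuDF.
def _pandas_object_all_sketch():
    import pandas as pd

    s = pd.Series([True, None], dtype="object")
    assert bool(s.all()) is True  # None did not flip the result to False
    assert bool(s.replace([None], False).all()) is False
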
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
        except ValueError as e:
            # pytest.xfail accepts only a reason string, so gate the xfail on
            # the pandas version instead of passing the flag positionally; the
            # length-mismatch ValueError is a pandas bug fixed in 1.1.
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
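    # var and std take a ddof argument; ddof=0 (population statistics) is
    # passed explicitly so pandas and cudf are compared on the same definition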
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
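    # Every row contains at least one null, so with skipna=False each
    # row-wise reduction is expected to be null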
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
    # pandas insert() does not accept negative positions, so the equivalent
    # end-of-frame position is used on the pandas side
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
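    # Repeat with the operands reversed to exercise the reflected operators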
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
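    # Reorder the cudf result's columns to match the pandas result before comparing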
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
        # Special case: a default RangeIndex is assumed to report zero memory usage
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
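    # A categorical column's footprint is its dictionary (categories) plus
    # its integer codes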
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
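    # A list column is stored as an offsets child plus a flattened elements
    # child; its memory usage is the sum of the two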
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
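    # (two 8-byte source columns per row, and codes assumed to take 8 bytes
    # per level per row)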
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values
    # after alignment, so typecast both results to float64 for the comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
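    # Conditions backed by device memory (cupy or numba device arrays) are
    # copied to host numpy arrays for the pandas reference path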
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
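    # float16 input is expected to be upcast to float32, while float128 has
    # no GPU equivalent and should raise NotImplementedError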
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
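    # nan_as_null controls whether NaN values are imported as nulls or kept
    # as floating-point NaN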
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
        # Additionally, test building a DataFrame from a gpu array whose
        # __cuda_array_interface__ has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
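    # cupy.isclose is the reference; cudf.isclose should give the same result
    # for Series, lists, cupy/numpy arrays, and pandas Series inputs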
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # Sampling columns (axis=1) with replacement would produce duplicate
    # column names, which are not currently supported
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
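    # Only the shape of the sample is compared, since pandas and cudf need
    # not draw the same rows for a given random_state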
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
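    # Sampling twice with the same random_state must produce identical results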
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list(data, ignore_dtype, columns):
gd_data = [cudf.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(gd_data, columns=columns)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, ignore_dtype, index",
[
([pd.Series([1, 2, 3])], False, ["a", "b", "c"]),
([pd.Series(index=[1, 2, 3], dtype="float64")], False, ["a", "b"]),
(
[pd.Series(name="empty series name", dtype="float64")],
False,
["index1"],
),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
["0", "2", "1"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
["_", "+", "*"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, False, ["mean"] * 10),
(
[pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10,
False,
["abc"] * 10,
),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
["set_index_a", "set_index_b"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
["a", "b", "c"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
["a", "v", "z"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
["a", "v", "z"],
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list_with_index(
data, ignore_dtype, index, columns
):
gd_data = [cudf.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns, index=index)
actual = cudf.DataFrame(gd_data, columns=columns, index=index)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, index",
[
([pd.Series([1, 2]), pd.Series([1, 2])], ["a", "b", "c"]),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
["_", "+"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, ["mean"] * 9),
],
)
def test_dataframe_init_from_series_list_with_index_error(data, index):
gd_data = [cudf.from_pandas(obj) for obj in data]
assert_exceptions_equal(
pd.DataFrame,
cudf.DataFrame,
([data], {"index": index}),
([gd_data], {"index": index}),
)
@pytest.mark.parametrize(
"data",
[
[pd.Series([1, 2, 3], index=["a", "a", "a"])],
[pd.Series([1, 2, 3], index=["a", "a", "a"])] * 4,
[
pd.Series([1, 2, 3], index=["a", "b", "a"]),
pd.Series([1, 2, 3], index=["b", "b", "a"]),
],
[
pd.Series([1, 2, 3], index=["a", "b", "z"]),
pd.Series([1, 2, 3], index=["u", "b", "a"]),
pd.Series([1, 2, 3], index=["u", "b", "u"]),
],
],
)
def test_dataframe_init_from_series_list_duplicate_index_error(data):
gd_data = [cudf.from_pandas(obj) for obj in data]
assert_exceptions_equal(
lfunc=pd.DataFrame,
rfunc=cudf.DataFrame,
lfunc_args_and_kwargs=([], {"data": data}),
rfunc_args_and_kwargs=([], {"data": gd_data}),
check_exception_type=False,
)
def test_dataframe_iterrows_itertuples():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via itertuples. Consider using "
"`.to_pandas().itertuples()` "
"if you wish to iterate over namedtuples."
),
):
df.itertuples()
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via iterrows. Consider using "
"`.to_pandas().iterrows()` "
"if you wish to iterate over each row."
),
):
df.iterrows()
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [10, 22, 33],
"c": [0.3234, 0.23432, 0.0],
"d": ["hello", "world", "hello"],
}
),
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": ["hello", "world", "hello"],
"c": [0.3234, 0.23432, 0.0],
}
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
"category_data": cudf.Series(
["a", "a", "b"], dtype="category"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
],
)
@pytest.mark.parametrize(
"include",
[None, "all", ["object"], ["int"], ["object", "int", "category"]],
)
def test_describe_misc_include(df, include):
pdf = df.to_pandas()
expected = pdf.describe(include=include, datetime_is_numeric=True)
actual = df.describe(include=include, datetime_is_numeric=True)
for col in expected.columns:
if expected[col].dtype == np.dtype("object"):
expected[col] = expected[col].fillna(-1).astype("str")
actual[col] = actual[col].fillna(-1).astype("str")
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [10, 22, 33],
"c": [0.3234, 0.23432, 0.0],
"d": ["hello", "world", "hello"],
}
),
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": ["hello", "world", "hello"],
"c": [0.3234, 0.23432, 0.0],
}
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
"category_data": cudf.Series(
["a", "a", "b"], dtype="category"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
],
)
@pytest.mark.parametrize(
"exclude", [None, ["object"], ["int"], ["object", "int", "category"]]
)
def test_describe_misc_exclude(df, exclude):
pdf = df.to_pandas()
expected = pdf.describe(exclude=exclude, datetime_is_numeric=True)
actual = df.describe(exclude=exclude, datetime_is_numeric=True)
for col in expected.columns:
if expected[col].dtype == np.dtype("object"):
expected[col] = expected[col].fillna(-1).astype("str")
actual[col] = actual[col].fillna(-1).astype("str")
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame({"a": [1, 2, 3]}),
cudf.DataFrame(
{"a": [1, 2, 3], "b": ["a", "z", "c"]}, index=["a", "z", "x"]
),
cudf.DataFrame(
{
"a": [1, 2, 3, None, 2, 1, None],
"b": ["a", "z", "c", "a", "v", "z", "z"],
}
),
cudf.DataFrame({"a": [], "b": []}),
cudf.DataFrame({"a": [None, None], "b": [None, None]}),
cudf.DataFrame(
{
"a": ["hello", "world", "rapids", "ai", "nvidia"],
"b": cudf.Series([1, 21, 21, 11, 11], dtype="timedelta64[s]"),
}
),
cudf.DataFrame(
{
"a": ["hello", None, "world", "rapids", None, "ai", "nvidia"],
"b": cudf.Series(
[1, 21, None, 11, None, 11, None], dtype="datetime64[s]"
),
}
),
],
)
@pytest.mark.parametrize("numeric_only", [True, False])
@pytest.mark.parametrize("dropna", [True, False])
def test_dataframe_mode(df, numeric_only, dropna):
pdf = df.to_pandas()
expected = pdf.mode(numeric_only=numeric_only, dropna=dropna)
actual = df.mode(numeric_only=numeric_only, dropna=dropna)
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize("lhs, rhs", [("a", "a"), ("a", "b"), (1, 1.0)])
def test_equals_names(lhs, rhs):
lhs = cudf.DataFrame({lhs: [1, 2]})
rhs = cudf.DataFrame({rhs: [1, 2]})
got = lhs.equals(rhs)
expect = lhs.to_pandas().equals(rhs.to_pandas())
assert_eq(expect, got)
def test_equals_dtypes():
lhs = cudf.DataFrame({"a": [1, 2.0]})
rhs = cudf.DataFrame({"a": [1, 2]})
got = lhs.equals(rhs)
expect = lhs.to_pandas().equals(rhs.to_pandas())
assert_eq(expect, got)
@pytest.mark.parametrize(
"df1",
[
pd.DataFrame({"a": [10, 11, 12]}, index=["a", "b", "z"]),
pd.DataFrame({"z": ["a"]}),
],
)
@pytest.mark.parametrize(
"df2",
[
pd.DataFrame(),
pd.DataFrame({"a": ["a", "a", "c", "z", "A"], "z": [1, 2, 3, 4, 5]}),
],
)
@pytest.mark.parametrize(
"op",
[
operator.eq,
operator.ne,
operator.lt,
operator.gt,
operator.le,
operator.ge,
],
)
def test_dataframe_error_equality(df1, df2, op):
gdf1 = cudf.from_pandas(df1)
gdf2 = cudf.from_pandas(df2)
assert_exceptions_equal(op, op, ([df1, df2],), ([gdf1, gdf2],))
@pytest.mark.parametrize(
"df,expected_pdf",
[
(
cudf.DataFrame(
{
"a": cudf.Series([1, 2, None, 3], dtype="uint8"),
"b": cudf.Series([23, None, None, 32], dtype="uint16"),
}
),
pd.DataFrame(
{
"a": pd.Series([1, 2, None, 3], dtype=pd.UInt8Dtype()),
"b": pd.Series(
[23, None, None, 32], dtype=pd.UInt16Dtype()
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series([None, 123, None, 1], dtype="uint32"),
"b": cudf.Series(
[234, 2323, 23432, None, None, 224], dtype="uint64"
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[None, 123, None, 1], dtype=pd.UInt32Dtype()
),
"b": pd.Series(
[234, 2323, 23432, None, None, 224],
dtype=pd.UInt64Dtype(),
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[-10, 1, None, -1, None, 3], dtype="int8"
),
"b": cudf.Series(
[111, None, 222, None, 13], dtype="int16"
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[-10, 1, None, -1, None, 3], dtype=pd.Int8Dtype()
),
"b": pd.Series(
                        [111, None, 222, None, 13], dtype=pd.Int16Dtype()
# USDA_CoA_Cropland.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Functions used to import and parse USDA Census of Ag Cropland data
in NAICS format
"""
import json
import numpy as np
import pandas as pd
from flowsa.location import US_FIPS, abbrev_us_state
from flowsa.common import WITHDRAWN_KEYWORD, \
fba_wsec_default_grouping_fields
from flowsa.flowbyfunctions import assign_fips_location_system, \
equally_allocate_suppressed_parent_to_child_naics
def CoA_Cropland_NAICS_URL_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
# at national level, remove the text string calling for state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("&state_alpha=__stateAlpha__", "")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__stateAlpha__", z)
urls.append(url)
return urls
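# A quick, self-contained illustration of the url templating above. The base url and
# config below are made up for the example; the real build_url and config come from
# flowbyactivity.py and the FBA method yaml.
def _example_url_substitution():
    build_url = ("https://example.org/api?agg_level_desc=__aggLevel__"
                 "&state_alpha=__stateAlpha__")
    config = {'agg_levels': ['NATIONAL', 'STATE']}
    urls = CoA_Cropland_NAICS_URL_helper(build_url=build_url, config=config)
    # the first url is the national call, with the state parameter removed entirely
    assert "state_alpha" not in urls[0] and "NATIONAL" in urls[0]
    return urls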
def coa_cropland_NAICS_call(*, resp, **_):
"""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
    :param resp: response object returned by the url call
:return: pandas dataframe of original source data
"""
cropland_json = json.loads(resp.text)
    df_cropland = pd.DataFrame(data=cropland_json["data"])
"""
Single stock returns - exponentially weighted moving average model
"""
import logging
import numpy as np
import pandas as pd
import seaborn as sns
from .model import Model, SamplingFrequency
from sklearn import linear_model, metrics
__all__ = ['SingleStockEWM']
class SingleStockEWM(Model):
def train(self, force=False):
"""
Training function for model
:return:
"""
return self._fetch_base_data(force)
def predict(self):
"""
Prediction function for model, for out of sample historical test set
:return: n/a (all data stored in self.predicted)
"""
# Input validation
if 'halflife' not in self.cfg or 'min_periods' not in self.cfg:
raise ValueError('SingleStockEWM: Model config requires both min_periods (periods backwards) and a '
'halflife (decay of historical values, periods to half original value) to run.')
# ## Load up model configs
halflife = self.cfg['halflife']
min_periods = self.cfg['min_periods']
# ## Estimates
# Returns
realized_returns = self.get('returns', data_type='realized', sampling_freq=self.cfg['returns']['sampling_freq'])
realized_volumes = self.get('volumes', data_type='realized', sampling_freq=self.cfg['returns']['sampling_freq'])
realized_sigmas = self.get('sigmas', data_type='realized', sampling_freq=self.cfg['returns']['sampling_freq'])
logging.info("Typical variance of returns: %g" % realized_returns.var().mean())
self.set('returns', realized_returns.ewm(halflife=halflife, min_periods=min_periods).mean().shift(1).dropna(),
'predicted')
# Volumes & sigmas
self.set('volumes', realized_volumes.ewm(halflife=halflife, min_periods=min_periods).mean().shift(1).dropna(),
'predicted')
self.set('sigmas', realized_sigmas.shift(1).dropna(), 'predicted')
# Covariance
if 'covariance' not in self.cfg:
        raise NotImplementedError('Covariance section needs to be defined under SS EWM model config.')
elif self.cfg['covariance']['method'] == 'SS':
self.set('covariance', realized_returns.ewm(halflife=halflife, min_periods=min_periods).cov().
shift(realized_returns.shape[1]).dropna(), 'predicted', self.cfg['covariance']['sampling_freq'])
elif self.cfg['covariance']['method'] == 'FF5':
# Fetch data
ff_returns = self.get('ff_returns', 'realized', SamplingFrequency.DAY)
realized_returns = self.get('returns', data_type='realized', sampling_freq=SamplingFrequency.DAY)
update = self.cfg['covariance']['update'] if 'update' in self.cfg['covariance'] else 'monthly'
if update == 'quarterly':
update_freq = '3M'
elif update == 'monthly':
update_freq = 'M'
elif update == 'biweekly':
update_freq = '2W'
elif update == 'weekly':
update_freq = 'W'
else:
                raise NotImplementedError('Update freq under covariance only supports: quarterly, monthly, biweekly, weekly.')
# Generate computation frequency
first_days = pd.date_range(start=realized_returns.index[max(self.cfg['min_periods'] + 1, 90)],
end=realized_returns.index[-1],
freq=update_freq)
days_back = self.cfg['covariance']['train_days'] if 'train_days' in self.cfg['covariance'] else 90
# Use ML regression to obtain factor loadings. Then factor covariance and stock idiosyncratic variances
exposures, factor_sigma, idyos = {}, {}, {}
# Every first day in each biweekly period
cov_rscore = []
for day in first_days:
logging.info('Running for {}'.format(day.strftime('%Y %b %d')))
# Grab asset returns for preceding train_days (90 by default)
used_returns = realized_returns.loc[(realized_returns.index < day) &
(realized_returns.index >= day - pd.Timedelta(str(days_back) + " days"))]
used_ff_returns = ff_returns.loc[ff_returns.index.isin(used_returns.index)].iloc[:, :-1]
# Multi linear regression to extract factor loadings
mlr = linear_model.LinearRegression()
mlr.fit(used_ff_returns, used_returns)
mlr.predict(used_ff_returns)
# Track performance of FF fit
# rscore = metrics.r2_score(used_ff_returns, used_returns)
cov_rscore.append(0)
                # print('predict_cov_FF5: mlr score = {s}'.format(s=rscore))
# Factor covariance - on FF returns
factor_sigma[day] = used_ff_returns.cov().fillna(0)
# Exposures - factor loadings obtained from multi linear regression coefficients of stock on FF factors
exposures[day] = pd.DataFrame(data=mlr.coef_, index=realized_returns.columns).fillna(0)
# Stock idiosyncratic variances - stock var minus FF var, ensure >=0
idyos[day] = pd.Series(np.diag(used_returns.cov().values -
exposures[day].values @ factor_sigma[day].values @ exposures[
day].values.T),
index=realized_returns.columns).fillna(method='ffill')
idyos[day][idyos[day] < 0] = 0
self.set('factor_sigma', pd.concat(factor_sigma.values(), axis=0, keys=factor_sigma.keys()), 'predicted')
self.set('exposures', pd.concat(exposures.values(), axis=0, keys=exposures.keys()), 'predicted')
            self.set('idyos', pd.DataFrame(idyos), 'predicted')
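# A toy restatement of the factor-model covariance assembled above: with exposures B,
# factor covariance F and idiosyncratic variances D, the asset covariance is
# B @ F @ B.T + diag(D). The tickers and numbers below are illustrative only.
def _example_factor_covariance():
    B = pd.DataFrame([[1.0, 0.2], [0.8, -0.1]], index=["asset_a", "asset_b"])
    F = pd.DataFrame([[0.04, 0.01], [0.01, 0.02]])
    D = pd.Series([0.005, 0.007], index=["asset_a", "asset_b"])
    sigma = B.values @ F.values @ B.values.T + np.diag(D.values)
    return pd.DataFrame(sigma, index=B.index, columns=B.index)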
import pandas as pd
import pandas_datareader as web
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
import math
import matplotlib.pyplot as mtlplt
import yfinance as yf
def create_df(symbol, start, end):
    data = yf.download(symbol, start=start, end=end)
    data_frame = pd.DataFrame(data)
import os
import sys
import multiprocessing as mp
import string
import platform
import shutil
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
import calendar
import pyemu
import flopy
# some global config for plotting
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
abet = string.ascii_uppercase
# some global config for path/directory structure
old_h_dir = os.path.join("..", "ver")
h_nam_file = "eaa_ver.nam"
h_dir = "history"
h_start_datetime = "1-1-2001"
h_end_datetime = "12-31-2015"
old_s_dir = os.path.join("..", "pred")
s_dir = "scenario"
s_nam_file = "eaa_pred.nam"
# history and scenarion simulation start datetimes
s_start_datetime = "1-1-1947"
s_end_datetime = "12-31-1958"
# files with history and scenario observation locations and states
h_hds_file = os.path.join("_data", "reformatted_head_obs.smp")
h_drn_file = os.path.join("_data", "springflow_obs.smp")
h_crd_file = os.path.join("_data", "head_obs.crd")
s_hds_file = os.path.join("_data", "pred_head_obs.smp")
s_drn_file = os.path.join("_data", "pred_springflow_obs.smp")
s_crd_file = os.path.join("_data", "pred_head_obs.crd")
# value of dry cells
hdry = -1.0e+20
# platform-specific binary information
exe_name = "mf2005"
ies_name = "pestpp-ies"
if "window" in platform.platform().lower():
bin_path = os.path.join("bin", "win")
exe_name = exe_name + ".exe"
ies_name = ies_name + ".exe"
elif "darwin" in platform.platform().lower():
bin_path = os.path.join("bin", "mac")
else:
bin_path = os.path.join("bin", "linux")
# the numeric IDs of J-17 and J-27
j17_id = 6837203
j27_id = 6950302
def _setup_model(old_dir, new_dir, start_datetime, nam_file, run=False):
"""load an existing model (either history or scenario) and configure it for
PEST interface construction
Args:
old_dir (str): directory location where the original model resides
new_dir (str): directory location where the new model files will be written
start_datetime (str): string rep of model starting datetime
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run the model once it is written to new_dir. Default is False
"""
# load the existing model and set some attributes
m = flopy.modflow.Modflow.load(nam_file, model_ws=old_dir, check=False,
verbose=True, forgive=False)
m.start_datetime = start_datetime
m.lpf.hdry = hdry
m.bas6.hnoflo = hdry
# change the workspace to new_dir
m.change_model_ws(new_dir, reset_external=True)
# set the external path so that arrays and lists are outside of the
# terrible MODFLOW file formats
m.external_path = "."
# write the inputs
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_dir, exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd=new_dir)
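# Usage sketch: stage the history and scenario models using the module-level directory
# and datetime globals defined above; run=False skips the MODFLOW run. The helper name
# is illustrative only.
def _example_setup_models():
    _setup_model(old_h_dir, h_dir, h_start_datetime, h_nam_file, run=False)
    _setup_model(old_s_dir, s_dir, s_start_datetime, s_nam_file, run=False)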
def _rectify_wel(model_ws, nam_file, run=True):
"""rectify the stress period WEL file entries so that every
stress period has the same entries (filling missing wells with
"dummy" entries with zero pumping)
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run model once the WEL file has been rectified.
Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,
verbose=True, forgive=False)
# get the current WEL file datasets
spd = m.wel.stress_period_data
df_dict = {}
all_kij = set()
# run thru all stress periods to get the union of well locations
for kper in range(m.nper):
ra = spd[kper]
df = pd.DataFrame.from_records(ra)
df.loc[:, "kij"] = df.apply(lambda x: (x.k, x.i, x.j), axis=1)
df.loc[:, "kij_str"] = df.kij.apply(lambda x: "{0:01.0f}_{1:03.0f}_{2:03.0f}".format(*x))
df.index = df.kij_str
all_kij.update(set(df.kij_str.tolist()))
print(kper)
df_dict[kper] = df
# work up fast-lookup containers for well location indices
new_index = list(all_kij)
new_k = {s: int(s.split('_')[0]) for s in new_index}
new_i = {s: int(s.split('_')[1]) for s in new_index}
new_j = {s: int(s.split('_')[2]) for s in new_index}
new_index.sort()
# process each stress period
new_spd = {}
for kper, df in df_dict.items():
# reindex with the full kij locations index
df = df.reindex(new_index)
# map the new kijs to the old kijs
for f, d in zip(["k", "i", "j"], [new_k, new_i, new_j]):
isna = df.loc[:, f].isna()
df.loc[isna, f] = [d[kij] for kij in df.loc[isna, :].index.values]
# fill the nans with 0.0
isna = df.flux.isna()
df.loc[isna, "flux"] = 0.0
# deal with the platform numpy int casting issue
if "window" in platform.platform():
df.loc[:, "i"] = df.i.astype(np.int32)
df.loc[:, "j"] = df.j.astype(np.int32)
df.loc[:, "k"] = df.k.astype(np.int32)
else:
df.loc[:, "i"] = df.i.astype(np.int)
df.loc[:, "j"] = df.j.astype(np.int)
df.loc[:, "k"] = df.k.astype(np.int)
spd[kper] = df.loc[:, ["k", "i", "j", "flux"]].to_records(index=False)
# create a new WEL package and replace the old one
flopy.modflow.ModflowWel(m, stress_period_data=spd, ipakcb=m.wel.ipakcb)
# write to a new model_ws with a "_wel" suffix
m.change_model_ws("{0}_wel".format(model_ws))
m.external_path = '.'
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join("{0}_wel".format(model_ws), exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd="{0}_wel".format(model_ws))
# just to make sure the model ran
new_lst = flopy.utils.MfListBudget(os.path.join("{0}_wel".format(model_ws), nam_file.replace(".nam", ".list")))
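# A toy illustration of the reindex-and-fill idea used above: wells missing from a
# stress period get dummy zero-flux rows so that every period shares the same entries.
# The kij strings and flux values are made up for the example.
def _example_rectify_pattern():
    df = pd.DataFrame({"flux": [-100.0, -50.0]}, index=["0_010_020", "0_011_021"])
    full_index = ["0_010_020", "0_011_021", "0_012_022"]
    df = df.reindex(full_index)
    df.loc[df.flux.isna(), "flux"] = 0.0
    return df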
def build_rch_zone_array(model_ws, nam_file, plot=False):
"""build a recharge zone integer array for zone-based parameters
using unique values in the in recharge arrays
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
plot (bool): flag to plot the zone array. Default is False
"""
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, load_only=["rch"], check=False,
                                   verbose=True, forgive=False)
arr = m.rch.rech[0].array
full_arr = m.rch.rech.array
mn = full_arr.mean(axis=0)[0, :, :]
mn_u, mn_c = np.unique(mn, return_counts=True)
zn_arr = np.zeros_like(arr, dtype=np.int)
for i, u_val in enumerate(mn_u):
# this contional makes sure we keep zeros as zero in the zone array
if u_val == 0.0:
continue
zn_arr[mn == u_val] = i
np.savetxt(os.path.join("_data", "rch_zn_arr.dat"), zn_arr, fmt="%3d")
if plot:
zn_arr = zn_arr.astype(np.float)
zn_arr[zn_arr == 0] = np.NaN
cb = plt.imshow(zn_arr)
plt.colorbar(cb)
plt.show()
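# A small sketch of the zonation rule above: each unique mean-recharge value gets its
# own integer zone and zero-recharge cells stay zone zero. The array values are toy numbers.
def _example_zone_array():
    mn = np.array([[0.0, 0.001], [0.002, 0.001]])
    zn_arr = np.zeros_like(mn, dtype=int)
    for i, u_val in enumerate(np.unique(mn)):
        if u_val == 0.0:
            continue
        zn_arr[mn == u_val] = i
    return zn_arr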
def _setup_pst(org_model_ws, new_model_ws, nam_file):
"""construct the PEST interface, set parameter bounds and
generate the prior ensemble
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace/directory where the
PEST interface will be constructed
nam_file (str): MODFLOW-2005 nam file
"""
# make sure the model simulated heads file exists - need this for observations
if not os.path.exists(os.path.join(org_model_ws, nam_file.replace(".nam", ".hds"))):
raise Exception("need to call _setup_model()")
# load the model from org_model_ws
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws,
load_only=["dis"], check=False,
verbose=True, forgive=False)
# load the recharge zone array
rch_zn_arr = np.loadtxt(os.path.join("_data", "rch_zn_arr.dat"), dtype=np.int)
# array-based model inputs to parameterize by layer (zero-based)
props = [["lpf.hk", 0], ["lpf.ss", 0], ["lpf.sy", 0], ["bas6.strt", 0]]
# copy to constant (global props)
const_props = props.copy()
# fill a zone-based array inputs container with recharge
# zone pars for each stress period
zone_props = []
zone_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# extend the global parameter container with recharge for each stress period
const_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# include the final simulated groundwater level in every active
# model cell as an "observation" in PEST interface
hds_kperk = [[m.nper - 1, 0]]
# parameterize WEL flux and DRN cond spatially (one par for each entry)
spatial_bc_props = [["wel.flux", 0], ["drn.cond", 0]]
# parameterize WEL flux with a single global multiplier for ecah stress period
temporal_bc_props = [["wel.flux", kper] for kper in range(m.nper)]
#create the pest interface...
ph = pyemu.helpers.PstFromFlopyModel(nam_file, org_model_ws=org_model_ws, new_model_ws=new_model_ws,
grid_props=props,
hds_kperk=hds_kperk, zone_props=zone_props, hfb_pars=True,
remove_existing=True, build_prior=False, k_zone_dict={0: rch_zn_arr},
spatial_bc_props=spatial_bc_props, temporal_bc_props=temporal_bc_props,
model_exe_name=exe_name, pp_props=props, pp_space=30, const_props=const_props)
# set the parameter bounds to Edwards-based physically-plausible values
_set_par_bounds(ph.pst, nam_file)
# geostatistcal draws from the prior
pe = ph.draw(num_reals=300, use_specsim=True)
#add the control file initial values as a realization
pe.add_base()
# enforce parameter bounds on the ensemble
pe.enforce()
# save the ensemble to compressed (PEST extended binary) format
pe.to_binary(os.path.join(new_model_ws, "prior.jcb"))
# save the control file
ph.pst.write(os.path.join(new_model_ws, nam_file.replace(".nam", ".pst")))
# read the array parameter multiplier config file and set a hard upper bound
# on specific yield
df = pd.read_csv(os.path.join(new_model_ws, "arr_pars.csv"))
df.loc[:, "upper_bound"] = np.NaN
df.loc[:, "lower_bound"] = np.NaN
df.loc[df.org_file.apply(lambda x: "sy_" in x), "upper_bound"] = 0.25
df.to_csv(os.path.join(new_model_ws, "arr_pars.csv"))
# put the MODFLOW-2005 and PESTPP-IES binaries in the new_model_ws
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_model_ws, exe_name))
shutil.copy2(os.path.join(bin_path, ies_name), os.path.join(new_model_ws, ies_name))
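# Usage sketch: build the history PEST interface from the rectified-WEL model workspace
# produced by _rectify_wel(); the "_template" directory name is just an example.
def _example_setup_history_interface():
    _setup_pst("{0}_wel".format(h_dir), "{0}_template".format(h_dir), h_nam_file)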
def _set_par_bounds(pst, nam_file):
"""set the parameter bounds to expert-knowledge-based
ranges
Args:
pst (pyemu.Pst): PEST control file instance
nam_file (str): MODFLOW-2005 nam file
"""
par = pst.parameter_data
# special case for WEL flux pars: more recent time has metering, so less uncertainty
names = par.loc[par.pargp.apply(lambda x: "welflux" in x), "parnme"]
if nam_file == h_nam_file:
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
else:
par.loc[names, "parlbnd"] = 0.7
par.loc[names, "parubnd"] = 1.3
# DRN conductance
names = par.loc[par.pargp.apply(lambda x: "drncond" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.5
par.loc[names, "parubnd"] = 1.5
# initial conditions
names = par.loc[par.pargp.apply(lambda x: "strt" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
# recharge
names = par.loc[par.pargp.apply(lambda x: "rech" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.8
par.loc[names, "parubnd"] = 1.2
# HK
names = par.loc[par.pargp.apply(lambda x: "hk" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.01
par.loc[names, "parubnd"] = 100
def _add_smp_obs_to_pst(org_model_ws, new_model_ws, pst_name, nam_file, hds_crd_file):
"""add observations to the control file for the locations where groundwater levels
have been measured. The actual value of the observations will be set elsewhere
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace
pst_name (str): PEST control file name
nam_file (str): MODFLOW-2005 nam file
hds_crd_file (str): PEST-style coordinate file that has been processed
to include k,i,j indices
"""
# make sure the control file exists
pst_name = os.path.join(new_model_ws, pst_name)
assert os.path.exists(pst_name)
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=new_model_ws,
load_only=["dis"], check=False,
forgive=False)
# load the control file
pst = pyemu.Pst(pst_name)
# load GW level location dataframe
crd_df = pd.read_csv(hds_crd_file + ".csv")
#load DRN location dataframe
drn_df = pd.read_csv(os.path.join("_data", "DRN_dict.csv"), delim_whitespace=True,
header=None, names=["name", "k", "i", "j"])
# build a dict of name-index location for DRN locations
kij_dict = {n: [0, i, j] for n, i, j in zip(drn_df.name, drn_df.i, drn_df.j)}
# the name of the DRN budget file
cbd_file = nam_file.replace(".nam", ".cbd")
# get one from the org model workspace and update the path to it
shutil.copy2(os.path.join(org_model_ws, cbd_file), os.path.join(new_model_ws, cbd_file))
cbd_file = os.path.join(new_model_ws, cbd_file)
# setup the forward run DRN budget post processor
prec = "double"
if "win" not in platform.platform().lower(): # not win or darwin
prec = "singl"
cbd_frun, cbd_df = pyemu.gw_utils.setup_hds_timeseries(cbd_file, kij_dict, prefix="drn",
include_path=True, fill=-1.0e+30,
text="drains", precision=prec,
model=m)
# make sure the new DRN instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(cbd_file)
assert os.path.exists(ins_file), ins_file
# add the new DRN observations to the control file
pst.add_observations(ins_file=ins_file, pst_path=".")
# set meaningful obs group names
pst.observation_data.loc[cbd_df.index, "obgnme"] = cbd_df.obgnme
# build a dict of name-index locations for the GW level observations locations
kij_dict = {n: [0, i, j] for n, i, j in zip(crd_df.name, crd_df.i, crd_df.j)}
# setup GW level post processor
hds_file = os.path.join(new_model_ws, nam_file.replace(".nam", ".hds"))
assert os.path.exists(hds_file)
hds_frun, hds_df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, prefix="hds",
include_path=True, fill=-1.0e+30, model=m)
# make sure the GW level instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(hds_file)
assert os.path.exists(ins_file), ins_file
# add the GW level obs to the control file and set meaningful
# obs group names
pst.add_observations(ins_file=ins_file, pst_path=".")
pst.observation_data.loc[hds_df.index, "obgnme"] = hds_df.obgnme
# write the updated control file
pst.write(pst_name)
# add the post processor commands to the forward run script
frun_file = os.path.join(new_model_ws, "forward_run.py")
with open(frun_file, 'r') as f:
lines = f.readlines()
idx = None
for i, line in enumerate(lines):
if "__name__" in line:
idx = i
assert idx is not None
lines.insert(idx, " " + cbd_frun + '\n')
lines.insert(idx, " " + hds_frun + '\n')
with open(frun_file, 'w') as f:
for line in lines:
f.write(line)
def add_ij_to_hds_smp(crd_file):
"""intersect the GW level observation coordinates against the
model grid to get k,i,j index information
Args:
crd_file (str): PEST-style "bore coordinates" file
"""
from shapely.geometry import Point
# read the bore coord file
crd_df = pd.read_csv(crd_file, delim_whitespace=True, header=None, names=["name", "x", "y", "layer"])
# set a shapely point attribute
crd_df.loc[:, "pt"] = crd_df.apply(lambda x: Point(x.x, x.y), axis=1)
# load the history model
m = flopy.modflow.Modflow.load(h_nam_file, model_ws=h_dir,
load_only=["dis"], check=False,
forgive=False)
# use the flopy grid intersect functionality
gi = flopy.utils.GridIntersect(m.modelgrid)
crd_df.loc[:, 'ij'] = crd_df.pt.apply(lambda x: gi.intersect_point(x)[0][0])
# split out the i and j indices
crd_df.loc[:, 'i'] = crd_df.ij.apply(lambda x: x[0])
crd_df.loc[:, 'j'] = crd_df.ij.apply(lambda x: x[1])
# remove extra columns
crd_df.pop("ij")
crd_df.pop("pt")
# save the new dataframe to a CSV file
crd_df.to_csv(crd_file + ".csv")
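# Usage sketch: attach k,i,j indices to the GW-level coordinate files declared at the
# top of this script; note the function always intersects against the history model grid.
def _example_add_ij():
    add_ij_to_hds_smp(h_crd_file)
    add_ij_to_hds_smp(s_crd_file)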
def _set_obsvals(d, nam_file, hds_file, drn_file, pst_file, run=True):
"""samples the groundwater and spring discharge observations to
the model stress periods and sets the "obsval" attribute in the control
file. Also plots up org obs and sampled obs in a multipage pdf
Args:
d (str): directory where the control file exists
nam_file (str): MODFLOW-2005 nam file
hds_file (str): PEST-style site sample file with groundwater
level observations
drn_file (str): PEST-style site sample file with spring discharge
observations
pst_file (str): PEST control file
run (bool): flag to run PESTPP-IES with NOPTMAX=0 after the
observation values have been updated. Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=d, load_only=["dis"],
check=False, forgive=False)
# work out the stress period ending datetime
sp_end_dts = pd.to_datetime(m.start_datetime) + pd.to_timedelta(np.cumsum(m.dis.perlen.array), unit='d')
# cast the model start_datetime from a str to a datetime instance
start_datetime = pd.to_datetime(m.start_datetime)
# load the gw level and spring discharge site sample files
# into pandas dataframes
hds_df = pyemu.smp_utils.smp_to_dataframe(hds_file)
drn_df = pyemu.smp_utils.smp_to_dataframe(drn_file)
# plotting limits
xmn, xmx = pd.to_datetime(start_datetime), pd.to_datetime(sp_end_dts[-1])
ymn, ymx = hds_df.value.min(), hds_df.value.max()
# containers for the sampled observation series
hds_sampled_dfs = []
drn_sampled_dfs = []
# a function to sample each observation in a given site
# dataframe to the model stress period ending datetimes
# uses nearest neighbor
def sample_to_model(udf):
d, v = [], []
for dt, val in zip(udf.index.values, udf.value.values):
# difference between this obs datetime and the
# stress period end datetime
diff = (sp_end_dts - dt).map(np.abs).values
# the index of the minimum diff (nearest neighbor)
idxmin = np.argmin(diff)
# minimum diff in days
day_diff = diff[idxmin].astype('timedelta64[D]')
# the diff is greater than a month, something is wrong...
if day_diff > np.timedelta64(31, 'D'):
print(idxmin, sp_end_dts[idxmin], dt, day_diff)
continue
# save the datetime and value
d.append(sp_end_dts[idxmin])
v.append(val)
# form a new dataframe and return
udf_mod = pd.DataFrame({"value": v}, index=d)
return udf_mod
# save a multipage PDF for inspection
with PdfPages(os.path.join("_data", "obs.pdf")) as pdf:
ax_per_page = 10
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
# process each unique GW level site entry
for usite in hds_df.name.unique():
print(usite)
# get a dataframe of just this site
udf = hds_df.loc[hds_df.name == usite, ["datetime", "value"]].copy()
# set the index to datetime
udf.index = udf.pop("datetime")
# sample to stress period ending datetimes
udf_mod = sample_to_model(udf)
#set a name attribute
udf_mod.loc[:, "name"] = usite
# store new sample site dataframe
hds_sampled_dfs.append(udf_mod)
# plot
ax = axes[ax_count]
ax.plot(udf.index, udf.value, lw=0.5, marker='.', color='0.5', ms=5, alpha=0.5)
ax.plot(udf_mod.index, udf_mod.value, lw=0.5, marker='.', color='b', ms=5, alpha=0.5)
ax.set_title("site:{0}, org count:{1}, reindexed count:{2}".format(usite, udf.shape[0], udf_mod.shape[0]),
loc="left")
ax.set_xlim(xmn, xmx)
# ax.set_ylim(ymn,ymx)
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
        # process each unique DRN site entry
for usite in drn_df.name.unique():
print(usite)
# get a dataframe of just this site
udf = drn_df.loc[drn_df.name == usite, ["datetime", "value"]].copy()
# use the datetime as the index
udf.index = udf.pop("datetime")
# sample to stress period ending datetime
udf_mod = sample_to_model(udf)
# set a name attribute
udf_mod.loc[:, "name"] = usite
# store
drn_sampled_dfs.append(udf_mod)
# plot
ax = axes[ax_count]
ax.plot(udf.index, udf.value, lw=0.5, marker='.', color='0.5', ms=5, alpha=0.5)
ax.plot(udf_mod.index, udf_mod.value, lw=0.5, marker='.', color='b', ms=5, alpha=0.5)
ax.set_title("site:{0}, org count:{1}, reindexed count:{2}".format(usite, udf.shape[0], udf_mod.shape[0]),
loc="left")
ax.set_xlim(xmn, xmx)
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
plt.tight_layout()
pdf.savefig()
# concatenate the sampled GW level dataframes into one large dataframe
hds_df = pd.concat(hds_sampled_dfs)
# set the datetime index as a column
hds_df.loc[:, "datetime"] = hds_df.index
# set a generic and nonduplicated index
hds_df.index = np.arange(hds_df.shape[0])
# save the sampled dataframe
pyemu.smp_utils.dataframe_to_smp(hds_df, hds_file.replace(".smp", "_sampled.smp"))
# concatenate the sample spring discharge dataframes into one large dataframe
drn_df = pd.concat(drn_sampled_dfs)
# set the datetime index as a column
drn_df.loc[:, "datetime"] = drn_df.index
# set a generic and nonduplicated index
drn_df.index = np.arange(drn_df.shape[0])
# save the sampled dataframe
pyemu.smp_utils.dataframe_to_smp(drn_df, drn_file.replace(".smp", "_sampled.smp"))
# build up observation names ("obsnme") in the sampled GW level dataframe
# these are the same names that are in the control file
hds_df.loc[:, "dt_str"] = hds_df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
hds_df.loc[:, "site_name"] = hds_df.name
hds_df.loc[:, "obsnme"] = hds_df.apply(lambda x: "hds_{0}_{1}".format(str(x.site_name), x.dt_str), axis=1)
hds_df.loc[:, "obsnme"] = hds_df.obsnme.apply(str.lower)
hds_df.index = hds_df.obsnme
# load the control file
pst = pyemu.Pst(os.path.join(d, pst_file))
obs = pst.observation_data
# set all observations to zero weight
obs.loc[:, "weight"] = 0.0
# get set containers for observation names in the
# control file and in the GW level dataframe
pnames = set(list(obs.obsnme.values))
snames = set(list(hds_df.obsnme.values))
# make sure all GW level dataframe names are in the
# control file
print(snames - pnames)
assert len((snames - pnames)) == 0
# set the obsval attribute for space-time locations where
# we have actual GW level observations
obs.loc[hds_df.obsnme, "obsval"] = hds_df.value
# set a generic non-zero weight for the actual
# GW level observation locations
obs.loc[hds_df.obsnme, "weight"] = 1.0
# build up observation names ("obsnme") in the sampled spring discharge dataframe
# these are the same names that are in the control file
drn_df.loc[:, "dt_str"] = drn_df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
drn_df.loc[:, "site_name"] = drn_df.name
drn_df.loc[:, "obsnme"] = drn_df.apply(lambda x: "drn_{0}_{1}".format(str(x.site_name), x.dt_str), axis=1)
drn_df.loc[:, "obsnme"] = drn_df.obsnme.apply(str.lower)
drn_df.index = drn_df.obsnme
# get set container for observation names in the
# spring discharge dataframe
snames = set(list(drn_df.obsnme.values))
# make sure all spring discharge dataframe names are in the
# control file
print(snames - pnames)
assert len((snames - pnames)) == 0
# set the obsval attribute for space-time locations where
# we have actual spring discharge observations
# negative 1 since drn out is negative, convert from cfs to cfd
obs.loc[drn_df.obsnme, "obsval"] = -1.0 * drn_df.value * (60. * 60. * 24.)
# set a generic non-zero weight
obs.loc[drn_df.obsnme, "weight"] = 1.0
# set noptmax to 0 for testing
pst.control_data.noptmax = 0
# save the updated control file
pst.write(os.path.join(d, pst_file))
# run PESTPP-IES?
if run:
pyemu.os_utils.run("pestpp-ies {0}".format(pst_file), cwd=d)
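# A toy version of the nearest-neighbor resampling above: an observation is snapped to
# the closest stress-period end and dropped if it is more than about a month away.
# The dates below are made up for the example.
def _example_nearest_sp_end():
    sp_end_dts = pd.date_range("2001-01-31", periods=3, freq="M")
    obs_dt = pd.to_datetime("2001-02-20")
    diff = (sp_end_dts - obs_dt).map(np.abs).values
    return sp_end_dts[np.argmin(diff)]  # 2001-02-28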
def run_local(b_d, m_d, pst_name, num_workers=10):
"""run PESTPP-IES in parallel on the current machine
Args:
b_d (str): "base" directory that contains all the files needed
to run PESTPP-IES (MODFLOW file and PEST interface files)
m_d (str): "master" directory that will be created and where the
PESTPP-IES master instance will be started
pst_name (str): control file name. Must exist in b_d
num_workers (int): number of parallel workers to start.
Default is 10.
"""
pyemu.os_utils.start_workers(b_d, "pestpp-ies", pst_name, num_workers=num_workers,
master_dir=m_d, worker_root=".",
reuse_master=True)
def plot_obs_vs_sim_case(m_d, case="eaa_ver", post_iter=None,
plt_name="obs_v_sim.pdf", focus=False):
"""plot ensemble-based observed vs simulated GW level and spring discharge time
series for a given PEST "case".
Args:
m_d (str): "master" directory that holds the simulated output ensembles
case (str): the PEST "case" name. Default is "eaa_ver". various suffixes are
appended to this case to form control file and ensemble file names
post_iter (int): the PESTPP-IES iteration to use as the "posterior" ensemble.
If None, no posterior will be plotted. If True, only the maximum of the
prior is plotted (to help with figure "busy-ness"). Default is None.
plt_name (str): the name of the multi-page PDF to create. It is written in the
m_d directory. Default is :"obs_v_sim.pdf:.
focus (bool): flag to plot only the four locations of management interest. If
True, then only 4 axes are plotted - this creates the figures shown in the
manuscript. If False, all locations are plotted - this creates the
multipage PDFs shown in the supplementary material
Notes:
calls plot_obs_vs_sim()
"""
pst = pyemu.Pst(os.path.join(m_d, case + ".pst"))
base_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, case + ".base.obs.jcb"))
pr_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, case + ".0.obs.jcb"))
pt_en = None
if post_iter is not None:
pt_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, "{0}.{1}.obs.jcb". \
format(case, post_iter)))
if "eaa_ver" in case:
s, e = h_start_datetime, h_end_datetime
elif "eaa_pred" in case:
s, e = s_start_datetime, s_end_datetime
else:
raise Exception()
plot_obs_vs_sim(pst=pst, start_datetime=s, end_datetime=e,
base_en=base_en, pr_en=pr_en, pt_en=pt_en,
plt_name=os.path.join(m_d, plt_name), focus=focus)
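# Usage sketch: the master directory name and posterior iteration below are placeholders
# for whatever the PESTPP-IES run actually produced.
def _example_plot_history_fits():
    plot_obs_vs_sim_case("master_history", case="eaa_ver", post_iter=3,
                         plt_name="obs_v_sim_focus.pdf", focus=True)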
def plot_obs_vs_sim(pst, start_datetime, end_datetime, base_en=None, pr_en=None, pt_en=None,
plt_name="obs_v_sim.pdf", mask_invalid=True, focus=False):
"""plot ensemble-based observed vs simulated
Args:
pst (pyemu.Pst): control file instance
start_datetime (str): model start datetime string
end_datetime (str): model end datetime string
base_en (pyemu.ObservationEnsemble): the observed plus noise ensemble.
Default is None (dont plot)
pr_en (pyemu.ObservationEnsemble): prior simulated output ensemble.
Default is None (dont plot)
pt_en: (pyemu.ObservationEnsemble): posterior simulated output ensmeble.
Default is None (dont plot)
plt_name (str): name of plot to generate. Default is "obs_v_sim.pdf"
mask_invalid (bool): flag to mask invalid values in the simulated output
ensembles (defined by hdry). Default is True.
focus (bool): flag to plot only the four locations of management interest. If
True, then only 4 axes are plotted - this creates the figures shown in the
manuscript. If False, all locations are plotted - this creates the
multipage PDFs shown in the supplementary material
"""
# get the non-zero observation data
obs = pst.observation_data
nz_obs = obs.loc[pst.nnz_obs_names, :].copy()
# set the datetimes for each non-zero observation
nz_obs.loc[:, "datetime"] = pd.to_datetime(nz_obs.obsnme.apply(lambda x: x.split('_')[-1]))
# spring discharge obs names
drn_names = nz_obs.loc[nz_obs.obsnme.apply(lambda x: "drn" in x), "obsnme"]
# convert from model units to (positive) CFS for plotting
nz_obs.loc[drn_names, "obsval"] *= -1.0 / (60.0 * 60.0 * 24.0)
# unique nonzero observation groups (site names)
nz_grps = nz_obs.obgnme.unique()
# if focus is True, drop the non-focus sites
focus_sites = ["comal", "sanmar", str(j17_id), "j-17", str(j27_id), "j-27"]
    focus_labels = ["Comal", "San Marcos", "J-17", "J-17", "J-27", "J-27"]
nz_grps.sort()
if focus:
keep = []
labels = []
for nz_grp in nz_grps:
for fs, lab in zip(focus_sites, focus_labels):
print(nz_grp, fs, fs in nz_grp)
if fs in nz_grp:
keep.append(nz_grp)
labels.append(lab)
nz_grps = keep
with PdfPages(plt_name) as pdf:
xmn, xmx = pd.to_datetime(start_datetime), pd.to_datetime(end_datetime)
if focus:
ax_per_page = 4
fig, axes = plt.subplots(ax_per_page, 1, figsize=(7, 7))
else:
ax_per_page = 5
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
# process each unique non-zero obs group
for igrp, nz_grp in enumerate(nz_grps):
ax = axes[ax_count]
obs_grp = nz_obs.loc[nz_obs.obgnme == nz_grp, :].copy()
obs_grp.sort_values(by="datetime", inplace=True)
ireal_str = ""
# process each ensemble
for en, fc, en_name in zip([pr_en, pt_en, base_en], ['k', 'b', 'r'], ["prior", "post", "noise"]):
if en is None:
continue
# get the ensemble block for this group
en_grp = en.loc[:, obs_grp.obsnme].copy()
# convert the DRN outputs to positive CFS
if "drn" in nz_grp:
en_grp = en_grp * -1.0 / (60.0 * 60.0 * 24.0)
# obs_grp.loc[:,"obsval"] *= -1.0 / (60.0 * 60.0 * 24.0)
else:
# for GW level obs, check for invalid values
vals = en_grp.values
ivals = vals[vals == hdry]
if ivals.shape[0] > 0:
ireal = 0
for real in en_grp.index:
if hdry in en_grp.loc[real, :].values:
ireal += 1
ireal_str += ", {0} invalid values across {1} realizations in {2}". \
format(ivals.shape[0], ireal, en_name)
# mask invalid?
if mask_invalid:
en_grp.values[en_grp.values == hdry] = np.NaN
# if the posterior ensemble was passed and this is the prior ensemble
# dont plot it.
if pt_en is not None and en_name == "prior":
pass
else:
# plot each realization and thin-line trace
[ax.plot(obs_grp.datetime, en_grp.loc[i, :], color=fc, lw=0.025, alpha=0.1) for i in
en_grp.index.values]
# if the "base" realization is found, plot it with a heavier line
if "base" in en_grp.index:
print("base found",en_name,nz_grp)
if en_name == "prior":
ax.plot(obs_grp.datetime, en_grp.loc["base", :], color="k", dashes=(2,2),lw=1.5)
else:
ax.plot(obs_grp.datetime, en_grp.loc["base", :], color=fc,lw=1.5,alpha=0.35)
# set axis limits
ax.set_xlim(xmn, xmx)
ymn, ymx = obs_grp.obsval.min() * 0.9, obs_grp.obsval.max() * 1.1
# for the DRN series, set the y-axis limit to the observed spring flow
if pr_en is not None and "base" in pr_en.index:
en_grp = pr_en.loc["base", obs_grp.obsnme].copy()
if "drn" in nz_grp:
en_grp = en_grp * -1.0 / (60.0 * 60.0 * 24.0)
ymn = min(ymn, en_grp.min())
ymx = max(ymx, en_grp.max())
ax.set_ylim(ymn, ymx)
# build up meaningful axis titles
site_id = nz_grp.split('_')[1]
try:
site_id = int(site_id)
except:
pass
title = "site: {0}".format(site_id)
if site_id == j17_id:
title += " (J-17)"
elif site_id == j27_id:
title += " (J-27)"
if len(ireal_str) > 0:
title += ireal_str
if focus:
ax.set_title("{0}) {1}".format(abet[igrp], labels[igrp]), loc="left")
else:
ax.set_title(title, loc="left")
if "drn" in nz_grp:
ax.set_ylabel("flow ($\\frac{ft^3}{s}$)")
else:
ax.set_ylabel("head (ft)")
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
if not focus:
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
if not focus:
for rem_ax in range(ax_count, ax_per_page):
axes[rem_ax].set_xticks([])
axes[rem_ax].set_yticks([])
axes[rem_ax].axis("off")
plt.tight_layout()
pdf.savefig()
def reweight_ensemble(m_d, t_d, case="eaa_ver"):
"""reweight the non-zero observations in the control file
using a management-focused strategy with the ensemble mean
residuals
Args:
m_d (str): master directory
t_d (str): template directory
case (str): the PEST case. Default is "eaa_ver"
"""
# load the control file
pst = pyemu.Pst(os.path.join(m_d, case + ".pst"))
obs = pst.observation_data
# load the prior simulated output ensemble
pr_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, case + ".0.obs.jcb"))
assert "base" in pr_en.index
# build a PEST-style residual dataframe using the mean ensmeble values
res_df = pd.DataFrame({"modelled": pr_en.loc["base", :].values,
"group": pst.observation_data.loc[pr_en.columns, "obgnme"].values,
"name": pr_en.columns.values,
"measured": pst.observation_data.loc[pr_en.columns, "obsval"].values}, \
index=pr_en.columns)
pst.set_res(res_df)
init_nz_obs = pst.nnz_obs_names
# drop the 5 realizations that cause the most dry values
vals = []
for real in pr_en.index:
nz_real = pr_en.loc[real, init_nz_obs].values
count = nz_real[nz_real == hdry].shape[0]
vals.append(count)
df = pd.DataFrame({"invalid_count": vals}, index=pr_en.index)
df.sort_values(by="invalid_count", inplace=True, ascending=False)
pr_en = pr_en.loc[df.iloc[5:].index]
assert pr_en.shape == pr_en.dropna().shape
# look for any nz obs yielding dry/inactive/insane values - zero weight these
# so we dont have issues during the PESTPP-IES iterations with phi exploding
nz_obs = obs.loc[pst.nnz_obs_names, :].copy()
abs_mx = np.abs(pr_en.loc[:, nz_obs.obsnme].values).max(axis=0)
# print(abs_mx)
tol = 1.0e+10
busted = abs_mx > tol
busted_obs_names = nz_obs.obsnme.loc[busted]
print(busted_obs_names)
obs.loc[busted_obs_names, "weight"] = 0.0
# re-get nz_obs now
nz_obs = obs.loc[pst.nnz_obs_names, :].copy()
print("removed {0} obs for insane values".format(busted_obs_names.shape[0]))
print(len(init_nz_obs), pst.nnz_obs)
# now use standard measurement based noise to check for prior-data conflict
nz_hds_names = nz_obs.loc[nz_obs.obgnme.apply(lambda x: x.startswith("hds")), "obsnme"]
obs.loc[nz_hds_names, "weight"] = 0.5
nz_drn_names = nz_obs.loc[nz_obs.obgnme.apply(lambda x: x.startswith("drn")), "obsnme"]
obs.loc[nz_drn_names, "weight"] = 1.0 / (obs.loc[nz_drn_names, "obsval"].apply(np.abs) * 0.25)
# correct for drn flows that are zero
drn_obs = nz_obs.loc[nz_drn_names, :]
zero_flow_drn = drn_obs.loc[drn_obs.obsval == 0.0, "obsnme"]
obs.loc[zero_flow_drn, "weight"] = 0.01
# correct for giant weights
too_high = drn_obs.loc[drn_obs.weight > 0.01, "obsnme"]
obs.loc[too_high, "weight"] = 0.01
print(obs.loc[nz_drn_names, "weight"].min(), obs.loc[nz_drn_names, "weight"].max())
print(len(nz_hds_names) + len(nz_drn_names), pst.nnz_obs)
# pst.write(os.path.join(m_d,"eaa_ver_reweight.pst"))
# generate an obs + noise ensemble using measurement-error-based weights
base_en = pyemu.ObservationEnsemble.from_gaussian_draw(pst=pst, num_reals=pr_en.shape[0])
base_en._df.index = pr_en._df.index
# check for prior-data conflict: for each non-zero weighted obs,
# if the simulated output ensemble doesnt overlap with the
# obs+noise ensemble, then we can't expect the DA process
# to reproduce these observations.
conflicted = []
for oname in pst.nnz_obs_names:
d_mn, d_mx = base_en.loc[:, oname].min(), base_en.loc[:, oname].max()
p_mn, p_mx = pr_en.loc[:, oname].min(), pr_en.loc[:, oname].max()
        if d_mn > p_mx or p_mn > d_mx:
print(oname, d_mn, d_mx, p_mn, p_mx)
conflicted.append(oname)
pst.observation_data.loc[conflicted, "weight"] = 0.0
# re-get non-zero weighted observations
obs = pst.observation_data
nz_obs = obs.loc[pst.nnz_obs_names, :]
# extract the non-zero weight obs blocks from the obs+noise and
# prior simulated output ensembles and save them for restart (to
# save runs)
base_en = base_en.loc[:, nz_obs.obsnme]
base_en.to_binary(os.path.join(t_d, "obs.jcb"))
pr_en.to_binary(os.path.join(t_d, "restart_obs.jcb"))
par_en = pyemu.ParameterEnsemble.from_binary(pst=pst, filename=os.path.join(t_d, "prior.jcb"))
par_en = par_en.loc[pr_en.index, :]
assert par_en.shape == par_en.dropna().shape
par_en.to_binary(os.path.join(t_d, "restart_prior.jcb"))
# enforce subjective management-focused weighting scheme
tags = ["comal", "sanmar", str(j17_id), str(j27_id)]
ratio = 100.0
obsgrp_dict = {grp: 1.0 for grp in pst.nnz_obs_groups}
tagged_groups = []
for tag in tags:
t_obs = nz_obs.loc[nz_obs.obsnme.apply(lambda x: tag in x), :]
if t_obs.shape[0] == 0:
raise Exception(tag)
gnames = t_obs.obgnme.unique()
if len(gnames) > 1:
raise Exception(tag, gnames)
obsgrp_dict[gnames[0]] = ratio
tagged_groups.append(gnames[0])
print(pst.phi)
org_pc = pst.phi_components
pst.adjust_weights(obsgrp_dict=obsgrp_dict)
print(pst.phi)
pc = pst.phi_components
for grp in pst.nnz_obs_groups:
print(grp, org_pc[grp], pc[grp])
# set some ++ options for PESTPP-IES restart
pst.pestpp_options["ies_par_en"] = "restart_prior.jcb"
pst.pestpp_options["ies_obs_en"] = "obs.jcb"
pst.pestpp_options["ies_restart_obs_en"] = "restart_obs.jcb"
pst.control_data.noptmax = 1
pst.write(os.path.join(t_d, case + "_reweight.pst"))
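# A minimal restatement of the overlap test used above for prior-data conflict: an
# observation is conflicted when the simulated range and the obs+noise range do not
# overlap at all. The helper name is illustrative only.
def _example_is_conflicted(sim_min, sim_max, noise_min, noise_max):
    return noise_min > sim_max or sim_min > noise_max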
def _prep_for_parallel(b_d, pst_name, m_d=None, noptmax=6, with_loc=False, overdue_giveup_fac=5):
"""prepare a given template directory for parallel execution. Makes a directory copy with
temporary files removed, and (optionally) a master directory
Args:
b_d (str): base template directory. This is copied to b_d+"_parallel"
pst_name (str): control file name
m_d (str): master directory to create. Default is None (dont create)
noptmax (int): number of PESTPP-IES DA iterations
with_loc (bool): flag to use localization. If True, "loc.jcb" must exists
in b_d
overdue_giveup_fac (float): factor to use to limit the amount of time waiting for
slow runs to finish. Default is 5, which means any run that takes longer than 5 times
the mean run time will be marked as a run failure and killed.
"""
# copy b_d to b_d+"_parallel" (e.g. the parallel template dir)
ct_d = "{0}_parallel".format(b_d)
if os.path.exists(ct_d):
shutil.rmtree(ct_d)
shutil.copytree(b_d, ct_d)
# remove any temp files not needed since the parallel template dir will
# need to be copied a bunch of times
rm_ext_list = [".list", ".jcb", ".out", ".hds", ".rec", ".ftl", ".mt3d", \
".ucn", ".rei", ".rns", ".rnj", ".cbb", ".cbd"]
rm_tag_list = ["_setup_"]
for rm_ext in rm_ext_list:
rm_files = [f for f in os.listdir(ct_d) if f.lower().endswith(rm_ext)]
[os.remove(os.path.join(ct_d, rm_file)) for rm_file in rm_files]
for rm_tag in rm_tag_list:
rm_files = [f for f in os.listdir(ct_d) if rm_tag in f.lower()]
[os.remove(os.path.join(ct_d, rm_file)) for rm_file in rm_files]
a_d = os.path.join(ct_d, "arr_mlt")
[os.remove(os.path.join(a_d, f)) for f in os.listdir(a_d)[1:]]
# copy the binaries into the parallel template dir
for bin in os.listdir(bin_path):
shutil.copy2(os.path.join(bin_path, bin), os.path.join(ct_d, bin))
# copy pyemu and flopy into the parallel template dir
shutil.copytree(os.path.join("flopy"), os.path.join(ct_d, "flopy"))
shutil.copytree(os.path.join("pyemu"), os.path.join(ct_d, "pyemu"))
# some platform specific things: condor is used only on windows
if "window" in platform.platform().lower():
shutil.copy2(os.path.join("python.zip"), os.path.join(ct_d, "python.zip"))
agent_zip = "condor_agent"
if os.path.exists(agent_zip + ".zip"):
os.remove(agent_zip + ".zip")
shutil.make_archive(agent_zip, "zip", ct_d)
# if a master dir is requested, prepare it
if m_d is not None:
# make sure we don't stomp on an existing dir
assert not os.path.exists(m_d), "master dir already exists {0}".format(m_d)
# copy b_d to m_d
shutil.copytree(b_d, m_d)
# copy flopy, pyemu and binaries into m_d
shutil.copytree("flopy", os.path.join(m_d, "flopy"))
shutil.copytree("pyemu", os.path.join(m_d, "pyemu"))
for bin in os.listdir(bin_path):
shutil.copy2(os.path.join(bin_path, bin), os.path.join(m_d, bin))
# load the control file
pst = pyemu.Pst(os.path.join(m_d, pst_name))
# if a localizer is being used
if with_loc:
pst.pestpp_options["ies_localizer"] = "loc.jcb"
# make sure it exists
loc_file = os.path.join(b_d, "loc.jcb")
assert os.path.exists(loc_file)
# load the localizer and make sure it is in sync with
# the current weighting strategy (only non-zero obs can be
# in the localizer row names)
loc = pyemu.Matrix.from_binary(loc_file).to_dataframe()
loc = loc.loc[pst.nnz_obs_names, :]
pyemu.Matrix.from_dataframe(loc).to_coo(os.path.join(m_d, "loc.jcb"))
# use 20 threads for localized solve
pst.pestpp_options["ies_num_threads"] = 20
# if the initial parameter ensemble is not in the ++ options,
# set it to the prior ensemble
if "ies_par_en" not in pst.pestpp_options:
pst.pestpp_options["ies_par_en"] = "prior.jcb"
pst.pestpp_options["ies_num_reals"] = 100
# save binary formats
pst.pestpp_options["ies_save_binary"] = True
# set overdue_giveup_fac
pst.pestpp_options["overdue_giveup_fac"] = overdue_giveup_fac
# if we are iterating, set the bad phi to protect against
# invalid (hdry) outputs
if noptmax != -1:
pst.pestpp_options["ies_bad_phi"] = 1e20
# just a guess...
pst.pestpp_options["ies_initial_lambda"] = 1000.0
# number of iterations
pst.control_data.noptmax = noptmax
# save the control file into the master dir
pst.write(os.path.join(m_d, pst_name))
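# Example call (directory and file names are hypothetical, shown only to
# illustrate the signature):
# _prep_for_parallel("eaa_ver_template", "eaa_ver.pst", m_d="eaa_ver_master",
#                    noptmax=3, with_loc=True)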
def transfer_hist_pars_to_scenario(hist_en_filename, scen_en_filename):
"""transfer static (shared) parameters from a history parameter ensemble
to a scenario parameter ensemble
Args:
hist_en_filename (str): a binary-format file holding a parameter ensemble
for the history model
scen_en_filename (str): a binary-format file holding a parameter ensemble
for the scenario model
"""
# load the control files and ensembles for both the history and scenario models
hist_pst = pyemu.Pst(os.path.join(h_dir, "eaa_ver.pst"))
hist_en = pyemu.ParameterEnsemble.from_binary(pst=hist_pst, filename=hist_en_filename)
scen_pst = pyemu.Pst(os.path.join(s_dir, "eaa_pred.pst"))
scen_en = pyemu.ParameterEnsemble.from_binary(pst=scen_pst, filename=scen_en_filename)
# if the indices are not the same, re-align to the history ensemble
if list(hist_en.index.values) != list(scen_en.index.values):
# use the history en index
scen_en = scen_en.loc[hist_en.index, :]
assert scen_en.shape == scen_en.dropna().shape
# tags for shared parameters
props = {"hk", "ss", "sy", "dc", "hb"}
hist_par = hist_pst.parameter_data
scen_par = scen_pst.parameter_data
# get lists of shared parameters in both ensembles
hist_parnames = hist_par.loc[hist_par.parnme.apply(lambda x: True in [True for p in props if p in x]), "parnme"]
scen_parnames = scen_par.loc[scen_par.parnme.apply(lambda x: True in [True for p in props if p in x]), "parnme"]
# work out the common names between the two sets of names
common_pars = list(set(hist_parnames).intersection(set(scen_parnames)))
common_pars.sort()
# update the static pars in the scenario ensemble from the history ensemble
scen_en._df.loc[:, common_pars] = hist_en._df.loc[:, common_pars]
# save the updated scenario ensemble
scen_en.to_binary(os.path.join(s_dir, "updated_par_en.jcb"))
# update the scenario control file ++ option for the parameter ensemble
scen_pst.pestpp_options["ies_par_en"] = "updated_par_en.jcb"
# save
scen_pst.write(os.path.join(s_dir, "eaa_pred.pst"))
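# Example call (file names are hypothetical placeholders following the usual
# PESTPP-IES convention of case.iteration.par.jcb):
# transfer_hist_pars_to_scenario(os.path.join(h_dir, "eaa_ver.3.par.jcb"),
#                                os.path.join(s_dir, "prior.jcb"))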
def plot_below(pst_file, pr_en_file, pt_en_file=None,
plt_name="months_below.pdf", thres=30.0):
"""plot the consecutive and cumulative Comal springs discharge
months where springflow is below a given threshold
Args:
pst_file (str): control file name
pr_en_file (str): binary-format prior scenario output ensemble
pt_en_file (str): binary-format posterior scenario output ensemble
plt_name (str): plot name. Default is "months_below.pdf"
thres (float): threshold (in positive CFS) for Comal springflow
"""
# load the control file
pst = pyemu.Pst(pst_file)
# load the prior ensemble
pr_en = pyemu.ObservationEnsemble.from_binary(pst=pst, filename=pr_en_file)
# load the posterior ensemble, if one was given
pt_en = None
if pt_en_file is not None:
pt_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=pt_en_file)
# make sure we are showing the same prior and posterior reals
pr_en = pr_en.loc[pt_en.index,:]
# get just the comal springs ensemble blocks
# and convert to positive CFS
cols = [c for c in pr_en.columns if "comal" in c]
pr_en = pr_en.loc[:, cols] * -1. / 60. / 60. / 24.
if pt_en is not None:
pt_en = pt_en.loc[:, cols] * -1. / 60. / 60. / 24.
obs = pst.observation_data
comal_obs = obs.loc[cols, "obsval"] * -1. / 60. / 60. / 24.
# get the "truth" number of months below thres
comal_truth = comal_obs.loc[comal_obs < thres].shape[0]
# count prior ensemble consecutive and cumulative months below
pr_count, pr_max = [], []
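# for each realization, "mx" ends up holding the longest consecutive run of months
# below the threshold and "c" the total (cumulative) number of months below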
for real in pr_en.index:
s = pr_en.loc[real, cols]
isbelow = s < thres
below = s.loc[isbelow]
isbelow = isbelow.astype(int)
# consecutive months below
mx = 0
longest = 0
current = 0
for num in isbelow.values:
if num == 1:
current += 1
else:
longest = max(longest, current)
current = 0
mx = max(longest, current)
c = below.shape[0]
pr_count.append(c)
pr_max.append(mx)
# count posterior ensemble consecutive and cumulative months below
if pt_en is not None:
pt_count, pt_max = [], []
for real in pt_en.index:
s = pt_en.loc[real, cols]
isbelow = s < thres
below = s.loc[isbelow]
isbelow = isbelow.astype(int)
mx = 0
longest = 0
current = 0
for num in isbelow.values:
if num == 1:
current += 1
else:
longest = max(longest, current)
current = 0
mx = max(longest, current)
c = below.shape[0]
pt_count.append(c)
pt_max.append(mx)
# truth consecutive months below
mx = 0
longest = 0
current = 0
isbelow = comal_obs < thres
isbelow = isbelow.astype(int)
for num in isbelow.values:
if num == 1:
current += 1
else:
longest = max(longest, current)
current = 0
truth_mx = max(longest, current)
print("comal", comal_truth, truth_mx)
density = True
mxbin = 40
bins = np.arange(0, mxbin, 1)
# echo number of realizations not being shown greater than mxbin
# this goes in the figure caption
tpr = np.array(pr_max)
tpt = np.array(pt_max) if pt_en is not None else np.array([])
print("consec", tpr[tpr > mxbin].shape, tpt[tpt > mxbin].shape)
tpr = np.array(pr_count)
tpt = np.array(pt_count) if pt_en is not None else np.array([])
print("count", tpr[tpr > mxbin].shape, tpt[tpt > mxbin].shape)
fig, axes = plt.subplots(1, 2, figsize=(8.5, 4))
ax = axes[0]
ax.hist(pr_max, bins=bins, facecolor="0.5", edgecolor="none", alpha=0.35, density=density)
if pt_en is not None:
ax.hist(pt_max, bins=bins, facecolor="b", edgecolor="none", alpha=0.35, density=density)
ax.set_ylabel("increasing probability density")
ymin = ax.get_ylim()
ax.plot([truth_mx, truth_mx], ymin, "r-")
ax.set_xlabel("months")
ax.set_yticks([])
ax.set_ylim(ymin)
ax.set_title("A) simulated consecutive Comal months below {0}".format(thres) + \
" $\\frac{ft^3}{s}$", loc="left")
ax = axes[1]
ax.hist(pr_count, bins=bins, facecolor="0.5", edgecolor="none", alpha=0.35, density=density)
if pt_en is not None:
ax.hist(pt_count, bins=bins, facecolor="b", edgecolor="none", alpha=0.35, density=density)
ax.set_ylabel("increasing probability density")
ymin = ax.get_ylim()
ax.plot([comal_truth, comal_truth], ymin, "r-")
ax.set_xlabel("months")
ax.set_yticks([])
ax.set_ylim(ymin)
ax.set_title("B) simulated Comal months below {0}".format(thres) + \
" $\\frac{ft^3}{s}$", loc="left")
plt.tight_layout()
plt.savefig(plt_name)
plt.show()
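# Example call (file names are hypothetical placeholders):
# plot_below(os.path.join(s_dir, "eaa_pred.pst"), "eaa_pred.0.obs.jcb",
#            pt_en_file="eaa_pred.3.obs.jcb", thres=30.0)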
def build_temporal_localizer(t_dir, pst_name="eaa_ver.pst", save=True):
"""build a localizer for temporal parameters
"""
m = flopy.modflow.Modflow.load(h_nam_file, model_ws=t_dir, load_only=["dis"],
check=False, forgive=False)
totim = list(np.cumsum(m.dis.perlen.array))
totim.insert(0, 0.0)
# stress period begin datetime
kper_dt = pd.to_datetime(m.start_datetime) + pd.to_timedelta(totim, unit="d")
pst = pyemu.Pst(os.path.join(t_dir, pst_name))
par = pst.parameter_data.loc[pst.adj_par_names, :].copy()
obs = pst.observation_data.loc[pst.nnz_obs_names, :].copy()
obs.loc[:, "datetime"] = pd.to_datetime(obs.obsnme.apply(lambda x: x.split("_")[-1]))
# recharge constants
df = pd.read_csv(os.path.join(t_dir, "arr_pars.csv"), index_col=0)
df = df.loc[df.mlt_file.apply(lambda x: "rech" in x and "dat_cn" in x), :]
df.loc[:, "kper"] = df.org_file.apply(lambda x: int(x.split('.')[0].split('_')[-1]))
df.loc[:, "cnst"] = df.mlt_file.apply(lambda x: int(x.split('.')[0].split('rech')[-1]))
zone_kper = {z: k for z, k in zip(df.cnst, df.kper)}
cn_rech_par = par.loc[par.pargp.apply(lambda x: "cn_rech" in x), :].copy()
cn_rech_par.loc[:, "kper"] = cn_rech_par.pargp.apply(lambda x: zone_kper[int(x.replace("cn_rech", ""))])
cn_rech_par.loc[:, "datetime"] = cn_rech_par.kper.apply(lambda x: kper_dt[x])
# recharge zones
df = pd.read_csv(os.path.join(t_dir, "arr_pars.csv"), index_col=0)
df = df.loc[df.mlt_file.apply(lambda x: "rech" in x and "dat_zn" in x), :]
df.loc[:, "kper"] = df.org_file.apply(lambda x: int(x.split('.')[0].split('_')[-1]))
df.loc[:, "zone"] = df.mlt_file.apply(lambda x: int(x.split('.')[0].split('rech')[-1]))
zone_kper = {z: k for z, k in zip(df.zone, df.kper)}
zn_rech_par = par.loc[par.pargp.apply(lambda x: "zn_rech" in x), :].copy()
zn_rech_par.loc[:, "kper"] = zn_rech_par.pargp.apply(lambda x: zone_kper[int(x.replace("zn_rech", ""))])
zn_rech_par.loc[:, "datetime"] = zn_rech_par.kper.apply(lambda x: kper_dt[x])
# print(zn_rech_par.datetime.unique())
# print(cn_rech_par.datetime)
# wel pars
wel_par = par.loc[par.pargp == "welflux", :].copy()
wel_par.loc[:, "kper"] = wel_par.parnme.apply(lambda x: int(x.split('_')[-1]))
wel_par.loc[:, "datetime"] = wel_par.kper.apply(lambda x: kper_dt[x])
# print(wel_par.datetime)
# init conditions
strt_par = par.loc[par.pargp.apply(lambda x: "strt" in x), :].copy()
strt_grps = strt_par.pargp.unique()
temp_par = wel_par.copy()
temp_par = temp_par.append(zn_rech_par)
temp_par = temp_par.append(cn_rech_par)
for sgrp in strt_grps:
temp_par.loc[sgrp, "datetime"] = pd.to_datetime(m.start_datetime)
temp_par.loc[sgrp, "pargp"] = sgrp
temp_par.loc[sgrp, "parnme"] = sgrp
temp_grps = set(temp_par.pargp.unique().tolist())
static_groups = par.loc[par.pargp.apply(lambda x: x not in temp_grps), "pargp"].unique().tolist()
loc_cols = static_groups
loc_cols.extend(temp_par.parnme.tolist())
print(len(loc_cols))
loc = pyemu.Matrix.from_names(row_names=pst.nnz_obs_names, col_names=loc_cols).to_dataframe()
loc.loc[:, :] = 1.0
loc.loc[:, temp_par.parnme] = 0.0
u_obs_dts = obs.datetime.unique()
tol = pd.to_timedelta(550, unit="d")
# # Gradient Boosted Trees: Model understanding
# # (https://www.tensorflow.org/alpha/tutorials/estimators/
# # boosted_trees_model_understanding)
import altair as alt
import numpy as np
import pandas as pd
import tensorflow as tf
from ivory.utils.altair import bar_from_series
tf.random.set_seed(123)
# ## How to interpret Boosted Trees models both locally and globally
# ## Load the titanic dataset
# !Load dataset.
dftrain = pd.read_csv("https://storage.googleapis.com/tf-datasets/titanic/train.csv")
dfeval = pd.read_csv("https://storage.googleapis.com/tf-datasets/titanic/eval.csv")
y_train = dftrain.pop("survived")
y_eval = dfeval.pop("survived")
# ## Create feature columns, input_fn, and train the estimator
# ### Preprocess the data
fc = tf.feature_column
CATEGORICAL_COLUMNS = [
"sex",
"n_siblings_spouses",
"parch",
"class",
"deck",
"embark_town",
"alone",
]
NUMERIC_COLUMNS = ["age", "fare"]
def one_hot_cat_column(feature_name, vocab):
return fc.indicator_column(
fc.categorical_column_with_vocabulary_list(feature_name, vocab)
)
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(fc.numeric_column(feature_name, dtype=tf.float32))
# ### Build the input pipeline
# !Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((X.to_dict(orient="list"), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs).batch(NUM_EXAMPLES)
return dataset
return input_fn
# !Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
# ### Train the model
params = {
"n_trees": 50,
"max_depth": 3,
"n_batches_per_layer": 1,
# You must enable center_bias = True to get DFCs. This will force the model to
# make an initial prediction before using any features (e.g. use the mean of
# the training labels for regression or log odds for classification when
# using cross entropy loss).
"center_bias": True,
}
est = tf.estimator.BoostedTreesClassifier(feature_columns, **params)
# !Train model.
est.train(train_input_fn, max_steps=100)
# -
# !Evaluation.
results = est.evaluate(eval_input_fn)
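# `results` is a plain dict of evaluation metrics (accuracy, AUC, loss, ...);
# wrapping it in a Series/DataFrame below just gives a tidy tabular display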
pd.Series(results).to_frame()
# ## Local interpretability
pred_dicts = list(est.experimental_predict_with_explanations(eval_input_fn))
# !Create DFC Pandas dataframe.
labels = y_eval.values
probs = pd.Series([pred["probabilities"][1] for pred in pred_dicts])
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Routines that call the Wind `wset` function.
Notes on the download strategy:
1. Bisecting on time downloads the least data, but once part of the range has been downloaded, a one-position shift when back-filling forces a full re-download.
2. For files: take three files as a group; if all three are the same, delete the middle one; repeat until no more can be deleted, then stop.
"""
import os
import pandas as pd
from .utils import asDateTime
def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
"""
Sector constituents.
CITIC Securities level-1 industry indexes: the date seems unnecessary, since calendar (non-trading) days are returned as well.
Risk-warning (ST) stocks: the date is simply the query date and the stock names are the latest ones, so this is of little use.
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
:param w:
:param sector:
:param date:
:return:
"""
param = 'date=%s' % date
if sector:
param += ';sector=%s' % sector
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("sectorconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
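# Example (assumes `w` is an already-started WindPy session):
# hs300 = download_sectorconstituent(w, "2017-03-03", None, "000300.SH")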
def download_indexconstituent(w, date, windcode, field='wind_code,i_weight'):
"""
Index constituent weights.
If the given date is not a trading day, the data for the previous trading day is returned.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'date=%s' % date
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("indexconstituent", param)
df = pd.DataFrame(w_wset_data.Data)