| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
---|---|---|
import pandas as pd
from datetime_tricks import datetime_csv_suffix
def spliter(df, col, min_block_size):
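# Split df (sorted by `col`) into consecutive blocks of at least `min_block_size`
# rows without splitting rows that share the same `col` value; raises 'oversize'
# when no value boundary exists beyond `min_block_size`. The final remainder block
# may be smaller than `min_block_size`.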
df.sort_values(by=col, inplace=True)
df.reset_index(drop=True, inplace=True)
container = []
r, c = df.shape
while r > min_block_size:
df.reset_index(drop=True, inplace=True)
for i in range(min_block_size, r-1):
if df[col].iloc[i] != df[col].iloc[i+1]:
break
else:
raise Exception('oversize')
container.append(df[:i+1])
df = df[i+1:]
r, c = df.shape
container.append(df)
return container
def export_splitted(container):
len_container = len(container)
for i, item in enumerate(container, start=1):
item.to_csv(
datetime_csv_suffix(
f'c:/vba_output/{i}_of_{len_container}.csv'
),
index=None
)
def export_parent_csv(df):
# just in case some values are lower-case letters.
df = df.apply(lambda x: x.astype(str).str.upper())
df.set_index('parent', inplace=True)
# create one CSV file (a.k.a. a BOM) per parent; the filename is the parent value.
for i in df.index.unique():
df_i = df[df.index == i]
if df_i.empty:
print(f'{i} is empty DataFrame')
continue  # skip empty groups and keep exporting the rest
else:
df_i.to_csv(f'output\\{i}.csv', index = None)
if __name__ == '__main__':
d = {
'parent':list('abbaaaacbbccdddeeeff')
}
df = | pd.DataFrame(d) | pandas.DataFrame |
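# The __main__ block above is truncated at the dataset-row boundary; a minimal,
# hypothetical usage sketch for the helpers it defines (block size and variable
# names below are illustrative, not from the original source):
example_df = pd.DataFrame({'parent': list('abbaaaacbbccdddeeeff')})
example_blocks = spliter(example_df, 'parent', min_block_size=2)
export_splitted(example_blocks)  # one timestamped CSV per block under c:/vba_output/
export_parent_csv(example_df)    # one CSV per unique parent value under output\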
import abc
from logging import getLogger
from os import path
import pandas as pd
from clinica.utils.inputs import check_caps_folder
from clinicadl.utils.exceptions import (
ClinicaDLArgumentError,
ClinicaDLConfigurationError,
ClinicaDLTSVError,
)
logger = getLogger("clinicadl")
class SplitManager:
def __init__(
self,
caps_directory,
tsv_path,
diagnoses,
baseline=False,
multi_cohort=False,
split_list=None,
):
self._check_tsv_path(tsv_path, multi_cohort)
self.tsv_path = tsv_path
self.caps_dict = self._create_caps_dict(caps_directory, multi_cohort)
self.multi_cohort = multi_cohort
self.diagnoses = diagnoses
self.baseline = baseline
self.split_list = split_list
@abc.abstractmethod
def max_length(self) -> int:
"""Maximum number of splits"""
pass
@abc.abstractmethod
def __len__(self):
pass
@property
@abc.abstractmethod
def allowed_splits_list(self):
"""
List of possible splits if no restriction was applied
Returns:
list[int]: list of all possible splits
"""
pass
def __getitem__(self, item):
"""
Returns a dictionary of DataFrames with train and validation data.
Args:
item (int): Index of the split wanted.
Returns:
Dict[str, pd.DataFrame]: dictionary with two keys (train and validation).
"""
self._check_item(item)
if self.multi_cohort:
tsv_df = | pd.read_csv(self.tsv_path, sep="\t") | pandas.read_csv |
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionBitwiseXorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_bitwise_xor_scalar(self):
self.assertEqual(dnp.bitwise_xor(1, 4), np.bitwise_xor(1, 4))
self.assertEqual(dnp.bitwise_xor(1, -5), np.bitwise_xor(1, -5))
self.assertEqual(dnp.bitwise_xor(0, 9), np.bitwise_xor(0, 9))
def test_function_math_binary_bitwise_xor_list(self):
lst1 = [0, 1, 2]
lst2 = [4, 6, 9]
assert_array_equal(dnp.bitwise_xor(lst1, lst2), np.bitwise_xor(lst1, lst2))
def test_function_math_binary_bitwise_xor_array_with_scalar(self):
npa = np.array([0, 1, 2])
dnpa = dnp.array([0, 1, 2])
assert_array_equal(dnp.bitwise_xor(dnpa, 1), np.bitwise_xor(npa, 1))
assert_array_equal(dnp.bitwise_xor(1, dnpa), np.bitwise_xor(1, npa))
def test_function_math_binary_bitwise_xor_array_with_array(self):
npa1 = np.array([0, 1, 2])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([0, 1, 2])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.bitwise_xor(dnpa1, dnpa2), np.bitwise_xor(npa1, npa2))
def test_function_math_binary_bitwise_xor_array_with_array_param_out(self):
npa1 = np.array([0, 1, 2])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([0, 1, 2])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.bitwise_xor(npa1, npa2, out=npa)
dnp.bitwise_xor(dnpa1, dnpa2, out=dnpa)
assert_array_equal(dnpa, npa)
def test_function_math_binary_bitwise_xor_array_with_series(self):
npa = np.array([0, 1, 2])
dnpa = dnp.array([0, 1, 2])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.bitwise_xor(dnpa, os).to_pandas(), np.bitwise_xor(npa, ps))
assert_series_equal(dnp.bitwise_xor(os, dnpa).to_pandas(), np.bitwise_xor(ps, npa))
pser = | pd.Series([1, 2, 4]) | pandas.Series |
# compare our program's output to the original dataset
import sys
import csv
import copy
import random
import numpy as np
import pandas as pd
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.model_selection import train_test_split as TTS
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix
'''
This program is for comparing the chord progressions that we generated with actual chord progressions from our dataset.
We build a neural network using Scikit-Learn, train it on labeled instances of generated and real chords, and print the results.
We use the results of this program in the evaluation section of our report.
'''
def main():
fileName = sys.argv[1]
ourChords = pd.read_csv(fileName) #save each dataset
theirChords = pd.read_csv("theirChords.csv")
dataSet = pd.concat([ourChords, theirChords], sort=False)  # concatenate the two datasets (DataFrame.append is deprecated)
dataSet = dataSet.sample(frac=1).reset_index(drop=True) #shuffle dataset
#split dataset into chords and labels
X = dataSet.iloc[:, 1:16]
Y = dataSet.iloc[:, 0]
print(X)
print(Y)
# encode labels as 0s and 1s
le = preprocessing.LabelEncoder()
Y = le.fit_transform(Y)
#one hot encode everything
X = | pd.get_dummies(X) | pandas.get_dummies |
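# The script is truncated here; under the assumption stated in the docstring above
# (train an MLP on the labeled chords and print the results), the remaining steps
# would look roughly like this hedged sketch (layer sizes and split are illustrative):
X_train, X_test, Y_train, Y_test = TTS(X, Y, test_size=0.2)
clf = MLP(hidden_layer_sizes=(50, 50), max_iter=500)
clf.fit(X_train, Y_train)
predictions = clf.predict(X_test)
print(confusion_matrix(Y_test, predictions))
print(classification_report(Y_test, predictions))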
import pytest
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.testing as pdt
from scipy.stats import logistic
import zepid as ze
from zepid import (RiskRatio, RiskDifference, OddsRatio, NNT, IncidenceRateRatio, IncidenceRateDifference,
Sensitivity, Specificity, Diagnostics, interaction_contrast, interaction_contrast_ratio, spline,
table1_generator)
from zepid.calc import sensitivity, specificity
@pytest.fixture
def data_set():
df = | pd.DataFrame() | pandas.DataFrame |
import glob
import pandas
import os
def readJSON2Dict(input_file):
"""
Read a JSON file. Will return a list or dict.
:param input_file: input JSON file path.
"""
import json
with open(input_file) as f:
data = json.load(f)
return data
def writeDict2JSON(data_dict, out_file):
"""
Write some data to a JSON file. The data would commonly be structured as a dict
but could also be a list.
:param data_dict: The dict (or list) to be written to the output JSON file.
:param out_file: The file path to the output file.
"""
import json
with open(out_file, "w") as fp:
json.dump(
data_dict,
fp,
sort_keys=True,
indent=4,
separators=(",", ": "),
ensure_ascii=False,
)
def merge_annual_stats(input_pd_files, country_names_lut_file, out_feather=None, out_excel=None, excel_sheet=None, out_csv=None):
country_names_luts = readJSON2Dict(country_names_lut_file)
years = ['t2007', 't2008', 't2009', 't2010', 't2015', 't2016', 't2017', 't2018', 't2019', 't2020']
year_info = dict()
comb_df = None
for year in years:
year_info[year] = dict()
for in_file in input_pd_files:
if year in in_file:
year_info[year]['year_file'] = in_file
if 'year_file' in year_info[year]:
cln_year = year.replace('t', '')
yr_df = pandas.read_feather(year_info[year]['year_file'])
yr_df = yr_df.rename(columns={'count_gain': '{}_count_gain'.format(cln_year), 'area_gain': '{}_area_gain'.format(cln_year)})
yr_df = yr_df.rename(columns={'count_loss': '{}_count_loss'.format(cln_year), 'area_loss': '{}_area_loss'.format(cln_year)})
yr_df = yr_df.drop(['uid'], axis=1)
if cln_year == '2007':
comb_df = yr_df
else:
comb_df = | pandas.merge(left=comb_df, right=yr_df, how='outer', left_on='region', right_on='region') | pandas.merge |
"""Core managing the datasets.
Usage example:
# Create a dataset of 100 benign PE and 100 malware samples, with minimum
# malice of 0.9
DatasetCore.create_dataset_from_parameters(AnalyzedFileTypes.PE, 0.9,
9 * [True], 200, 0.5,
"pe_malice.csv")
# Create a dataset of 200 generic and trojan PE samples, with minimum malice
# of 0.9
DatasetCore.create_dataset_from_parameters(
AnalyzedFileTypes.PE, 0.9,
[True, True, False, False, False, False, False, False, False], 200, 0,
"pe_generic_vs_trojan.csv")
# Delete the created datasets
DatasetCore.remove_dataset("pe_malice.csv")
DatasetCore.remove_dataset("pe_generic_vs_trojan.csv")
"""
import json
import os
import typing
import modules.dataset.errors as errors
import pandas
import yaml
from modules.configuration.folder_structure import Files, Folders
from modules.configuration.parameters import Packages
from modules.dataset.types import AnalyzedFileTypes
from modules.utils.configuration_manager import ConfigurationManager
from modules.utils.types import ConfigurationSpaces
CONFIGURATION_KEYS = Packages.Dataset.ConfigurationKeys
class DatasetCore:
"""Class for working with datasets."""
@staticmethod
def _get_metadata(dataset_full_path: str) -> dict:
with open(dataset_full_path, "r") as dataset_file:
lines = dataset_file.readlines()
if len(lines) == 0:
return None
metadata_line = lines[0]
if (not metadata_line.startswith(
Packages.Dataset.METADATA_LINE_START)):
return None
metadata_line = metadata_line[len(Packages.Dataset.METADATA_LINE_START):]
metadata = json.loads(metadata_line)
return metadata
@staticmethod
def _dump_metadata(dataset_full_path: str, metadata: dict) -> None:
stringified_metadata = json.dumps(metadata)
with open(dataset_full_path, "r+") as output_file:
content = output_file.read()
new_content = Packages.Dataset.METADATA_LINE_START
new_content += stringified_metadata + "\n" + content
output_file.seek(0, 0)
output_file.write(new_content)
@staticmethod
def create_dataset_from_parameters(file_type: AnalyzedFileTypes,
min_malice: float,
desired_families: typing.List[bool],
entries_count: int,
benign_ratio: float,
output_filename: str,
description: str = "") -> bool:
"""Creates a custom dataset based on the given parameters.
Args:
file_type (AnalyzedFileTypes): Type of files to include
min_malice (float): Minimum malice score of malware samples included
in the dataset
desired_families (typing.List[bool]): Array of booleans, in which
each entry indicates if the pointed family (via index) is
included into the dataset
entries_count (int): Mandatory number of entries in the dataset
benign_ratio (float): Ratio between the size of benign samples and
of the whole dataset
output_filename (str): The base name of the output file
description (str): Description of the dataset. Defaults to "".
Raises:
InsufficientEntriesForDatasetError: The dataset could not be build
due to insufficient entries.
Returns:
bool: Boolean indicating if the dataset was successfully created
"""
malware_labels_df = pandas.read_csv(Files.MALWARE_LABELS)
benign_labels_df = pandas.read_csv(Files.BENIGN_LABELS)
# Select only the desired file type
malware_labels_df = malware_labels_df[malware_labels_df["type"] ==
file_type.value.ID]
benign_labels_df = benign_labels_df[benign_labels_df["type"] ==
file_type.value.ID]
# Get the entries' count for each type of sample
malware_count = int((1 - benign_ratio) * entries_count)
benign_count = entries_count - malware_count
# Select the entries with malice above the minimum one
malware_labels_df = malware_labels_df[
malware_labels_df["malice"] >= min_malice]
# Check if a dataset can be built
if (len(malware_labels_df) < malware_count
or len(benign_labels_df) < benign_count):
raise errors.InsufficientEntriesForDatasetError()
# Select entries with the maximum membership to the given categories
desired_families_int = [1 if elem else 0 for elem in desired_families]
malware_labels_df["membership"] = malware_labels_df.iloc[:, 3:].dot(
desired_families_int)
malware_labels_df = malware_labels_df.sort_values("membership", ascending=False)  # highest membership first
del malware_labels_df["membership"]
malware_labels_df = malware_labels_df.head(malware_count)
# Select the benign entries in a random manner
benign_labels_df = benign_labels_df.sample(n=benign_count)
# Merge the data frames
all_labels_df = pandas.concat([malware_labels_df, benign_labels_df])
all_labels_df = all_labels_df.sample(frac=1).reset_index(drop=True)
# Dump the dataframe to file
output_full_filename = os.path.join(Folders.CUSTOM_DATASETS,
output_filename)
all_labels_df.to_csv(output_full_filename, index=False)
# Create the metadata and dump them
desired_families_names = [
name for include, name in zip(
desired_families, malware_labels_df.columns[3:]) if include
]
metadata = {
"description": description,
"extension": file_type.value.STANDARD_EXTENSION,
"min_malice": min_malice,
"desired_families": desired_families_names,
"entries_count": entries_count,
"benign_ratio": benign_ratio
}
DatasetCore._dump_metadata(output_full_filename, metadata)
return True
@staticmethod
def create_dataset_from_config(config_filename: str) -> bool:
"""Creates a custom dataset based on the configuration from a file.
Args:
config_filename (str): YAML configuration file
Raises:
DatasetConfigurationFileNotFoundError: The configuration file of the
dataset could not be found or opened.
InvalidFileExtensionError: The mentioned file extension from the
dataset configuration file is invalid.
DatasetConfigurationMandatoryKeysNotPresentError: The dataset
configuration file does not contain all mandatory keys.
Returns:
bool: Boolean indicating if the dataset was successfully created
"""
# Get the malware families
configuration = ConfigurationManager()
dataset_config = configuration.get_space(ConfigurationSpaces.DATASET)
malware_families = dataset_config["malware_families"].keys()
malware_families = [family.lower() for family in malware_families]
try:
with open(config_filename, "r") as config_file:
configuration = yaml.load(config_file, Loader=yaml.SafeLoader)
except Exception:
raise errors.DatasetConfigurationFileNotFoundError()
# Check if the mandatory keys are present
valid_keys = [
elem.value for elem in CONFIGURATION_KEYS
if not elem.name.endswith("_")
]
for key in valid_keys:
if key not in configuration.keys():
raise errors.DatasetConfigurationMandatoryKeysNotPresentError()
# Map the families names to elements in an array of booleans
processed_desired_categories = 9 * [False]
for family in configuration[CONFIGURATION_KEYS.DESIRED_FAMILIES.value]:
try:
index = malware_families.index(family)
processed_desired_categories[index] = True
except Exception: # nosec
pass
configuration[CONFIGURATION_KEYS.DESIRED_FAMILIES.
value] = processed_desired_categories
# Map the desired extension to a file type
file_type = AnalyzedFileTypes.map_extension_to_type(
configuration.pop(CONFIGURATION_KEYS.FILE_EXTENSION.value))
if not file_type:
raise errors.InvalidFileExtensionError()
configuration["file_type"] = file_type
return DatasetCore.create_dataset_from_parameters(**configuration)
@staticmethod
def list_datasets() -> typing.List[typing.List]:
"""Lists all created datasets by collecting their metadata.
Returns:
typing.List[typing.List]: Datasets metadata
"""
all_metadata = | pandas.DataFrame() | pandas.DataFrame |
'''
Created on November 06, 2019
Function to determine which files have been modified after a given date
Returns a pandas DataFrame object
@author: pauladjata
'''
# import site-packages and modules
import glob
import os
import time
import datetime
import re
import pandas as pd
def save_as_excel_file(path_, df):
from pandas import ExcelWriter
from datetime import datetime
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
# save df to Excel
options = {}
options['strings_to_formulas'] = False
options['strings_to_urls'] = False
file_name_xls = 'df_of_modified_files_' + str(timestr) + '.xlsx'
file_to_save = path_ + '/' + file_name_xls
writer = pd.ExcelWriter(file_to_save, engine='xlsxwriter', options=options)
df.to_excel(writer, sheet_name='Elements', index=False)
writer.save()
print('DataFrame was saved as ' + file_to_save)
def get_sub_folder_of_returned_path(string_of_full_path, path):
index_of_last_occur_in_path = path.rfind('\\')
last_folder = path[index_of_last_occur_in_path + 1:]
index_of_last_occur_in_string = string_of_full_path.rfind(last_folder)
sub_folder_string = string_of_full_path[index_of_last_occur_in_path + index_of_last_occur_in_string + 1:]
return sub_folder_string
def get_filename_from_sub_filepath(string_of_sub_filepath):
index_of_last_occur_in_sub_filepath = string_of_sub_filepath.rfind('\\')
filename_ = string_of_sub_filepath[index_of_last_occur_in_sub_filepath + 1:]
return filename_
def main(date_, path_, save_as_excel_bool):
"""Returns a pandas dataframe of files modified after a given date and in a given directory"""
files = [f for f in glob.glob(path_ + r'\**\*', recursive=True)]
file_details_dict = {}
for i in range(0, len(files)):
key = i
file_details_dict.setdefault(key, [])
# gets last modified and created time
modified_time = time.ctime(os.path.getmtime(files[i]))
created_time = time.ctime(os.path.getctime(files[i]))
modified_time_date = datetime.datetime.strptime(modified_time, "%a %b %d %H:%M:%S %Y").date()
test_date = datetime.datetime.strptime(date_, '%b %d %Y').date()
NEW_FILE_BOOL = modified_time_date > test_date
#checks if path is a file
IS_FILE_BOOL = os.path.isfile(files[i])
file_details_dict[key] = {
'filepath': files[i],
'last_mod_time': modified_time,
'created_time': created_time,
'isFile': IS_FILE_BOOL,
'newFile': NEW_FILE_BOOL}
df_file_details = pd.DataFrame.from_dict(file_details_dict, orient='index')
# convert 'mod_date' and 'create_date' columns to datetime and then delete
df_file_details['mod_date'] = | pd.to_datetime(df_file_details['last_mod_time']) | pandas.to_datetime |
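# main() is truncated at the dataset-row boundary; a hypothetical usage sketch
# (the path is a placeholder and the date must match the '%b %d %Y' format that
# main() parses):
df_modified = main('Nov 06 2019', r'C:\projects\reports', save_as_excel_bool=False)
recent_files = df_modified[df_modified['newFile'] & df_modified['isFile']]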
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
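# Hedged worked example (illustrative numbers): two days into a five-day ramp,
#   ramp_fun(10.0, 2.0, pd.Timestamp('2020-03-17'), pd.Timestamp('2020-03-15'), l=5)
# returns 10.0 + (2.0 - 10.0)/5 * 2 = 6.8, i.e. 40% of the way from Nc_old to Nc_new;
# delayed_ramp_fun applies the same interpolation shifted by tau_days.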
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
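# Hedged usage sketch (the matrix dimension follows the chosen aggregation,
# e.g. 11x11 for 'prov'):
#   all_mob, avg_mob = load_all_mobility_data('prov', dtype='fractional')
#   P = all_mob.loc[pd.Timestamp('2020-08-01'), 'place']  # fractional mobility matrix
# Dates missing from the interim data are typically handled by falling back on avg_mob.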
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = default_mobility  # user-supplied fall-back mobility matrix
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
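# Hedged usage sketch: instantiate with the data loaded by load_all_mobility_data
# and query it as a time-dependent parameter, e.g.
#   all_mob, avg_mob = load_all_mobility_data('prov')
#   mobility_update = make_mobility_update_function(all_mob, avg_mob)
#   P = mobility_update.mobility_wrapper_func(pd.Timestamp('2020-03-10'), None, None)
# Before 2020-03-17 the wrapper returns an identity matrix (no inter-patch mobility yet).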
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
The current implementation covers the alpha through delta strains.
If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
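# Hedged worked example of the delta logistic above: ten days after t_sig
# (t = 2021-07-05) the delta fraction is 1/(1 + exp(-0.11*10)) ≈ 0.75, so __call__
# returns roughly [0, 0.25, 0.75] for the [non-VOC, alpha-gamma, delta] slots.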
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first-dose data by Sciensano are used; for dates beyond the available data, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
t27 = | pd.Timestamp('2021-12-26') | pandas.Timestamp |
import numpy as np
import pandas as pd
import numba
import seaborn as sns
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from hfhd import hd
@numba.njit
def garch_11(n, sigma_sq_0, mu, alpha, beta, omega):
r"""
Generate GARCH(1, 1) log-returns of size n.
This function is accelerated via JIT with Numba.
Parameters
----------
n : int
The length of the wished time series.
sigma_sq_0 : float > 0
The variance starting value.
mu : float
The drift of log-returns.
alpha : float >= 0
The volatility shock parameter. A higher value will lead to
larger spikes in volatility. A.k.a short-term persistence.
beta : float >= 0
The volatility persistence parameter. A larger value will
result in stronger persistence. A.k.a long-term persistence.
omega : float > 0
The variance constant. A higher value results in a higher
mean variance.
Returns
-------
r : numpy.ndarray
The GARCH log-returns time series.
sigma_sq : numpy.ndarray
The resulting variance time series with which each log-return
was generated.
Notes
-----
In general, the conditional variance of a GARCH(p,q) model is given by
.. math:: \sigma_{t}^{2}=\omega+\sum_{i=1}^{q} \alpha_{i}
\varepsilon_{t-i}^{2}+\sum_{j=1}^{p} \beta_{j} \sigma_{t-j}^{2}.
The unconditional variance is given by
.. math:: \sigma^{2}=\frac{\omega}{1-\sum_{i=1}^{q}
\alpha_{i}-\sum_{j=1}^{p} \beta_{j}}.
Here, :math:`p=q=1`,
and :math:`\epsilon_{t} \sim \mathcal{N}\left(0, 1\right)`
"""
nu = np.random.normal(0, 1, n)
r = np.zeros(n)
epsilon = np.zeros(n)
sigma_sq = np.zeros(n)
sigma_sq[0] = sigma_sq_0
if min(alpha, beta) < 0:
raise ValueError('alpha, beta need to be non-negative')
if omega <= 0:
raise ValueError('omega needs to be positive')
if alpha+beta >= 1:
print('''alpha+beta>=1, variance not defined
--> time series will not be weakly stationary''')
for i in range(n):
if i > 0:
sigma_sq[i] = omega + alpha * epsilon[i-1]**2 + beta * sigma_sq[i-1]
epsilon[i] = (sigma_sq[i]**0.5) * nu[i]
r[i] = mu + epsilon[i]
return r, sigma_sq
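# Hedged usage sketch: one year of daily GARCH(1,1) log-returns with short-term
# persistence alpha=0.09 and long-term persistence beta=0.9 (illustrative values;
# the unconditional variance is omega/(1-alpha-beta) = 0.0001/0.01 = 0.01):
#   r, sigma_sq = garch_11(n=252, sigma_sq_0=0.01, mu=0.0,
#                          alpha=0.09, beta=0.9, omega=0.0001)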
class Universe:
r"""
The universe is a specification from which simulated realizations
can be sampled. Stocks follow a factor model, they belong
to industries and have an idiosyncratic component. Stocks are predictable
by a single feature.
Attributes
----------
feature_beta : float
The true coefficient.
factor_garch_spec : list
The garch specification for factor returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
industry_garch_spec : list
The garch specification for industry returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
resid_garch_spec : list
The garch specification for residual returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
factor_loadings : numpy.ndarray
An array with factor loadings for each stock and factor.
dim = n_stocks x n_factors
industry_loadings : numpy.ndarray
An array with industry loadings for each stock and industry.
dim = n_stocks x n_industry
This is usually a sparse matrix. One stock loads typically on
one or two industries. A good number of industries is 10 to 20.
liquidity : float
A value between 0 and 1 that describes liquidity.
A value of 1 means that the probability of observation
is 100% each minute. 0.5 means that there is a 50%
probability of observing a price each minute.
gamma : float >=0
The microstructure noise will be zero-mean Gaussian with variance
$\gamma^2 var(r)$, where $var(r)$ is the variance of the
underlying true return process. This noise is added to the price.
freq : str, ``'s'`` or ``'m'``.
The granularity of the discretized continous price process.
"""
def __init__(self, feature_beta, factor_garch_spec, industry_garch_spec,
resid_garch_spec, factor_loadings, industry_loadings,
liquidity=0.5, gamma=2, freq='m'):
self.feature_beta = feature_beta
self.factor_garch_spec = factor_garch_spec
self.industry_garch_spec = industry_garch_spec
self.resid_garch_spec = resid_garch_spec
self.factor_loadings = factor_loadings
self.industry_loadings = industry_loadings
self.liquidity = liquidity
self.gamma = gamma
self.freq = freq
self.n_stocks = self.factor_loadings.shape[0]
self.n_ind = self.industry_loadings.shape[1]
self.n_factors = self.factor_loadings.shape[1]
@staticmethod
def uncond_var(spec):
'''
        Compute the unconditional variance from a
GARCH(1,1) specification.
Parameters
----------
spec : list
The garch specification.
``[sigma_sq_0, mu, alpha, beta, omega]``
Returns
-------
float
The unconditional variance.
'''
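        # Worked example: with alpha = 0.05, beta = 0.90 and omega = 1e-5 the
        # unconditional variance is 1e-5 / (1 - 0.05 - 0.90) = 2e-4.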
return spec[4]/(1-spec[2]-spec[3])
def uncond_cov(self):
'''
        Compute the unconditional covariance of stock returns
in the universe from a universe specification.
Returns
-------
numpy.ndarray
The unconditional covariance matrix.
'''
sf = np.diag([self.uncond_var(self.factor_garch_spec)]*self.n_factors)
sr = np.diag([self.uncond_var(self.resid_garch_spec)]*self.n_stocks)
si = np.diag([self.uncond_var(self.industry_garch_spec)]*self.n_ind)
return (self.factor_loadings @ sf @ self.factor_loadings.T
+ sr
+ self.industry_loadings @ si @ self.industry_loadings.T)
def cond_cov(self):
'''
        Compute the daily conditional integrated covariance matrix of stock
returns within regular market hours in the universe from a realized
universe simulation.
Returns
-------
list
A list containing the conditional integrated covariance matrices
of each day.
'''
sr = pd.DataFrame(self.sigma_sq_resid)
sr.index = pd.to_datetime(sr.index, unit=self.freq)
sr = sr.between_time('9:30', '16:00',
include_start=True,
include_end=True)
sr = sr.resample('1d').sum()
si = pd.DataFrame(self.sigma_sq_industry)
si.index = pd.to_datetime(si.index, unit=self.freq)
si = si.between_time('9:30', '16:00',
include_start=True,
include_end=True)
si = si.resample('1d').sum()
sf = pd.DataFrame(self.sigma_sq_factor)
sf.index = | pd.to_datetime(sf.index, unit=self.freq) | pandas.to_datetime |
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import NMF
from sklearn.preprocessing import MinMaxScaler
def add_team_postfix(input_df):
output_df = input_df.copy()
top = output_df['inning'].str.contains('表')
output_df.loc[top, 'batter'] = output_df.loc[top, 'batter'] + '@' + output_df.loc[top, 'topTeam'].astype(str)
output_df.loc[~top, 'batter'] = output_df.loc[~top, 'batter'] + '@' + output_df.loc[~top, 'bottomTeam'].astype(str)
output_df.loc[ top, 'pitcher'] = output_df.loc[ top, 'pitcher'] + '@' + output_df.loc[ top, 'bottomTeam'].astype(str)
output_df.loc[~top, 'pitcher'] = output_df.loc[~top, 'pitcher'] + '@' + output_df.loc[~top, 'topTeam'].astype(str)
return output_df
def add_batter_order(input_df, is_train=True):
pass
def fill_na(input_df):
output_df = input_df.copy()
output_df['pitcherHand'] = output_df['pitcherHand'].fillna('R')
output_df['batterHand'] = output_df['batterHand'].fillna('R')
output_df['pitchType'] = output_df['pitchType'].fillna('-')
    output_df['speed'] = output_df['speed'].str.extract(r'(\d+)', expand=False).fillna(method='ffill')
output_df['ballPositionLabel'] = output_df['ballPositionLabel'].fillna('中心')
output_df['ballX'] = output_df['ballX'].fillna(0).astype(int)
output_df['ballY'] = output_df['ballY'].map({chr(ord('A')+i):i+1 for i in range(11)})
output_df['ballY'] = output_df['ballY'].fillna(0).astype(int)
    output_df['dir'] = output_df['dir'].map({chr(ord('A')+i):i+1 for i in range(26)})
    output_df['dir'] = output_df['dir'].fillna(0).astype(int)
output_df['dist'] = output_df['dist'].fillna(0)
output_df['battingType'] = output_df['battingType'].fillna('G')
output_df['isOuts'] = output_df['isOuts'].fillna('-1').astype(int)
return output_df
def get_base_features(input_df, train_pitcher, test_pitcher, train_batter, test_batter):
output_df = input_df.copy()
output_df['inning'] = 2 * (output_df['inning'].str[0].astype(int) - 1) + output_df['inning'].str.contains('裏')
output_df['pitcherCommon'] = output_df['pitcher']
output_df['batterCommon'] = output_df['batter']
output_df.loc[~(output_df['pitcherCommon'].isin(train_pitcher & test_pitcher)), 'pitcherCommon'] = np.nan
output_df.loc[~(output_df['batterCommon'].isin(train_batter & test_batter)), 'batterCommon'] = np.nan
# label encoding
cat_cols = output_df.select_dtypes(include=['object']).columns
for col in cat_cols:
f = output_df[col].notnull()
output_df.loc[f, col] = LabelEncoder().fit_transform(output_df.loc[f, col].values)
output_df.loc[~f, col] = -1
output_df[col] = output_df[col].astype(int)
output_df['inningHalf'] = output_df['inning'] % 2
output_df['inningNumber'] = output_df['inning'] // 2
output_df['outCount'] = output_df['inning'] * 3 + output_df['O']
output_df['B_S_O'] = output_df['B'] + 4 * (output_df['S'] + 3 * output_df['O'])
output_df['b1_b2_b3'] = output_df['b1'] * 1 + output_df['b2'] * 2 + output_df['b3'] * 4
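    # e.g. runners on first and third (b1=1, b2=0, b3=1) give b1_b2_b3 = 1 + 0 + 4 = 5;
    # B_S_O packs balls, strikes and outs into a single state index in the range 0-35.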
    next_b = (output_df.sort_values(['gameID', 'inning', 'O'])
              .groupby(['gameID', 'inning'], group_keys=False)[['b1', 'b2', 'b3']]
              .shift(-1)
              .rename(columns={'b1': 'n_b1', 'b2': 'n_b2', 'b3': 'n_b3'}))
output_df = pd.merge(output_df, next_b, left_index=True, right_index=True)
def replace_b1(x):
if pd.isnull(x['n_b1']):
return x['b1']
else:
return x['n_b1']
def replace_b2(x):
if pd.isnull(x['n_b2']):
return x['b2']
else:
return x['n_b2']
def replace_b3(x):
if pd.isnull(x['n_b3']):
return x['b3']
else:
return x['n_b3']
    output_df['n_b1'] = output_df.apply(replace_b1, axis=1)
output_df['n_b2'] = output_df.apply(replace_b2, axis=1)
output_df['n_b3'] = output_df.apply(replace_b3, axis=1)
output_df['plus_b1'] = output_df.apply(lambda x: x['b1'] < x['n_b1'], axis=1)
output_df['plus_b2'] = output_df.apply(lambda x: x['b2'] < x['n_b2'], axis=1)
output_df['plus_b3'] = output_df.apply(lambda x: x['b3'] < x['n_b3'], axis=1)
output_df['minus_b1'] = output_df.apply(lambda x: x['b1'] > x['n_b1'], axis=1)
output_df['minus_b2'] = output_df.apply(lambda x: x['b2'] > x['n_b2'], axis=1)
output_df['minus_b3'] = output_df.apply(lambda x: x['b3'] > x['n_b3'], axis=1)
return output_df
def aggregation(input_df, group_keys, group_values, agg_methods):
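    """Group `input_df` by `group_keys` and aggregate every column in
    `group_values` with each method in `agg_methods`, then merge the results
    back as `agg_<method>_<col>_grpby_<keys>` columns. Returns the merged
    frame and the column names of the aggregation frame (group keys included).
    """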
new_df = []
for agg_method in agg_methods:
for col in group_values:
if callable(agg_method):
agg_method_name = agg_method.__name__
else:
agg_method_name = agg_method
new_col = f'agg_{agg_method_name}_{col}_grpby_' + '_'.join(group_keys)
agg_df = input_df[[col]+group_keys].groupby(group_keys)[[col]].agg(agg_method)
agg_df.columns = [new_col]
new_df.append(agg_df)
new_df = pd.concat(new_df, axis=1).reset_index()
output_df = pd.merge(input_df, new_df, on=group_keys, how='left')
return output_df, list(new_df.columns)
def get_agg_gameID_inningHalf_features(input_df):
group_keys = ['subGameID', 'inningHalf']
group_values = ['S', 'B', 'b1', 'b2', 'b3']
agg_methods = ['mean', 'std']
output_df, cols = aggregation(
input_df, group_keys=group_keys, group_values=group_values, agg_methods=agg_methods)
return reduce_mem_usage(output_df)
'''
pivot table features
'''
def get_pivot_NMF9_features(input_df, n, value_col):
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=np.median)
sc0 = MinMaxScaler().fit_transform(np.median(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,0::2,:], axis=-1))
sc1 = MinMaxScaler().fit_transform(np.median(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,1::2,:], axis=-1))
nmf = NMF(n_components=n, random_state=2021)
nmf_df0 = pd.DataFrame(nmf.fit_transform(sc0), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF9T={x:02}')
nmf_df1 = pd.DataFrame(nmf.fit_transform(sc1), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF9B={x:02}')
nmf_df = pd.concat([nmf_df0, nmf_df1], axis=1)
nmf_df = pd.merge(
input_df, nmf_df, left_on='subGameID', right_index=True, how='left')
return reduce_mem_usage(nmf_df)
# features built from a pivot table
def get_pivot_NMF27_features(input_df, n, value_col):
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=np.median)
sc0 = MinMaxScaler().fit_transform(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,0::2].reshape(-1,27))
sc1 = MinMaxScaler().fit_transform(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,1::2].reshape(-1,27))
nmf = NMF(n_components=n, random_state=2021)
nmf_df0 = pd.DataFrame(nmf.fit_transform(sc0), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF27T={x:02}')
nmf_df1 = pd.DataFrame(nmf.fit_transform(sc1), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF27B={x:02}')
nmf_df = pd.concat([nmf_df0, nmf_df1], axis=1)
nmf_df = pd.merge(
input_df, nmf_df, left_on='subGameID', right_index=True, how='left')
return reduce_mem_usage(nmf_df)
# features built from a pivot table
def get_pivot_NMF54_features(input_df, n, value_col):
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=np.median)
sc = MinMaxScaler().fit_transform(pivot_df.fillna(0).values)
nmf = NMF(n_components=n, random_state=2021)
nmf_df = pd.DataFrame(nmf.fit_transform(sc), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF54={x:02}')
nmf_df = pd.merge(
input_df, nmf_df, left_on='subGameID', right_index=True, how='left')
return reduce_mem_usage(nmf_df)
'''
shift features
'''
def get_diff_feature(input_df, value_col, periods, in_inning=True, aggfunc=np.median):
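    """Pivot `value_col` into a (subGameID x outCount) table and difference it
    by `periods` along the out axis. With `in_inning=True` the differences are
    taken within each half-inning (3-out block); otherwise they run across the
    top-half and bottom-half out columns of the whole game. Returns the stacked
    (subGameID, outCount) Series."""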
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=aggfunc)
if in_inning:
dfs = []
for inning in range(9):
df0 = pivot_df.loc[:, [out+inning*6 for out in range(0,3)]].diff(periods, axis=1)
df1 = pivot_df.loc[:, [out+inning*6 for out in range(3,6)]].diff(periods, axis=1)
dfs += [df0, df1]
pivot_df = pd.concat(dfs, axis=1).stack()
else:
df0 = pivot_df.loc[:, [out+inning*6 for inning in range(9) for out in range(0,3)]].diff(periods, axis=1)
df1 = pivot_df.loc[:, [out+inning*6 for inning in range(9) for out in range(3,6)]].diff(periods, axis=1)
pivot_df = pd.concat([df0, df1], axis=1).stack()
return pivot_df
def get_shift_feature(input_df, value_col, periods, in_inning=True, aggfunc=np.median):
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=aggfunc)
if in_inning:
dfs = []
for inning in range(9):
df0 = pivot_df.loc[:, [out+inning*6 for out in range(0,3)]].shift(periods, axis=1)
df1 = pivot_df.loc[:, [out+inning*6 for out in range(3,6)]].shift(periods, axis=1)
dfs += [df0, df1]
pivot_df = | pd.concat(dfs, axis=1) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import r2_score
import scipy.stats
df_mort_siteid = pd.read_csv("mortality siteid obs vs expected.csv")
df_mort_surgid = pd.read_csv("mortality surgid obs vs expected.csv")
df_complics_siteid = pd.read_csv("Complics siteid obs vs expected.csv")
df_complics_surgid = pd.read_csv("Complics surgid obs vs expected.csv")
df = pd.read_csv("total_avg_surgid.csv")
df1 = pd.read_csv("total_avg_site_id.csv")
def siteid_obs_vs_expected_mort():
mask = df_mort_siteid['count_Reop'] == 0
df_reop = df_mort_siteid[~mask]
mask = df_mort_siteid['count_First'] == 0
df_op = df_mort_siteid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="plum",edgecolor='orchid', s=30)
# ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="lightskyblue", edgecolor='tab:blue', s=30)
plt.title('Siteid observe vs expected Mortality First operation')
plt.xlabel("Yearly AVG of first operation")
plt.ylabel("mortality observe vs expected of first operation")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "purple")
# a = df_reop['Year_avg_reop']
# b = df_reop['log_Reoperation']
# c = np.polyfit(a, b, 1)
# t = np.poly1d(c)
# plt.plot(a, t(a), "mediumblue")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
# text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
# r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
# text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
print(text)
# print(text2)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"), f("white")],
labels=[text, text3])
# fig = plt.figure()
# fig.subplots_adjust()
# plt.text(30, 50, text)
# plt.text(90, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def siteid_obs_vs_expected_mort_reop():
mask = df_mort_siteid['count_Reop'] == 0
df_reop = df_mort_siteid[~mask]
ax = plt.gca()
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="lightskyblue", edgecolor='tab:blue', s=30)
plt.title('Siteid observe vs expected Mortality Reoperation')
plt.xlabel("Yearly AVG of Reoperation")
plt.ylabel("mortality observe vs expected Reoperation")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
r, p = scipy.stats.spearmanr(a, b)
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
text3 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
print(text2)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[ f("lightskyblue"),f("white")],
labels=[ text2,text3])
plt.show()
def surgid_obs_vs_expected_mort():
mask = df_mort_surgid['count_Reop'] == 0
df_reop = df_mort_surgid[~mask]
mask = df_mort_surgid['count_First'] == 0
df_op = df_mort_surgid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="plum",edgecolor='orchid', s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="lightskyblue", edgecolor='tab:blue', s=30)
plt.title('Surgid observe vs expected Mortality')
plt.xlabel("Yearly AVG of operation")
plt.ylabel("mortality observe vs expected")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "purple")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" #\n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
print (text)
print(text2)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"),f("white"), f("steelblue"),f("white")],
labels=[text,text3, text2,text4])
# ax.text(right, top, 'right top',
# horizontalalignment='right',
# verticalalignment='top',
# transform=ax.transAxes)
# plt.text(100, 50, text)
# plt.text(100, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def siteid_obs_vs_expected_complics():
mask = df_complics_siteid['count_Reop'] == 0
df_reop = df_complics_siteid[~mask]
mask = df_complics_siteid['count_First'] == 0
df_op = df_complics_siteid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="palevioletred",edgecolor='indianred', s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="darkturquoise",edgecolor='lightseagreen',s=30)
plt.title('Siteid observe vs expected Complications')
plt.xlabel("Yearly AVG of operation")
plt.ylabel("complication observe vs expected")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "maroon")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "darkgreen")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"),f("white"), f("darkturquoise"),f("white")],
labels=[text,text3, text2,text4])
# fig = plt.figure()
# fig.subplots_adjust()
# plt.text(30, 50, text)
# plt.text(90, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def surgid_obs_vs_expected_complics():
mask = df_complics_surgid['count_Reop'] == 0
df_reop = df_complics_surgid[~mask]
mask = df_complics_surgid['count_First'] == 0
df_op = df_complics_surgid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="palevioletred",edgecolor='indianred',s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="darkturquoise",edgecolor='lightseagreen',s=30)
plt.title('Surgid observe vs expected Complications')
plt.xlabel("Yearly AVG of operation")
plt.ylabel("complication observe vs expected")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "maroon")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "darkgreen")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
print(text)
print(text2)
r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"), f("white"), f("darkturquoise"), f("white")],
labels=[text, text3, text2, text4])
# fig = plt.figure()
# fig.subplots_adjust()
# plt.text(30, 50, text)
# plt.text(90, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def mortality_reop_surgid_boxplot():
mask = df['Year_sum_reop'] == 0
df_reop = df[~mask]
# total_year_sum
new_df=pd.DataFrame(data=df_reop,columns=['mortalty_reop_rate','total_year_avg'])
new_df['bins'] = pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high'])
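    # pd.qcut bins the yearly average volumes into equal-frequency terciles, so
    # roughly a third of the rows land in each of 'low', 'mid' and 'high'.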
print(new_df)
new_df.to_csv("box_surgid_mort.csv")
mask = new_df['bins'] == 'low'
df_low = new_df[mask]
mask = new_df['bins'] == 'mid'
df_mid = new_df[mask]
mask = new_df['bins'] == 'high'
df_high = new_df[mask]
data = [df_low['mortalty_reop_rate'],df_mid['mortalty_reop_rate'],df_high['mortalty_reop_rate']]
print (df_low.describe())
print(df_mid.describe())
print(df_high.describe())
text = f" low\n ${df_low['total_year_avg'].min(): 0.2f} - ${df_low['total_year_avg'].max(): 0.2f}\n Mean = ${df_low['mortalty_reop_rate'].mean():0.6f} $"
text2 = f"mid\n ${df_mid['total_year_avg'].min(): 0.2f} - ${df_mid['total_year_avg'].max(): 0.2f}\n Mean = ${df_mid['mortalty_reop_rate'].mean():0.6f} $"
text3 =f"high\n${df_high['total_year_avg'].min(): 0.2f} - ${df_high['total_year_avg'].max(): 0.2f}\n Mean = ${df_high['mortalty_reop_rate'].mean():0.6f} $"
# ax = plt.gca()
# ax = sns.boxplot(x="day", y="total_bill", data=df_mid['mortalty_reop_rate'])
# show plot
labels = [text,text2,text3]
fig1, ax1 = plt.subplots()
ax1.set_title('Mortality surgid reop boxplot')
bp = ax1.boxplot(data, patch_artist=True, labels=labels)
colors = ['pink', 'lightblue', 'palegreen']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
plt.legend(handles=[f("pink"), f("lightblue"), f("palegreen")],
labels=['low', 'mid', 'high'])
plt.ylabel("Mortality Reop rate")
plt.show()
# ax = plt.gca()
#
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# new_df.boxplot(column='mortalty_reop_rate', by='bins')
# plt.legend(handles=[f("palevioletred"), f("mediumturquoise"), f("yellow")],
# labels=['low', 'mid','high'])
#
# plt.show()
def mortality_reop_siteid_boxplot():
mask = df1['Year_sum_reop'] == 0
df_reop = df1[~mask]
# total_year_sum
new_df=pd.DataFrame(data=df_reop,columns=['mortalty_reop_rate','total_year_avg'])
new_df['bins'] = pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high'])
print(new_df)
new_df.to_csv("box_siteid_mort.csv")
mask = new_df['bins'] == 'low'
df_low = new_df[mask]
mask = new_df['bins'] == 'mid'
df_mid = new_df[mask]
mask = new_df['bins'] == 'high'
df_high = new_df[mask]
data = [df_low['mortalty_reop_rate'],df_mid['mortalty_reop_rate'],df_high['mortalty_reop_rate']]
print (df_low.describe())
print(df_mid.describe())
print(df_high.describe())
text = f" low\n ${df_low['total_year_avg'].min(): 0.2f} - ${df_low['total_year_avg'].max(): 0.2f}\n Mean = ${df_low['mortalty_reop_rate'].mean():0.6f} $"
text2 = f"mid\n ${df_mid['total_year_avg'].min(): 0.2f} - ${df_mid['total_year_avg'].max(): 0.2f}\n Mean = ${df_mid['mortalty_reop_rate'].mean():0.6f} $"
text3 = f"high\n${df_high['total_year_avg'].min(): 0.2f} - ${df_high['total_year_avg'].max(): 0.2f}\n Mean = ${df_high['mortalty_reop_rate'].mean():0.6f} $"
labels = [text, text2, text3]
fig1, ax1 = plt.subplots()
ax1.set_title('Mortality siteid reop boxplot')
bp = ax1.boxplot(data, patch_artist=True, labels=labels)
colors = ['pink', 'lightblue', 'palegreen']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
plt.legend(handles=[f("pink"), f("lightblue"), f("palegreen")],
labels=['low', 'mid', 'high'])
plt.ylabel("Mortality Reop rate")
plt.show()
def complics_reop_surgid_boxplot():
mask = df['Year_sum_reop'] == 0
df_reop = df[~mask]
# total_year_sum
new_df=pd.DataFrame(data=df_reop,columns=['Complics_reop_rate','total_year_avg'])
new_df['bins'] = | pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high']) | pandas.qcut |
# -*- coding: utf-8 -*-
"""
Functions for importing nyc tlc data.
"""
import numpy as np
import pandas as pd
import re
from twitterinfrastructure.tools import check_expected_list, create_table, \
connect_db, df_to_table, get_regex_files, haversine, output
from urllib.request import urlretrieve
def add_trip_columns(df, verbose=0):
"""Adds calculated trip columns to the dataframe. Assumes the dataframe
has already been cleaned. Also removes any trips with unreasonable
values. Can only calculate distance-related trip data for records with
pickup/dropoff lat/lon data.
Parameters
----------
df : dataframe
Dataframe to add trip calculation columns to.
verbose : int
Defines verbosity for output statements.
Returns
-------
df : dataframe
Dataframe with added columns.
Notes
-----
"""
col_names = list( | pd.Series(df.columns.values) | pandas.Series |
"""
We also test Series.notna in this file.
"""
import numpy as np
from pandas import (
Period,
Series,
)
import pandas._testing as tm
class TestIsna:
def test_isna_period_dtype(self):
# GH#13737
ser = Series([ | Period("2011-01", freq="M") | pandas.Period |
from transformers import (
BertTokenizerFast,
)
import os,re,copy,sys
import pandas as pd
import tensorflow as tf
import numpy as np
import news_retrieval_common_funcs
import multiprocessing_functions
from sklearn.model_selection import train_test_split
from sklearn.utils import resample as sklearnResample
from sklearn.utils import class_weight
import condor_tensorflow as condor
import multiprocessing
import dask, dask.bag
from IPython.display import display, HTML
class getdata:
def __init__(self, **settings):
settings.setdefault('workingdir', os.getcwd())
settings.setdefault('ckipModelName', 'ckiplab/albert-tiny-chinese')
settings.setdefault('num_workers', multiprocessing.cpu_count())
settings.setdefault('tokenizerSettingsDoc', {
'padding':'max_length',
'truncation':True,
'return_tensors':"np"
})
settings.setdefault('tokenizerSettingsQuery', {
'padding':'max_length',
'truncation':True,
'return_tensors':"np"
})
settings.setdefault('batch_size', 32)
settings.setdefault('random_state', 1)
settings.setdefault('test_size', 0.15)
settings.setdefault('querySynonyms',{ #支持同意贊成應該是應該的是合理的 被接受 被支持 是正確的 #不支持反對不贊成不同意不應該不應該的不合理的 是不對的 支持取消 禁止 允許 拒絕
'反對二代健保規定':['支持維持二代健保現有規定是不應該的','同意維持二代健保現有規定是不應該的','贊成維持二代健保現有規定是不應該的',
'同意維持二代健保規定是不合理的','維持二代健保規定是不對的','允許二代健保規定是不合理的','允許二代健保規定是不正確的','允許二代健保規定是錯誤的',
'不應該允許二代健保規定','不支持二代健保規定','不贊成二代健保規定','不同意二代健保規定','拒絕二代健保規定是正確的','不應該維持二代健保現有規定','拒絕二代健保規定是合理的',
'應該拒絕二代健保規定'],
'臺灣應開放含瘦肉精(萊克多巴胺)之美國牛肉進口':['支持臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口','同意臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口',
'贊成臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口','臺灣應該開放含瘦肉精(萊克多巴胺)之美國牛肉進口','臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口是應該的',
'臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口應該被接受','臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口應該被支持','臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口是正確的',
'不支持禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口','反對禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口','不贊成禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口',
'不同意禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口','不應該禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口','禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口是不應該的',
'禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口是不合理的','禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口是不對的','禁止臺灣開放含瘦肉精(萊克多巴胺)之美國牛肉進口是錯誤的'],
'國際賽事會場內應該可以持中華民國國旗':['支持國際賽事會場內可以持中華民國國旗','同意國際賽事會場內可以持中華民國國旗','贊成國際賽事會場內可以持中華民國國旗',
'國際賽事會場內可以持中華民國國旗是應該的','國際賽事會場內可以持中華民國國旗是合理的','國際賽事會場內可以持中華民國國旗應該被接受',
'國際賽事會場內可以持中華民國國旗應該被支持','國際賽事會場內可以持中華民國國旗是正確的','不支持禁止國際賽事會場內持中華民國國旗',
'反對禁止國際賽事會場內持中華民國國旗','不贊成禁止國際賽事會場內持中華民國國旗','不同意禁止國際賽事會場內持中華民國國旗',
'禁止國際賽事會場內持中華民國國旗是不應該的','不應該禁止國際賽事會場內持中華民國國旗','禁止國際賽事會場內持中華民國國旗是不合理的'],
'反對無圍牆校園':[
'支持有圍牆限制校園進出','同意有圍牆限制校園進出','贊成有圍牆限制校園進出','應該有圍牆限制校園進出','有圍牆限制校園進出是應該的',
'有圍牆限制校園進出是合理的','有圍牆限制校園進出是被接受的','有圍牆限制校園進出是應該被支持的','有圍牆限制校園進出是正確的',
'支持校園設圍牆','同意校園設圍牆','贊成校園設圍牆','校園應該設圍牆','校園設圍牆是應該的','校園設圍牆是合理的','校園設圍牆是可被接受的','校園設圍牆是可被支持的','校園設圍牆是正確的',
'不支持無圍牆校園','不贊成無圍牆校園','不同意無圍牆校園','校園不應該無圍牆','校園無圍牆是不應該的','校園無圍牆是不合理的',
'校園不應該取消圍牆','校園取消圍牆是不應該的','校園取消圍牆是不合理的','校園取消圍牆是不對的',
'不應該拒絕校園設圍牆','拒絕校園設圍牆是不應該的','拒絕校園設圍牆是不合理的','拒絕校園設圍牆是不對的',],
'另立專法保障同婚是正確的':['支持另立專法保障同婚','同意另立專法保障同婚','贊成另立專法保障同婚','應該另立專法保障同婚',
'另立專法保障同婚是應該的','另立專法保障同婚是合理的','另立專法保障同婚應該被接受','另立專法保障同婚應該被支持',
            '不支持取消另立專法保障同婚','反對取消另立專法保障同婚','不贊成取消另立專法保障同婚','不同意取消另立專法保障同婚',
'不應該取消另立專法保障同婚','取消另立專法保障同婚是不應該的','取消另立專法保障同婚是不合理的'],
            '堅決反對政府舉債發展前瞻建設計畫':['支持禁止政府舉債發展前瞻建設計畫','同意禁止政府舉債發展前瞻建設計畫','贊成禁止政府舉債發展前瞻建設計畫',
            '應該禁止政府舉債發展前瞻建設計畫','禁止政府舉債發展前瞻建設計畫是應該的','禁止政府舉債發展前瞻建設計畫是合理的','禁止政府舉債發展前瞻建設計畫應該被接受',
'禁止政府舉債發展前瞻建設計畫應該被支持','不支持政府舉債發展前瞻建設計畫','反對政府舉債發展前瞻建設計畫','不贊成政府舉債發展前瞻建設計畫',
'不同意政府舉債發展前瞻建設計畫','政府不應該舉債發展前瞻建設計畫','政府舉債發展前瞻建設計畫是不應該的','政府舉債發展前瞻建設計畫是不合理的'],
'支持陳前總統保外就醫':['同意陳前總統保外就醫','贊成陳前總統保外就醫','陳前總統應該可以保外就醫','應該允許陳前總統保外就醫','允許陳前總統保外就醫是應該的',
'陳前總統保外就醫是合理的','不支持禁止陳前總統保外就醫','反對禁止陳前總統保外就醫','不贊成禁止陳前總統保外就醫','不同意禁止陳前總統保外就醫',
'不應該禁止陳前總統保外就醫','禁止陳前總統保外就醫是不應該的','拒絕陳前總統保外就醫是不合理的'],
'年金改革應取消或應調降軍公教月退之優存利率十八趴':['支持年金改革取消或調降軍公教月退之優存利率十八趴','同意年金改革取消或調降軍公教月退之優存利率十八趴',
'贊成年金改革取消或調降軍公教月退之優存利率十八趴','年金改革取消或調降軍公教月退之優存利率十八趴是應該的','年金改革取消或調降軍公教月退之優存利率十八趴是合理的',
'不支持年金改革或調升軍公教月退之優存利率十八趴','反對年金改革或調升軍公教月退之優存利率十八趴','不贊成年金改革或調升軍公教月退之優存利率十八趴',
'不同意年金改革或調升軍公教月退之優存利率十八趴','不應該年金改革或調升軍公教月退之優存利率十八趴','年金改革或調升軍公教月退之優存利率十八趴是不應該的',
'年金改革或調升軍公教月退之優存利率十八趴是不合理的'],
'同意動物實驗':['支持動物實驗','贊成動物實驗','應該接受動物實驗','接受動物實驗是應該的','接受動物實驗是合理的',
'不支持禁止動物實驗','反對禁止動物實驗','不贊成禁止動物實驗','不同意禁止動物實驗','不應該禁止動物實驗',
'禁止動物實驗是不應該的','禁止動物實驗是不合理的','動物實驗不應該被取消','動物實驗不應該被禁止',
'動物實驗應該可以被接受','動物實驗應該被支持'],
'油價應該凍漲或緩漲':['支持油價凍漲或緩漲','同意油價凍漲或緩漲','贊成油價凍漲或緩漲','油價凍漲或緩漲是應該的','油價凍漲或緩漲是合理的',
'油價凍漲或緩漲應該被接受','油價凍漲或緩漲應該被支持','不支持油價漲價或調漲','反對油價漲價或調漲','不贊成油價漲價或調漲',
'不同意油價漲價或調漲','油價不應該漲價或調漲','油價漲價或調漲是不應該的','油價漲價或調漲是不合理的','支持禁止油價漲價或調漲',
'同意禁止油價漲價或調漲','贊成禁止油價漲價或調漲','應該禁止油價漲價或調漲','禁止油價漲價或調漲是應該的','禁止油價漲價或調漲是合理的',
'禁止油價漲價或調漲應該被接受','禁止油價漲價或調漲應該被支持'],
'反對旺旺中時併購中嘉':['支持禁止旺旺中時併購中嘉','同意禁止旺旺中時併購中嘉','贊成禁止旺旺中時併購中嘉','應該禁止旺旺中時併購中嘉',
'禁止旺旺中時併購中嘉是應該的','禁止旺旺中時併購中嘉是合理的','禁止旺旺中時併購中嘉應該被接受','禁止旺旺中時併購中嘉應該被支持',
'不支持允許旺旺中時併購中嘉','反對允許旺旺中時併購中嘉','不贊成允許旺旺中時併購中嘉','不同意允許旺旺中時併購中嘉','不應該允許旺旺中時併購中嘉',
'允許旺旺中時併購中嘉是不應該的','允許旺旺中時併購中嘉是不合理的','反對媒體壟斷'],
'贊同課綱微調':['支持課綱微調','同意課綱微調','贊成課綱微調','課綱應該微調','課綱微調是應該的','課綱微調是合理的','課綱微調應該被接受',
'課綱微調應該被支持','課綱微調是正確的','應允許課綱微調','不支持取消課綱微調','反對取消課綱微調','不贊成取消課綱微調','不同意取消課綱微調','不應該取消課綱微調',
'取消課綱微調是不應該的','取消課綱微調是不合理的'],
'贊成流浪動物零撲殺':['支持流浪動物零撲殺','同意流浪動物零撲殺','流浪動物應該零撲殺','流浪動物零撲殺是應該的','流浪動物零撲殺是合理的',
'流浪動物零撲殺是正確的','不支持撲殺流浪動物','反對撲殺流浪動物','不贊成撲殺流浪動物','不同意撲殺流浪動物','不應該撲殺流浪動物','撲殺流浪動物是不應該的',
'撲殺流浪動物是不合理的','支持禁止撲殺流浪動物','同意禁止撲殺流浪動物','贊成禁止撲殺流浪動物','應該禁止撲殺流浪動物','禁止撲殺流浪動物是應該的',
'禁止撲殺流浪動物是合理的','禁止撲殺流浪動物應該被接受','禁止撲殺流浪動物應該被支持','禁止撲殺流浪動物是正確的'],
'核四應該啟用':['支持啟用核四','同意啟用核四','贊成啟用核四','啟用核四是應該的','啟用核四是合理的','啟用核四應該被接受','啟用核四應該被支持','啟用核四是正確的',
'不支持停用或封存核四','反對停用或封存核四','不贊成停用或封存核四','不同意停用或封存核四','不應該停用或封存核四','停用或封存核四是不應該的',
            '停用或封存核四是不合理的','支持允許啟用核四','同意允許啟用核四','贊成允許啟用核四','應該允許啟用核四','允許啟用核四是應該的','允許啟用核四是正確的',
'支持核四應該啟用','不拒絕核四應該啟用','同意核四應該啟用','贊成核四應該啟用',
'核四啟用是應該的','核四啟用是合理的','核四啟用是可被接受的','核四啟用是可被支持的','核四啟用是正確的','核四啟用是對的',
'支持允許核四啟用','不拒絕允許核四啟用','同意允許核四啟用','贊成允許核四啟用',
'允許核四啟用是應該的','允許核四啟用是合理的','允許核四啟用是可被接受的','允許核四啟用是可被支持的','允許核四啟用是正確的','允許核四啟用是對的',
],
'贊成文林苑都更案可依法拆除王家':['支持文林苑都更案可依法拆除王家','同意文林苑都更案可依法拆除王家','文林苑都更案應該依法拆除王家','文林苑都更案依法拆除王家是應該的',
'文林苑都更案依法拆除王家是合理的','文林苑都更案依法拆除王家應該被接受','文林苑都更案依法拆除王家應該被支持','文林苑都更案依法拆除王家是正確的',
'不支持文林苑都更案不依法拆除王家','反對文林苑都更案不依法拆除王家','不贊成文林苑都更案不依法拆除王家','不同意文林苑都更案不依法拆除王家',
'不應該文林苑都更案不依法拆除王家','文林苑都更案不依法拆除王家是不應該的','文林苑都更案不依法拆除王家是不合理的'],
'十二年國教高中職「免學費補助」適用對象增加是不對的':['支持限制十二年國教高中職免學費補助適用對象增加','同意限制十二年國教高中職免學費補助適用對象增加',
'贊成限制十二年國教高中職免學費補助適用對象增加','應該限制十二年國教高中職免學費補助適用對象增加','限制十二年國教高中職免學費補助適用對象增加是應該的',
'限制十二年國教高中職免學費補助適用對象增加是合理的','限制十二年國教高中職免學費補助適用對象增加應該被接受','限制十二年國教高中職免學費補助適用對象增加應該被支持',
'限制十二年國教高中職免學費補助適用對象增加是正確的','不支持十二年國教高中職免學費補助適用對象增加','反對十二年國教高中職免學費補助適用對象增加',
'不贊成十二年國教高中職免學費補助適用對象增加','不同意十二年國教高中職免學費補助適用對象增加','十二年國教高中職免學費補助適用對象不應該增加',
'十二年國教高中職免學費補助適用對象增加是不應該的','十二年國教高中職免學費補助適用對象增加是不合理的','不應該允許十二年國教高中職免學費補助適用對象增加',
'允許十二年國教高中職免學費補助適用對象增加是不合理的','允許十二年國教高中職免學費補助適用對象增加是不對的'],
'遠雄大巨蛋工程應停工或拆除':['支持遠雄大巨蛋工程停工或拆除','同意遠雄大巨蛋工程停工或拆除','贊成遠雄大巨蛋工程停工或拆除','遠雄大巨蛋工程應該停工或拆除',
'遠雄大巨蛋工程停工或拆除是應該的','遠雄大巨蛋工程停工或拆除是合理的','遠雄大巨蛋工程停工或拆除應該被接受','遠雄大巨蛋工程停工或拆除應該被支持',
'遠雄大巨蛋工程停工或拆除是正確的','不支持遠雄大巨蛋工程繼續興建','反對遠雄大巨蛋工程繼續興建','不贊成遠雄大巨蛋工程繼續興建','不同意遠雄大巨蛋工程繼續興建',
'遠雄大巨蛋工程不應該繼續興建','遠雄大巨蛋工程繼續興建是不應該的','遠雄大巨蛋工程繼續興建是不合理的','遠雄大巨蛋工程繼續興建是不對的',
'支持禁止遠雄大巨蛋工程繼續興建','同意禁止遠雄大巨蛋工程繼續興建','贊成禁止遠雄大巨蛋工程繼續興建','應該禁止遠雄大巨蛋工程繼續興建','禁止遠雄大巨蛋工程繼續興建是應該的',
'禁止遠雄大巨蛋工程繼續興建是合理的','禁止遠雄大巨蛋工程繼續興建應該被接受','禁止遠雄大巨蛋工程繼續興建應該被支持','禁止遠雄大巨蛋工程繼續興建是正確的'],
'支持正名「臺灣」參與國際運動賽事':['同意正名臺灣參與國際運動賽事','贊成正名臺灣參與國際運動賽事','應該正名臺灣參與國際運動賽事',
'正名臺灣參與國際運動賽事是應該的','正名臺灣參與國際運動賽事是合理的','正名臺灣參與國際運動賽事應該被接受','正名臺灣參與國際運動賽事應該被支持',
'正名臺灣參與國際運動賽事是正確的','不支持不正名臺灣參與國際運動賽事','反對不正名臺灣參與國際運動賽事','不贊成不正名臺灣參與國際運動賽事',
'不同意不正名臺灣參與國際運動賽事','不應該不正名臺灣參與國際運動賽事','不正名臺灣參與國際運動賽事是不應該的','不正名臺灣參與國際運動賽事是不合理的'],
'拒絕公投通過門檻下修':['支持公投通過門檻不下修','同意公投通過門檻不下修','贊成公投通過門檻不下修','公投通過門檻應該不下修','公投通過門檻不下修是應該的',
'公投通過門檻不下修是合理的','公投通過門檻不下修應該被接受','公投通過門檻不下修應該被支持','公投通過門檻不下修是正確的','不支持公投通過門檻下修',
'反段公投通過門檻下修','不贊成公投通過門檻下修','不同意公投通過門檻下修','公投通過門檻不應該下修','公投通過門檻下修是不應該的','公投通過門檻下修是不合理的',
'支持禁止公投通過門檻下修','同意禁止公投通過門檻下修','贊成禁止公投通過門檻下修','應該禁止公投通過門檻下修','禁止公投通過門檻下修是應該的','禁止公投通過門檻下修是合理的',
'禁止公投通過門檻下修應該被接受','禁止公投通過門檻下修應該被支持','禁止公投通過門檻下修是正確的'],
'應該提高酒駕罰責以有效遏制酒駕':['支持提高酒駕罰責以有效遏制酒駕','同意提高酒駕罰責以有效遏制酒駕','贊成提高酒駕罰責以有效遏制酒駕',
'提高酒駕罰責以有效遏制酒駕是應該的','提高酒駕罰責以有效遏制酒駕是合理的','提高酒駕罰責以有效遏制酒駕應該被接受','提高酒駕罰責以有效遏制酒駕應該被支持',
'提高酒駕罰責以有效遏制酒駕是正確的','不支持降低或維持酒駕罰責','反對降低或維持酒駕罰責','不贊成降低或維持酒駕罰責','不同意降低或維持酒駕罰責',
'不應該降低或維持酒駕罰責','降低或維持酒駕罰責是不應該的','降低或維持酒駕罰責是不合理的','降低或維持酒駕罰責是不對的'],
"""
NOT in training data
"""
'ECFA早收清單可(有)達到其預期成效':['ECFA早收清單有效','ECFA早收清單成效良好','ECFA早收清單成效達標','ECFA早收清單沒有失效'],
'不支持使用加密貨幣':['支持限制使用加密貨幣','同意限制使用加密貨幣','贊成限制使用加密貨幣','應該限制使用加密貨幣',
'限制使用加密貨幣是應該的','限制使用加密貨幣是合理的','反對使用加密貨幣','不贊成使用加密貨幣','不同意使用加密貨幣',
'不應該使用加密貨幣','使用加密貨幣是不應該的','使用加密貨幣是不合理的','支持限制使用加密貨幣','同意限制使用加密貨幣',
'贊成限制使用加密貨幣','應該限制使用加密貨幣','限制使用加密貨幣是應該的'],
'不支持學雜費調漲':['支持限制學雜費調漲','同意限制學雜費調漲','贊成限制學雜費調漲','限制學雜費調漲是應該的','應該限制學雜費調漲',
'限制學雜費調漲是合理的','反對學雜費調漲','不贊成學雜費調漲','不同意學雜費調漲','學雜費不應該調漲','學雜費調漲是不應該的',
'學雜費調漲是不合理的','支持禁止學雜費調漲','同意禁止學雜費調漲','贊成禁止學雜費調漲','禁止學雜費調漲是應該的','應該禁止學雜費調漲',
'禁止學雜費調漲是合理的'],
'中華航空空服員罷工是合理的':['支持中華航空空服員罷工','同意中華航空空服員罷工','贊成中華航空空服員罷工','中華航空空服員應該可以罷工',
'中華航空空服員罷工是應該的','不支持禁止中華航空空服員罷工','反對禁止中華航空空服員罷工','不贊成禁止中華航空空服員罷工',
'不同意禁止中華航空空服員罷工','不應該禁止中華航空空服員罷工','禁止中華航空空服員罷工是不合理的','中華航空空服員罷工不應該譴責','不應該否定華航空服員罷工'],
'反對台鐵東移徵收案':['支持取消台鐵東移徵收案','同意取消台鐵東移徵收案','贊成取消台鐵東移徵收案','應該取消台鐵東移徵收案','取消台鐵東移徵收案是應該的',
'取消台鐵東移徵收案是合理的','不支持台鐵東移徵收案','不贊成台鐵東移徵收案','不同意台鐵東移徵收案','不應該台鐵東移徵收','台鐵東移徵收是不應該的',
'台鐵東移徵收是不合理的'],
'同意政府舉債發展前瞻建設計畫':['支持政府舉債發展前瞻建設計畫','贊成政府舉債發展前瞻建設計畫','政府應該舉債發展前瞻建設計畫','政府舉債發展前瞻建設計畫是應該的',
'政府舉債發展前瞻建設計畫是合理的','不支持禁止政府舉債發展前瞻建設計畫','反對禁止政府舉債發展前瞻建設計畫','不贊成禁止政府舉債發展前瞻建設計畫',
'不同意禁止政府舉債發展前瞻建設計畫','不應該禁止政府舉債發展前瞻建設計畫','禁止政府舉債發展前瞻建設計畫是不應該的','政府舉債發展前瞻建設計畫是不合理的'],
'性交易應該合法化':['支持性交易免罰','同意性交易免罰','贊成性交易免罰','性交易應該免罰','性交易免罰是應該的',
'性交易免罰是合理的','支持取消性交易限制','同意取消性交易限制','贊成取消性交易限制','應該取消性交易限制',
'取消性交易限制是應該的','取消性交易限制是合理的','性交易應該被接受','不支持禁止性交易','反對禁止性交易',
'不贊成禁止性交易','不同意禁止性交易','不應該禁止性交易','禁止性交易是不應該的','禁止性交易是不合理的',
'性交易不應該處罰','性交易不應該被禁止'],
'應該取消機車強制二段式左轉(待轉)':['支持機車可以直接轉彎','支持取消強制機車二段式左轉','同意取消強制機車二段式左轉',
'贊成取消強制機車二段式左轉','機車應該可以直接轉彎而不需要二段式左轉','不支持機車強制二段式左轉(待轉)',
'反段機車強制二段式左轉(待轉)','不贊成機車強制二段式左轉(待轉)','不同意機車強制二段式左轉(待轉)',
'不應該強制機車二段式左轉(待轉)','機車強制二段式左轉(待轉)是不合理的','機車不應該被強制二段式左轉(待轉)',
],
'應該減免證所稅':['支持減免證所稅','同意減免證所稅','贊成減免證所稅','減免證所稅是應該的','減免證所稅是合理的','應該減少課徵證所稅',
'應該減少課徵證券交易所得稅','應該減少證券交易所得稅的課徵','應該減少或免除證所稅','應該減少或免除證券交易所得稅',
'支持減少或免除證券交易所得稅','不支持增加證所稅','反對增加證所稅','不贊成增加證所稅','不同意增加證所稅','或應該增加證所稅',
'增加證所稅是不應該的','增加證所稅是不合理的'],
'支持中國學生納入健保':['同意中國學生納入健保','贊成中國學生納入健保','支持中國學生可以有健保','同意中國學生可以有健保',
'贊成中國學生可以有健保','支持健保納入中國學生','同意健保納入中國學生','贊成健保納入中國學生','中國學生應該納入健保',
'中國學生納入健保是應該的','中國學生納入健保是合理的','不支持禁止中國學生納入健保','反對禁止中國學生納入健保','不贊成禁止中國學生納入健保',
'不同意禁止中國學生納入健保','禁止中國學生納入健保是不應該的','不應該禁止中國學生納入健保','禁止中國學生納入健保是不合理的',
'支持取消中國學生納入健保的限制','同意取消中國學生納入健保的限制','贊成取消中國學生納入健保的限制','取消中國學生納入健保的甚至是合理的',
'應該取消中國學生納入健保的限制','取消中國學生納入勞健保的限制是合理的'],
'支持博弈特區在台灣合法化':['同意博弈特區在台灣合法化','贊成博弈特區在台灣合法化','博弈特區應該在台灣合法化',
'支持在台灣設立博弈特區','台灣應該設立博弈特區','同意台灣設立博弈特區',
'博弈特區在台灣合法化是合理的','不支持在台灣禁止博弈特區合法化','反對在台灣禁止博弈特區','不贊成在台灣禁止博弈特區',
'不同意在台灣禁止博弈特區','在台灣禁止博弈特區是不應該的','在台灣禁止博弈特區是不合理的','在台灣不應該處罰博弈特區內的博弈',
'在台灣的博弈特區內不應該處罰博弈行為'],
'支持臺灣中小學(含高職、專科)服儀規定(含髮、襪、鞋)給予學生自主':['支持廢除中小學生服儀規定','中小學生服儀規定應該廢除',
'同意中小學生服儀規定尊重自主或放寬','贊成廢除或放寬中小學生服儀規定','支持廢除中小學生髮禁','同意廢除中小學生髮禁',
'贊成廢除中小學生髮禁','贊成中小學生服儀規定尊重自主或放寬','中小學生服儀規定應該尊重自主或放寬','中小學生服儀規定尊重自主或放寬是應該的',
'中小學生服儀規定尊重自主或放寬是合理的','不支持中小學生服儀限制','反對中小學生服儀限制','不贊成中小學生服儀限制','不同意中小學生服儀限制',
'不應該限制中小學生服儀','中小學生服儀限制是不應該的','中小學生服儀限制是不合理的'],
'支持電競列入體育競技':['同意電競列入體育競技','贊成電競列入體育競技','應該將電競列入體育競技','電競列入體育競技是應該的','電競列入體育競技是合理的',
'不支持禁止電競列入體育競技','反對禁止電競列入體育競技','不贊成禁止電競列入體育競技','不同意禁止電競列入體育競技','不應該禁止電競列入體育競技',
'禁止電競列入體育競技是不應該的','禁止電競列入體育競技是不合理的'],
'贊成中油在觀塘興建第三天然氣接收站':['支持中油在觀塘興建第三天然氣接收站','同意中油在觀塘興建第三天然氣接收站','中油在觀塘興建第三天然氣接收站是應該的',
'中油應該在觀塘興建第三天然氣接收站','中油在觀塘興建第三天然氣接收站是合理的','不支持中油在觀塘取消興建第三天然氣接收站','反對中油取消在觀塘興建第三天然氣接收站',
'不贊成中油在觀塘取消興建第三天然氣接收站','不同意中油在觀塘取消興建第三天然氣接收站','中油不應該在觀塘取消興建第三天然氣接收站',
'中油取消在觀塘興建第三天然氣接收站是不應該的','中油取消在觀塘興建第三天然氣接收站是不合理的'],
'通姦在刑法上應該除罪化':['支持通姦除罪化','同意通姦除罪化','贊成通姦除罪化','通姦應該除罪化','通姦除罪化是合理的',
'通姦應該刑事免責','通姦應該無罪','通姦應該免刑','通姦應該刑事免刑','通姦應該免罰刑','通姦應該免刑',
'不應該以刑罰處罰通姦','不應該以刑法處罰通姦','通姦不應該被判刑','通姦不應該有罪','拒絕通姦在刑法上除罪化是不合理的'],
})
settings.setdefault('queryAntonyms',{ #支持不拒絕同意贊成應該是應該的是合理的 被接受 被支持 是正確的 是對的 允許 #不支持反對不贊成不同意不應該不應該的不合理的 是不對的 是不正確的 是錯誤的 支持取消 禁止 停止 允許 拒絕
'應該提高酒駕罰責以有效遏制酒駕':['不支持提高酒駕罰責以有效遏制酒駕','反對提高酒駕罰責以有效遏制酒駕','不贊成提高酒駕罰責以有效遏制酒駕','不同意提高酒駕罰責以有效遏制酒駕','不應該提高酒駕罰責以有效遏制酒駕',
'提高酒駕罰責以有效遏制酒駕是不應該的','提高酒駕罰責以有效遏制酒駕是不合理的','提高酒駕罰責以有效遏制酒駕是不對的','提高酒駕罰責以有效遏制酒駕是不正確的','提高酒駕罰責以有效遏制酒駕是錯誤的',
'支持取消提高酒駕罰責以有效遏制酒駕','不拒絕取消提高酒駕罰責以有效遏制酒駕','贊成取消提高酒駕罰責以有效遏制酒駕','同意取消提高酒駕罰責以有效遏制酒駕','應該取消提高酒駕罰責以有效遏制酒駕',
'取消提高酒駕罰責以有效遏制酒駕是應該的','取消提高酒駕罰責以有效遏制酒駕是合理的','取消提高酒駕罰責以有效遏制酒駕是對的','取消提高酒駕罰責以有效遏制酒駕是正確的',
'支持禁止提高酒駕罰責以有效遏制酒駕','不拒絕禁止提高酒駕罰責以有效遏制酒駕','贊成禁止提高酒駕罰責以有效遏制酒駕','同意禁止提高酒駕罰責以有效遏制酒駕','應該禁止提高酒駕罰責以有效遏制酒駕',
'禁止提高酒駕罰責以有效遏制酒駕是應該的','禁止提高酒駕罰責以有效遏制酒駕是合理的','禁止提高酒駕罰責以有效遏制酒駕是對的','禁止提高酒駕罰責以有效遏制酒駕是正確的',
'支持停止提高酒駕罰責以有效遏制酒駕','不拒絕停止提高酒駕罰責以有效遏制酒駕','贊成停止提高酒駕罰責以有效遏制酒駕','同意停止提高酒駕罰責以有效遏制酒駕','應該停止提高酒駕罰責以有效遏制酒駕',
'停止提高酒駕罰責以有效遏制酒駕是應該的','停止提高酒駕罰責以有效遏制酒駕是合理的','停止提高酒駕罰責以有效遏制酒駕是對的','停止提高酒駕罰責以有效遏制酒駕是正確的',
'支持拒絕提高酒駕罰責以有效遏制酒駕','不拒絕拒絕提高酒駕罰責以有效遏制酒駕','贊成拒絕提高酒駕罰責以有效遏制酒駕','同意拒絕提高酒駕罰責以有效遏制酒駕','應該拒絕提高酒駕罰責以有效遏制酒駕',
'拒絕提高酒駕罰責以有效遏制酒駕是應該的','拒絕提高酒駕罰責以有效遏制酒駕是合理的','拒絕提高酒駕罰責以有效遏制酒駕是對的','拒絕提高酒駕罰責以有效遏制酒駕是正確的'
],
'拒絕公投通過門檻下修':['支持公投通過門檻下修','不拒絕公投通過門檻下修','同意公投通過門檻下修','贊成公投通過門檻下修','公投通過門檻應該下修',
'公投通過門檻下修是應該的','公投通過門檻下修是合理的','公投通過門檻下修是可被接受的','公投通過門檻下修是可被支持的','公投通過門檻下修是正確的','公投通過門檻下修是對的',
'支持允許公投通過門檻下修','不拒絕允許公投通過門檻下修','同意允許公投通過門檻下修','贊成允許公投通過門檻下修','公投通過門檻應該允許下修',
'允許公投通過門檻下修是應該的','允許公投通過門檻下修是合理的','允許公投通過門檻下修是可被接受的','允許公投通過門檻下修是可被支持的','允許公投通過門檻下修是正確的','允許公投通過門檻下修是對的',
'不支持取消公投通過門檻下修','反對取消公投通過門檻下修','不贊成取消公投通過門檻下修','不同意取消公投通過門檻下修','取消公投通過門檻不應該下修',
'取消公投通過門檻下修是不應該的','取消公投通過門檻下修是不合理的','取消公投通過門檻下修是不對的','取消公投通過門檻下修是不正確的','取消公投通過門檻下修是錯誤的',
'不支持禁止公投通過門檻下修','反對禁止公投通過門檻下修','不贊成禁止公投通過門檻下修','不同意禁止公投通過門檻下修','禁止公投通過門檻不應該下修',
'禁止公投通過門檻下修是不應該的','禁止公投通過門檻下修是不合理的','禁止公投通過門檻下修是不對的','禁止公投通過門檻下修是不正確的','禁止公投通過門檻下修是錯誤的',
'不支持停止公投通過門檻下修','反對停止公投通過門檻下修','不贊成停止公投通過門檻下修','不同意停止公投通過門檻下修','停止公投通過門檻不應該下修',
'停止公投通過門檻下修是不應該的','停止公投通過門檻下修是不合理的','停止公投通過門檻下修是不對的','停止公投通過門檻下修是不正確的','停止公投通過門檻下修是錯誤的',
'不支持拒絕公投通過門檻下修','反對拒絕公投通過門檻下修','不贊成拒絕公投通過門檻下修','不同意拒絕公投通過門檻下修','拒絕公投通過門檻不應該下修',
'拒絕公投通過門檻下修是不應該的','拒絕公投通過門檻下修是不合理的','拒絕公投通過門檻下修是不對的','拒絕公投通過門檻下修是不正確的','拒絕公投通過門檻下修是錯誤的'],
'支持正名「臺灣」參與國際運動賽事':['不支持正名臺灣參與國際運動賽事','反對正名臺灣參與國際運動賽事','不贊成正名臺灣參與國際運動賽事','不同意正名臺灣參與國際運動賽事','不應該正名臺灣參與國際運動賽事',
'正名臺灣參與國際運動賽事是不應該的','正名臺灣參與國際運動賽事是不合理的','正名臺灣參與國際運動賽事是不對的','正名臺灣參與國際運動賽事是不正確的','正名臺灣參與國際運動賽事是錯誤的',
'支持拒絕正名臺灣參與國際運動賽事','同意拒絕正名臺灣參與國際運動賽事','贊成拒絕正名臺灣參與國際運動賽事','應該拒絕正名臺灣參與國際運動賽事',
'拒絕正名臺灣參與國際運動賽事是應該的','拒絕正名臺灣參與國際運動賽事是合理的','拒絕正名臺灣參與國際運動賽事是對的','拒絕正名臺灣參與國際運動賽事是正確的',
'支持取消正名臺灣參與國際運動賽事','同意取消正名臺灣參與國際運動賽事','贊成取消正名臺灣參與國際運動賽事','應該取消正名臺灣參與國際運動賽事',
'取消正名臺灣參與國際運動賽事是應該的','取消正名臺灣參與國際運動賽事是合理的','取消正名臺灣參與國際運動賽事是對的','取消正名臺灣參與國際運動賽事是正確的',
'支持禁止正名臺灣參與國際運動賽事','同意禁止正名臺灣參與國際運動賽事','贊成禁止正名臺灣參與國際運動賽事','應該禁止正名臺灣參與國際運動賽事',
'禁止正名臺灣參與國際運動賽事是應該的','禁止正名臺灣參與國際運動賽事是合理的','禁止正名臺灣參與國際運動賽事是對的','禁止正名臺灣參與國際運動賽事是正確的',
'支持停止正名臺灣參與國際運動賽事','同意停止正名臺灣參與國際運動賽事','贊成停止正名臺灣參與國際運動賽事','應該停止正名臺灣參與國際運動賽事',
'停止正名臺灣參與國際運動賽事是應該的','停止正名臺灣參與國際運動賽事是合理的','停止正名臺灣參與國際運動賽事是對的','停止正名臺灣參與國際運動賽事是正確的'
],
'遠雄大巨蛋工程應停工或拆除':['不支持遠雄大巨蛋工程停工或拆除','反對遠雄大巨蛋工程停工或拆除','不贊成遠雄大巨蛋工程停工或拆除','不同意遠雄大巨蛋工程停工或拆除','遠雄大巨蛋工程不應該停工或拆除',
'遠雄大巨蛋工程停工或拆除是不應該的','遠雄大巨蛋工程停工或拆除是不合理的','遠雄大巨蛋工程停工或拆除是不正確的','遠雄大巨蛋工程停工或拆除是錯誤的','遠雄大巨蛋工程停工或拆除是不對的',
'支持禁止遠雄大巨蛋工程停工或拆除','不拒絕禁止遠雄大巨蛋工程停工或拆除','同意禁止遠雄大巨蛋工程停工或拆除','贊成禁止遠雄大巨蛋工程停工或拆除','應該禁止遠雄大巨蛋工程停工或拆除',
'禁止遠雄大巨蛋工程停工或拆除是應該的','禁止遠雄大巨蛋工程停工或拆除是合理的','禁止遠雄大巨蛋工程停工或拆除是正確的','遠雄大巨蛋工程停工或拆除是對的',
'支持拒絕遠雄大巨蛋工程停工或拆除','不拒絕拒絕遠雄大巨蛋工程停工或拆除','同意拒絕遠雄大巨蛋工程停工或拆除','贊成拒絕遠雄大巨蛋工程停工或拆除','應該拒絕遠雄大巨蛋工程停工或拆除',
'拒絕遠雄大巨蛋工程停工或拆除是應該的','拒絕遠雄大巨蛋工程停工或拆除是合理的','拒絕遠雄大巨蛋工程停工或拆除是正確的','遠雄大巨蛋工程停工或拆除是對的',
'支持遠雄大巨蛋工程繼續施工','不拒絕遠雄大巨蛋工程繼續施工','同意遠雄大巨蛋工程繼續施工','贊成遠雄大巨蛋工程繼續施工','遠雄大巨蛋工程應該繼續施工',
'遠雄大巨蛋工程繼續施工是應該的','遠雄大巨蛋工程繼續施工是合理的','遠雄大巨蛋工程繼續施工是可被接受的','遠雄大巨蛋工程繼續施工是可被支持的','遠雄大巨蛋工程繼續施工是正確的','遠雄大巨蛋工程繼續施工是對的',
'支持允許遠雄大巨蛋工程繼續施工','不拒絕允許遠雄大巨蛋工程繼續施工','同意允許遠雄大巨蛋工程繼續施工','贊成允許遠雄大巨蛋工程繼續施工','應該允許遠雄大巨蛋工程繼續施工',
'允許遠雄大巨蛋工程繼續施工是應該的','允許遠雄大巨蛋工程繼續施工是合理的','允許遠雄大巨蛋工程繼續施工是可被接受的','允許遠雄大巨蛋工程繼續施工是可被支持的','允許遠雄大巨蛋工程繼續施工是正確的','允許遠雄大巨蛋工程繼續施工是對的'
],
'十二年國教高中職「免學費補助」適用對象增加是不對的':['支持十二年國教高中職免學費補助適用對象增加','不拒絕十二年國教高中職免學費補助適用對象增加','同意十二年國教高中職免學費補助適用對象增加','贊成十二年國教高中職免學費補助適用對象增加','十二年國教高中職免學費補助適用對象應該增加',
'十二年國教高中職免學費補助適用對象增加是應該的','十二年國教高中職免學費補助適用對象增加是合理的','十二年國教高中職免學費補助適用對象增加是可被接受的','十二年國教高中職免學費補助適用對象增加是可被支持的','十二年國教高中職免學費補助適用對象增加是正確的','十二年國教高中職免學費補助適用對象增加是對的',
'支持允許十二年國教高中職免學費補助適用對象增加','不拒絕允許十二年國教高中職免學費補助適用對象增加','同意允許十二年國教高中職免學費補助適用對象增加','贊成允許十二年國教高中職免學費補助適用對象增加','允許十二年國教高中職免學費補助適用對象應該增加',
'允許十二年國教高中職免學費補助適用對象增加是應該的','允許十二年國教高中職免學費補助適用對象增加是合理的','允許十二年國教高中職免學費補助適用對象增加是可被接受的','允許十二年國教高中職免學費補助適用對象增加是可被支持的','允許十二年國教高中職免學費補助適用對象增加是正確的','允許十二年國教高中職免學費補助適用對象增加是對的',
'不支持停止十二年國教高中職免學費補助適用對象增加','不拒絕停止十二年國教高中職免學費補助適用對象增加','不同意停止十二年國教高中職免學費補助適用對象增加','不贊成停止十二年國教高中職免學費補助適用對象增加','不應該停止十二年國教高中職免學費補助適用對象增加',
'停止十二年國教高中職免學費補助適用對象增加是不應該的','停止十二年國教高中職免學費補助適用對象增加是不合理的','停止十二年國教高中職免學費補助適用對象增加是不對的','停止十二年國教高中職免學費補助適用對象增加是不正確的','停止十二年國教高中職免學費補助適用對象增加是錯誤的',
'不支持取消十二年國教高中職免學費補助適用對象增加','不拒絕取消十二年國教高中職免學費補助適用對象增加','不同意取消十二年國教高中職免學費補助適用對象增加','不贊成取消十二年國教高中職免學費補助適用對象增加','不應該取消十二年國教高中職免學費補助適用對象增加',
'取消十二年國教高中職免學費補助適用對象增加是不應該的','取消十二年國教高中職免學費補助適用對象增加是不合理的','取消十二年國教高中職免學費補助適用對象增加是不對的','取消十二年國教高中職免學費補助適用對象增加是不正確的','取消十二年國教高中職免學費補助適用對象增加是錯誤的',
'不支持禁止十二年國教高中職免學費補助適用對象增加','不拒絕禁止十二年國教高中職免學費補助適用對象增加','不同意禁止十二年國教高中職免學費補助適用對象增加','不贊成禁止十二年國教高中職免學費補助適用對象增加','不應該禁止十二年國教高中職免學費補助適用對象增加',
'禁止十二年國教高中職免學費補助適用對象增加是不應該的','禁止十二年國教高中職免學費補助適用對象增加是不合理的','禁止十二年國教高中職免學費補助適用對象增加是不對的','禁止十二年國教高中職免學費補助適用對象增加是不正確的','禁止十二年國教高中職免學費補助適用對象增加是錯誤的',
'不支持拒絕十二年國教高中職免學費補助適用對象增加','不拒絕拒絕十二年國教高中職免學費補助適用對象增加','不同意拒絕十二年國教高中職免學費補助適用對象增加','不贊成拒絕十二年國教高中職免學費補助適用對象增加','不應該拒絕十二年國教高中職免學費補助適用對象增加',
'拒絕十二年國教高中職免學費補助適用對象增加是不應該的','拒絕十二年國教高中職免學費補助適用對象增加是不合理的','拒絕十二年國教高中職免學費補助適用對象增加是不對的','拒絕十二年國教高中職免學費補助適用對象增加是不正確的','拒絕十二年國教高中職免學費補助適用對象增加是錯誤的',
],
'贊成文林苑都更案可依法拆除王家':['不支持文林苑都更案可依法拆除王家','反對文林苑都更案可依法拆除王家','不贊成文林苑都更案可依法拆除王家','不同意文林苑都更案可依法拆除王家','文林苑都更案不應該依法拆除王家',
'文林苑都更案依法拆除王家是不應該的','文林苑都更案依法拆除王家是不合理的','文林苑都更案依法拆除王家是不對的','文林苑都更案依法拆除王家是不正確的','文林苑都更案依法拆除王家是錯誤的',
'支持拒絕文林苑都更案依法拆除王家','不反對拒絕文林苑都更案依法拆除王家','贊成拒絕文林苑都更案依法拆除王家','同意拒絕文林苑都更案依法拆除王家','應該拒絕文林苑都更案依法拆除王家',
'拒絕文林苑都更案依法拆除王家是應該的','拒絕文林苑都更案依法拆除王家是合理的','拒絕文林苑都更案依法拆除王家是對的','拒絕文林苑都更案依法拆除王家是正確的',
'支持停止文林苑都更案依法拆除王家','不反對停止文林苑都更案依法拆除王家','贊成停止文林苑都更案依法拆除王家','同意停止文林苑都更案依法拆除王家','應該停止文林苑都更案依法拆除王家',
'停止文林苑都更案依法拆除王家是應該的','停止文林苑都更案依法拆除王家是合理的','停止文林苑都更案依法拆除王家是對的','停止文林苑都更案依法拆除王家是正確的',
'支持取消文林苑都更案依法拆除王家','不反對取消文林苑都更案依法拆除王家','贊成取消文林苑都更案依法拆除王家','同意取消文林苑都更案依法拆除王家','應該取消文林苑都更案依法拆除王家',
'取消文林苑都更案依法拆除王家是應該的','取消文林苑都更案依法拆除王家是合理的','取消文林苑都更案依法拆除王家是對的','取消文林苑都更案依法拆除王家是正確的',
'支持禁止文林苑都更案依法拆除王家','不反對禁止文林苑都更案依法拆除王家','贊成禁止文林苑都更案依法拆除王家','同意禁止文林苑都更案依法拆除王家','應該禁止文林苑都更案依法拆除王家',
'禁止文林苑都更案依法拆除王家是應該的','禁止文林苑都更案依法拆除王家是合理的','禁止文林苑都更案依法拆除王家是對的','禁止文林苑都更案依法拆除王家是正確的',
],
'反對二代健保規定':['支持二代健保規定','同意二代健保規定','贊成二代健保規定','允許二代健保規定',
'應該支持二代健保規定','應該同意二代健保規定','應該贊成二代健保規定',
'支持二代健保規定是應該的','同意二代健保規定是應該的','贊成二代健保規定是應該的',
'二代健保規定是合理的','支持二代健保規定是合理的','同意二代健保規定是合理的','贊成二代健保規定是合理的','','','',
'二代健保規定應該被接受','支持二代健保規定應該被接受','同意二代健保規定應該被接受','贊成二代健保規定應該被接受','','','','','',
'二代健保規定應該被支持','同意二代健保規定應該被支持','贊成二代健保規定應該被支持','','',
'二代健保規定是正確的','支持二代健保規定是正確的','同意二代健保規定是正確的','贊成二代健保規定是正確的','','','',
'不應該不支持二代健保規定','應該反對二代健保規定','不應該不贊成二代健保規定','不應該不同意二代健保規定','','','',
'不支持二代健保規定是不應該的','反對二代健保規定是不應該的','不贊成二代健保規定是不應該的','不同意二代健保規定是不應該的','','','',
'不支持二代健保規定是不合理的','反對二代健保規定是不合理的','不贊成二代健保規定是不合理的','不同意二代健保規定是不合理的','','','','','','',
'不支持二代健保規定是不對的','反對二代健保規定是不對的','不贊成二代健保規定是不對的','不同意二代健保規定是不對的',
''],
'臺灣應開放含瘦肉精(萊克多巴胺)之美國牛肉進口':['不支持臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口','反對臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口','不贊成臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口','不同意臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口','臺灣不應該開放含瘦肉精萊克多巴胺的美國牛肉進口',
'不支持臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是應該的','反對臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是應該的','不贊成臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是應該的','不同意臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是應該的',
'不支持臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是合理的','反對臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是合理的','不贊成臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是合理的','不同意臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是合理的',
'支持臺灣禁止開放含瘦肉精萊克多巴胺的美國牛肉進口','同意臺灣禁止開放含瘦肉精萊克多巴胺的美國牛肉進口','贊成臺灣禁止開放含瘦肉精萊克多巴胺的美國牛肉進口','應該禁止臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口',
'支持臺灣拒絕開放含瘦肉精萊克多巴胺的美國牛肉進口','同意臺灣拒絕開放含瘦肉精萊克多巴胺的美國牛肉進口','贊成臺灣拒絕開放含瘦肉精萊克多巴胺的美國牛肉進口','應該拒絕臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口',
'不支持臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被接受的','反對臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被接受的','不贊成臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被接受的','不同意臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被接受的',
'不支持臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被支持的','反對臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被支持的','不贊成臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被支持的','不同意臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是可被支持的',
'不支持臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是正確的','反對臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是正確的','不贊成臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是正確的','不同意臺灣開放含瘦肉精萊克多巴胺的美國牛肉進口是正確的','','','','','','','','','','',
],
'國際賽事會場內應該可以持中華民國國旗':['不支持國際賽事會場內可以持中華民國國旗','反對國際賽事會場內可以持中華民國國旗','不贊成國際賽事會場內可以持中華民國國旗','不同意國際賽事會場內可以持中華民國國旗','國際賽事會場內不應該持中華民國國旗','國際賽事會場內持中華民國國旗是不應該的',
'國際賽事會場內持中華民國國旗是不合理的','國際賽事會場內持中華民國國旗是不對的',
'支持禁止國際賽事會場內持中華民國國旗','同意禁止國際賽事會場內持中華民國國旗','贊成禁止國際賽事會場內持中華民國國旗','應該禁止國際賽事會場內持中華民國國旗',
'禁止國際賽事會場內持中華民國國旗是應該的','禁止國際賽事會場內持中華民國國旗是可被支持的','禁止國際賽事會場內持中華民國國旗是合理的','禁止國際賽事會場內持中華民國國旗是是可被接受的','禁止國際賽事會場內持中華民國國旗是正確的',
'支持拒絕國際賽事會場內持中華民國國旗','同意拒絕國際賽事內持中華民國國旗','贊成拒絕國際賽事內持中華民國國旗','應該拒絕國際賽事內持中華民國國旗',
'拒絕國際賽事內持中華民國國旗是應該的','拒絕國際賽事內持中華民國國旗是合理的','拒絕國際賽事內持中華民國國旗是可被接受的','拒絕國際賽事內持中華民國國旗是可被支持的','拒絕國際賽事內持中華民國國旗是正確的',
],
'反對無圍牆校園':[
'不應該禁止校園設圍牆','禁止校園設圍牆是不應該的','禁止校園設圍牆是不合理的','禁止校園設圍牆是不對的',
'支持校園取消圍牆','同意校園取消圍牆','贊成校園取消圍牆','校園應該取消圍牆','校園取消圍牆是應該的','校園取消圍牆是可被接受的','校園取消圍牆是可被支持的','校園取消圍牆是合理的','校園取消圍牆是正確的',''
'支持校園拒絕設圍牆','同意校園拒絕設圍牆','贊成校園拒絕設圍牆','校園應該拒絕設圍牆','校園拒絕設圍牆是應該的','校園拒絕設圍牆是合理的','校園拒絕設圍牆是可被接受的','校園拒絕設圍牆是可被支持的','校園拒絕設圍牆是正確的','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',
'不支持校園設圍牆是不應該的','反對校園設圍牆是不應該的','不贊成校園設圍牆是不應該的','不同意校園設圍牆是不應該的',
'不支持校園設圍牆是不合理的','反對校園設圍牆是不合理的','不贊成校園設圍牆是不合理的','不同意校園設圍牆是不合理的',
'不支持校園設圍牆是不對的','反對校園設圍牆是不對的','不贊成校園設圍牆是不對的','不同意校園設圍牆是不對的'],
'另立專法保障同婚是正確的':['不支持另立專法保障同婚','不同意另立專法保障同婚','反對另立專法保障同婚','不贊成另立專法保障同婚','不應該另立專法保障同婚',
'另立專法保障同婚是不應該的','另立專法保障同婚是不合理的','另立專法保障同婚是不對的','拒絕另立專法保障同婚',
'不支持允許另立專法保障同婚','不同意允許另立專法保障同婚','不贊成允許另立專法保障同婚','不應該允許另立專法保障同婚',
'允許另立專法保障同婚是不應該的','允許另立專法保障同婚是不對的','允許另立專法保障同婚是不合理的','允許另立專法保障同婚是不正確的','允許另立專法保障同婚是錯誤的',
'支持取消專法保障同婚','同意取消專法保障同婚','贊成取消專法保障同婚','應該取消專法保障同婚',
'取消專法保障同婚是應該的','取消專法保障同婚是合理的','取消專法保障同婚是可被接受的','取消專法保障同婚是可被支持的','取消專法保障同婚是正確的',
'支持拒絕專法保障同婚','同意拒絕專法保障同婚','贊成拒絕專法保障同婚','拒絕專法保障同婚是應該的','拒絕專法保障同婚是合理的','拒絕專法保障同婚是可被接受的','拒絕專法保障同婚是可被支持的','拒絕專法保障同婚是正確的',
],
'堅決反對政府舉債發展前瞻建設計畫':['支持政府舉債發展前瞻建設計畫','同意政府舉債發展前瞻建設計畫','贊成政府舉債發展前瞻建設計畫','政府應該舉債發展前瞻建設計畫',
'政府舉債發展前瞻建設計畫是合理的','政府舉債發展前瞻建設計畫是可被接受的','政府舉債發展前瞻建設計畫是可被支持的','政府舉債發展前瞻建設計畫是正確的',
'不支持政府舉債發展前瞻建設計畫是不應該的','不同意政府舉債發展前瞻建設計畫是不應該的','不贊成政府舉債發展前瞻建設計畫是不應該的','不允許政府舉債發展前瞻建設計畫是不應該的',
'不支持禁止政府舉債發展前瞻建設計畫','不同意禁止政府舉債發展前瞻建設計畫','不贊成禁止政府舉債發展前瞻建設計畫','不應該禁止政府舉債發展前瞻建設計畫',
'禁止政府舉債發展前瞻建設計畫是不應該的','禁止政府舉債發展前瞻建設計畫是不合理的','禁止政府舉債發展前瞻建設計畫是不對的',
'不支持拒絕政府舉債發展前瞻建設計畫','不同意拒絕政府舉債發展前瞻建設計畫','不贊成拒絕政府舉債發展前瞻建設計畫','不應該拒絕政府舉債發展前瞻建設計畫',
'拒絕政府舉債發展前瞻建設計畫是不應該的','拒絕政府舉債發展前瞻建設計畫是不合理的','拒絕政府舉債發展前瞻建設計畫是不對的'
],
'支持陳前總統保外就醫':['不支持陳前總統保外就醫','反對陳前總統保外就醫','不贊成陳前總統保外就醫','不同意陳前總統保外就醫',
'不應該讓陳前總統保外就醫','讓陳前總統保外就醫是不應該的','讓陳前總統保外就醫是不合理的','讓陳前總統保外就醫是不對的','讓陳前總統保外就醫是不正確的','讓陳前總統保外就醫是錯誤的',
'支持禁止陳前總統保外就醫','同意禁止陳前總統保外就醫','贊成禁止陳前總統保外就醫','應該禁止陳前總統保外就醫',
'禁止陳前總統保外就醫是應該的','禁止陳前總統保外就醫是合理的','禁止陳前總統保外就醫是可被接受的','禁止陳前總統保外就醫是可被支持的','禁止陳前總統保外就醫是正確的','禁止陳前總統保外交易是對的',
'支持拒絕陳前總統保外就醫','同意拒絕陳前總統保外就醫','贊成拒絕陳前總統保外就醫','應該拒絕陳前總統保外就醫',
'拒絕陳前總統把未就醫是應該的','拒絕參選總統保外就醫是合理的','拒絕陳前總統保外覺得是可被接受的','拒絕陳前總統保外就醫是可被支持的','拒絕陳前總統保外就醫是正確的','拒絕陳前總統保外就醫是對的',
'反對允許陳前總統保外就醫','不支持允許陳前總統保外就醫','不贊成允許陳前總統保外就醫','不同意允許陳前總統保外就醫',
'允許陳前總統保外就醫是不應該的','允許陳前總統保外就醫是不合理的','允許陳前總統保外就醫是不對的','允許陳前總統保外就醫是不正確的','允許陳前總統保外就醫是錯誤的',
],
'年金改革應取消或應調降軍公教月退之優存利率十八趴':['不支持年金改革取消或調降軍公教月退優存利率18%十八趴','反對年金改革取消或調降軍公教月退優存利率18%十八趴','不贊成年金改革取消或調降軍公教月退優存利率18%十八趴','不同意年金改革取消或調降軍公教月退優存利率18%十八趴','年金改革不應該取消或調降軍公教月退優存利率18%十八趴',
'年金改革取消或調降軍公教月退優存利率18%十八趴是不應該的','年金改革取消或調降軍公教月退優存利率18%十八趴是不合理的','年金改革取消或調降軍公教月退優存利率18%十八趴是不可被接受的','年金改革取消或調降軍公教月退優存利率18%十八趴是不被支持的','年金改革取消或調降軍公教月退優存利率18%十八趴是不正確的','年金改革取消或調降軍公教月退優存利率18%十八趴是錯誤的','年金改革取消或調降軍公教月退優存利率18%十八趴是不對的',
'支持拒絕年金改革取消或調降軍公教月退優存利率18%十八趴','同意拒絕年金改革取消或調降軍公教月退優存利率18%十八趴','贊成拒絕年金改革取消或調降軍公教月退優存利率18%十八趴','應拒絕年金改革取消或調降軍公教月退優存利率18%十八趴',
'拒絕年金改革取消或調降軍公教月退優存利率18%十八趴是應該的','拒絕年金改革取消或調降軍公教月退優存利率18%十八趴是合理的','拒絕年金改革取消或調降軍公教月退優存利率18%十八趴是可被接受的','拒絕年金改革取消或調降軍公教月退優存利率18%十八趴是可被支持的','拒絕年金改革取消或調降軍公教月退優存利率18%十八趴是正確的','拒絕年金改革取消或調降軍公教月退優存利率18%十八趴是對的',
'不支持允許年金改革取消或調降軍公教月退優存利率18%十八趴','不同意年金改革取消或調降軍公教月退優存利率18%十八趴','不贊成年金改革取消或調降軍公教月退優存利率18%十八趴','年金改革不應該取消或調降軍公教月退優存利率18%十八趴',
'年金改革取消或調降軍公教月退優存利率18%十八趴是合理的','年金改革取消或調降軍公教月退優存利率18%十八趴是可被接受的','年金改革取消或調降軍公教月退優存利率18%十八趴是可被支持的','年金改革取消或調降軍公教月退優存利率18%十八趴是正確的','年金改革取消或調降軍公教月退優存利率18%十八趴是對的'
],
'同意動物實驗':['不支持動物實驗','反對動物實驗','不贊成動物實驗','不同意動物實驗','不應該動物實驗','動物實驗是不應該的','動物實驗是不合理的','動物實驗是不對的','動物實驗是不正確的','動物實驗是錯誤的',
'贊成禁止動物實驗','支持禁止動物實驗','同意禁止動物實驗','應該禁止動物實驗','禁止動物實驗是應該的','禁止動物實驗是合理的','禁止動物實驗是正確的','禁止動物實驗是對的',
'支持拒絕動物實驗','同意拒絕動物實驗','贊成拒絕動物實驗','拒絕動物實驗是應該的','應該拒絕動物實驗','拒絕動物實驗是合理的','拒絕動物實驗是正確的','拒絕動物實驗是對的',
'不支持允許動物實驗','反對允許動物實驗','不贊成允許動物實驗','不同意允許動物實驗','允許動物實驗是不應該的','不應該允許動物實驗','允許動物實驗是不合理的','允許動物實驗是不對的','允許動物實驗是不正確的','允許動物實驗是錯誤的',
],
'油價應該凍漲或緩漲':['支持油價漲價','同意油價漲價','贊成油價漲價','油價應該漲價','油價漲價是應該的','油價漲價是合理的','是可被接受的','油價漲價是被支持的','油價漲價是正確的',
'不支持油價應該凍漲或緩漲','反對油價應該凍漲或緩漲','不贊成油價應該凍漲或緩漲','不同意油價應該凍漲或緩漲','油價不應該凍漲或緩漲',
'油價凍漲或緩漲是不應該的','油價凍漲或緩漲是不合理的','油價凍漲或緩漲是不對的','油價凍漲或緩漲是不正確的','油價凍漲或緩漲是錯誤的',
'支持取消油價凍漲或緩漲','同意取消油價凍漲或緩漲','贊成取消油價凍漲或緩漲','反對取消油價漲價','不贊成取消油價漲價','不同意取消油價漲價','不應該取消油價漲價',
'取消油價漲價是不合理的','取消油價漲價是不應該的','取消油價漲價是不正確的','取消油價漲價是錯誤的','取消油價漲價是不對的',
'支持拒絕油價凍漲或緩漲','同意拒絕油價凍漲或緩漲','贊成拒絕油價凍漲或緩漲','反對拒絕油價漲價','不贊成拒絕油價漲價','不同意拒絕油價漲價','不應該拒絕油價漲價',
'拒絕油價漲價是不合理的','拒絕油價漲價是不應該的','拒絕油價漲價是不正確的','拒絕油價漲價是錯誤的','拒絕油價漲價是不對的',
'支持禁止油價凍漲或緩漲','同意禁止油價凍漲或緩漲','贊成禁止油價凍漲或緩漲','反對禁止油價漲價','不贊成禁止油價漲價','不同意禁止油價漲價','不應該禁止油價漲價',
'禁止油價漲價是不合理的','禁止油價漲價是不應該的','禁止油價漲價是不正確的','禁止油價漲價是錯誤的','禁止油價漲價是不對的'
],
'反對旺旺中時併購中嘉':['支持旺旺中時併購中嘉','同意旺旺中時併購中嘉','贊成旺旺中時併購中嘉','旺旺中時應可以併購中嘉',
'旺旺中時併購中嘉是應該的','旺旺中時併購中嘉是合理的','旺旺中時併購中嘉是可被接受的','旺旺中時併購中嘉是可被支持的','旺旺中時併購中嘉是正確的','旺旺中時併購中嘉是對的',
'支持允許旺旺中時併購中嘉','同意允許旺旺中時併購中嘉','贊成允許旺旺中時併購中嘉','應該允許旺旺中時併購中嘉','應允許旺旺中時併購中嘉',
'允許旺旺中時併購中嘉是應該的','允許旺旺中時併購中嘉是可被接受的','允許旺旺中時併購中嘉是可被支持的','允許旺旺中時併購中嘉是正確的','允許旺旺中時併購中嘉是對的',
'不支持禁止旺旺中時併購中嘉','反對禁止旺旺中時併購中嘉','不贊成禁止旺旺中時併購中嘉','不同意禁止旺旺中時併購中嘉','不應該禁止旺旺中時併購中嘉',
'禁止旺旺中時併購中嘉是不應該的','禁止旺旺中時併購中嘉是不合理的','禁止旺旺中時併購中嘉是不對的','禁止旺旺中時併購中嘉是不正確的','禁止旺旺中時併購中嘉是錯誤的',
'不支持拒絕旺旺中時併購中嘉','反對拒絕旺旺中時併購中嘉','不贊成拒絕旺旺中時併購中嘉','不同意拒絕旺旺中時併購中嘉','不應該拒絕旺旺中時併購中嘉',
'拒絕旺旺中時併購中嘉是不應該的','拒絕旺旺中時併購中嘉是不合理的','拒絕旺旺中時併購中嘉是不對的','拒絕旺旺中時併購中嘉是不正確的','拒絕旺旺中時併購中嘉是錯誤的',
],
'贊同課綱微調':['不支持課綱微調','反對課綱微調','不贊成課綱微調','不同意課綱微調','課綱不應該微調',
'課綱微調是不應該的','課綱微調是不合理的','課綱微調是不對的','課綱微調是不正確的','課綱微調是錯誤的',
'不支持允許課綱微調','反對允許課綱微調','不贊成允許課綱微調','不同意允許課綱微調','課綱不應該微調',
'允許課綱微調是不應該的','允許課綱微調是不合理的','允許課綱微調是不對的','允許課綱微調是不正確的','允許課綱微調是錯誤的',
'支持取消課綱微調','同意取消課綱微調','贊成取消課綱微調','應該取消課綱微調',
'取消課綱微調是應該的','取消課綱微調是合理的','取消課綱微調可被接受的','取消課綱微調可被支持的','取消課綱微調是正確的','取消課綱微調是對的',
'支持拒絕課綱微調','同意拒絕課綱微調','贊成拒絕課綱微調','應該拒絕課綱微調',
'拒絕課綱微調是應該的','拒絕課綱微調是合理的','拒絕課綱微調可被接受的','拒絕課綱微調可被支持的','拒絕課綱微調是正確的','拒絕課綱微調是對的',
'支持禁止課綱微調','同意禁止課綱微調','贊成禁止課綱微調','應該禁止課綱微調',
'禁止課綱微調是應該的','禁止課綱微調是合理的','禁止課綱微調可被接受的','禁止課綱微調可被支持的','禁止課綱微調是正確的','禁止課綱微調是對的'
],
'贊成流浪動物零撲殺':['不支持流浪動物零撲殺','反對流浪動物零撲殺','不贊成流浪動物零撲殺','不同意流浪動物零撲殺','不應該流浪動物零撲殺',
'流浪動物零撲殺是不應該的','流浪動物零撲殺是不合理的','流浪動物零撲殺是不對的',
'支持撲殺流浪動物','同意撲殺流浪動物','贊成撲殺流浪動物','應該撲殺流浪動物',
'撲殺流浪動物是應該的','撲殺流浪動物是合理的','撲殺流浪動物是可被接受的','撲殺流浪動物是可被支持的','撲殺流浪動物是正確的','撲殺流浪動物是對的',
'支持允許撲殺流浪動物','同意允許撲殺流浪動物','贊成允許撲殺流浪動物','應該允許撲殺流浪動物',
'允許撲殺流浪動物是應該的','允許撲殺流浪動物是合理的','允許撲殺流浪動物是可被接受的','允許撲殺流浪動物是可被支持的','允許撲殺流浪動物是正確的','允許撲殺流浪動物是對的',
'不支持拒絕撲殺流浪動物','反對拒絕撲殺流浪動物','不贊成拒絕撲殺流浪動物','不同意拒絕撲殺流浪動物','不應該拒絕撲殺流浪動物',
'拒絕撲殺流浪動物是不應該的','拒絕撲殺流浪動物是不合理的','拒絕撲殺流浪動物是不對的','拒絕撲殺流浪動物是不正確的','拒絕撲殺流浪動物是錯誤的',
'不支持取消撲殺流浪動物','反對取消撲殺流浪動物','不贊成取消撲殺流浪動物','不同意取消撲殺流浪動物','不應該取消撲殺流浪動物',
'取消撲殺流浪動物是不應該的','取消撲殺流浪動物是不合理的','取消撲殺流浪動物是不對的','取消撲殺流浪動物是不正確的','取消撲殺流浪動物是錯誤的',
'不支持禁止撲殺流浪動物','反對禁止撲殺流浪動物','不贊成禁止撲殺流浪動物','不同意禁止撲殺流浪動物','不應該禁止撲殺流浪動物',
'禁止撲殺流浪動物是不應該的','禁止撲殺流浪動物是不合理的','禁止撲殺流浪動物是不對的','禁止撲殺流浪動物是不正確的','禁止撲殺流浪動物是錯誤的',
'支持拒絕流浪動物零撲殺','同意拒絕流浪動物零撲殺','贊成拒絕流浪動物零撲殺','應該拒絕流浪動物零撲殺',
'拒絕流浪動物零撲殺是應該的','拒絕流浪動物零撲殺是合理的','拒絕流浪動物零撲殺是對的','拒絕流浪動物零撲殺是正確的',
'支持禁止流浪動物零撲殺','同意禁止流浪動物零撲殺','贊成禁止流浪動物零撲殺','不拒絕禁止流浪動物零撲殺','應該禁止流浪動物零撲殺',
'禁止流浪動物零撲殺是應該的','禁止流浪動物零撲殺是合理的','禁止流浪動物零撲殺是對的','禁止流浪動物零撲殺是正確的',
'支持取消流浪動物零撲殺','同意取消流浪動物零撲殺','贊成取消流浪動物零撲殺','不拒絕取消流浪動物零撲殺','應該取消流浪動物零撲殺',
'取消流浪動物零撲殺是應該的','取消流浪動物零撲殺是合理的','取消流浪動物零撲殺是對的','取消流浪動物零撲殺是正確的'
],
'核四應該啟用':['不支持核四啟用','反對核四啟用','不贊成核四啟用','不同意核四啟用','不支持核四應該啟用','反對核四應該啟用','不贊成核四應該啟用','不同意核四應該啟用',
'核四啟用是不應該的','核四啟用是不合理的','核四啟用是不對的','核四啟用是不正確的','核四啟用是錯誤的',
'支持停止核四啟用','不拒絕停止核四啟用','同意停止核四啟用','贊成停止核四啟用','應該停止核四啟用',
'停止核四啟用是應該的','停止核四啟用是合理的','停止核四啟用是可被接受的','停止核四啟用是可被支持的','停止核四啟用是正確的','停止核四啟用是對的',
'支持取消核四啟用','不取消取消核四啟用','同意取消核四啟用','贊成取消核四啟用','應該取消核四啟用',
'取消核四啟用是應該的','取消核四啟用是合理的','取消核四啟用是可被接受的','取消核四啟用是可被支持的','取消核四啟用是正確的','取消核四啟用是對的',
'支持禁止核四啟用','不拒絕禁止核四啟用','同意禁止核四啟用','贊成禁止核四啟用','應該禁止核四啟用',
'禁止核四啟用是應該的','禁止核四啟用是合理的','禁止核四啟用是可被接受的','禁止核四啟用是可被支持的','禁止核四啟用是正確的','禁止核四啟用是對的',
'支持拒絕核四啟用','不拒絕拒絕核四啟用','同意拒絕核四啟用','贊成拒絕核四啟用','應該拒絕核四啟用',
'拒絕核四啟用是應該的','拒絕核四啟用是合理的','拒絕核四啟用是可被接受的','拒絕核四啟用是可被支持的','拒絕核四啟用是正確的','拒絕核四啟用是對的',
'支持停用核四','不拒絕停用核四','同意停用核四','贊成停用核四','應該停用核四','停用核四是應該的','停用核四是合理的',
'停用核四是可被接受的','停用核四是可被支持的','停用核四是正確的','停用核四是對的'
],
})
self.settings = settings
self.ckiptokenizer = BertTokenizerFast.from_pretrained('bert-base-chinese')
#maxLenLimitAllowedInaSentence = 200
def getCKIPTokens(self, inputStr, srctype='Query'):
argInTokenize = self.settings['tokenizerSettingsDoc'] if srctype=='Doc' else self.settings['tokenizerSettingsQuery']
tokens = self.ckiptokenizer(inputStr, **argInTokenize)
return tokens
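    # Usage sketch (illustrative): self.getCKIPTokens('核四應該啟用', srctype='Query')
    # returns the BERT input dict (input_ids / token_type_ids / attention_mask) as numpy arrays.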
def readsrc(self, **kwargs):
        kwargs.setdefault('candidate_negative_sampling_rate', 1.0)  # higher values keep more augmentation-generated negative examples
"""
td: Index(['Query', 'News_Index', 'Relevance'], dtype='object')
qs: Index(['Query_Index', 'Query'], dtype='object')
nc: Index(['News_Index', 'News_URL'], dtype='object')
newdf: Index(['News_URL', 'News_Fulltitle', 'News_Fullcontent'], dtype='object')
"""
td = pd.read_csv(os.path.join(self.settings['workingdir'],'TD.csv'))
qs = pd.read_csv(os.path.join(self.settings['workingdir'], 'QS_1.csv')).append(pd.read_csv(os.path.join(self.settings['workingdir'], 'QS_2.csv'))).reset_index(drop=True)
qs = pd.merge(left=td.loc[:,["Query"]].drop_duplicates(), right=qs, how='outer', on='Query').drop_duplicates(subset=['Query']).reset_index(drop=True)
nc = pd.read_csv(os.path.join(self.settings['workingdir'],'NC_1.csv')).append(pd.read_csv(os.path.join(self.settings['workingdir'],'NC_2.csv'))).drop_duplicates(subset=['News_Index']).reset_index(drop=True)
newsdf = pd.merge(
left=pd.read_json(os.path.join(self.settings['workingdir'],'url_to_title.json'), orient="index").reset_index().rename(columns={"index": "News_URL", 0: "News_Fulltitle"}),
right=pd.read_json(os.path.join(self.settings['workingdir'],'url2content.json'), orient="index").reset_index().rename(columns={"index": "News_URL", 0: "News_Fullcontent"}),
how='left', on='News_URL').reset_index(drop=True).append(
pd.read_csv(
os.path.join(self.settings['workingdir'],'newsdf3.csv')
).loc[:,["News_URL","News_Fulltitle","News_Fullcontent"]]
).reset_index(drop=True)
newsdf = newsdf[~pd.isnull(newsdf.News_Fullcontent)]
#newsdf['News_Fullcontent'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['News_Fullcontent'], lambda x: re.sub(r' +',' ',x).replace(''','\'').replace('audiojs.events.ready(function(){var as = audiojs.createAll();});',''), threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
#newsdf['News_FullTitleContent'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf, lambda x: x['News_Fulltitle']+'。'+x['News_Fullcontent'], threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
newsdf['News_Fullcontent'] = newsdf['News_Fullcontent'].apply( func=lambda x: re.sub(r' +',' ',x).replace(''','\''))
newsdf['News_FullTitleContent'] = newsdf.apply( func=lambda x: x['News_Fulltitle']+'。'+x['News_Fullcontent'], axis=1)
newsdf = newsdf[~news_retrieval_common_funcs.is_nan(newsdf.News_FullTitleContent)]
for col in newsdf.columns:
newsdf = newsdf.drop_duplicates(subset=[col])
#newsdf['sentencebreakNews_FullTitleContent'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['News_FullTitleContent'], news_retrieval_common_funcs.sentencebreaker, meta=('sentencebreakNews_FullTitleContent', object), threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
#newsdf['sentencebreakNews_FullTitleContent'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['sentencebreakNews_FullTitleContent'], lambda x: [s for s in x if s not in ['()','()','《》']], threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
        # drop overly long sentences from the news articles to save memory
#newsdf['sentencebreak_FullTitleContent_maxlen'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['sentencebreakNews_FullTitleContent'], lambda x: [s for s in x if len(s)<=maxLenLimitOf_aSentence], threading=True, computeoutput=True, workers=self.settings['num_workers'])
#newsdf['sentencebreakNews_FullTitleContent'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['sentencebreakNews_FullTitleContent'], lambda x: [s for s in x if (re.match('^\d+$', s)==None) and (s not in ['()','()','《》']) and (len(s)<=maxLenLimitAllowedInaSentence)], threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
newsdf['News_FullTitleContent_fullStrlen'] = newsdf['News_FullTitleContent'].apply(len)
#newsdf['sentencebreak_FullTitleContent_N_of_sentences'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['sentencebreakNews_FullTitleContent'], lambda x: len(x), threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
#newsdf['sentencebreak_FullTitleContent_maxlen'] = multiprocessing_functions.df_apply_by_daskmultiprocessing(newsdf['sentencebreakNews_FullTitleContent'], lambda x: max([len(s) for s in x]), threading=True, computeoutput=True, workers=self.settings['num_workers']*10)
#newsdf = newsdf[~pd.isnull(newsdf.sentencebreakNews_FullTitleContent)]
nc = nc[~pd.isnull(nc.News_Index)]
#maxlengthNews = newsdf.sort_values(by=['full_strlen'], ascending=False).iloc[0,:]['News_FullTitleContent']
compset = pd.merge(left=td, right=qs, how='outer', on='Query')
compset = | pd.merge(left=compset, right=nc, how='outer', on='News_Index') | pandas.merge |
from numpy.fft import fft
import pickle_compat
pickle_compat.patch()
import pandas as pd
from sklearn import metrics
import pickle
import numpy as np
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from datetime import timedelta as td
Raw_CGMData1=pd.read_csv('CGMData.csv', low_memory=False)
RawInsulinData1= | pd.read_csv('InsulinData.csv', low_memory=False) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = | tm.box_expected(idx, box) | pandas.util.testing.box_expected |
from dataclasses import make_dataclass
from datetime import datetime, timedelta
import logging
import os
import requests
from settings.constants import TEST_DATA_DIR, VITIGEOSS_CONFIG_FILE
import pandas as pd
import json
from settings.instance import settings
logger = logging.getLogger()
class BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Bearer " + self.token
return r
def get_bearerAuth(credentials: dict):
auth_url = os.path.join(settings.vitigeoss_api_base_url, settings.vitigeoss_api_auth_endpoint)
response = requests.post(auth_url, data=credentials).json()
b_id, bearer = response['id'], response['token']['hash']
bearerAuth = BearerAuth(f'{b_id}-{bearer}')
return bearerAuth
class WeatherStationDriver:
"""
Driver class in charge of interfacing with the Vitigeoss APIs to retrieve data coming from the sensors of
the weather stations
"""
def __init__(self) -> None:
""" Configure the endpoints to be called using settings """
self.api_base_url = settings.vitigeoss_api_base_url
self.station_endpoint = settings.get_vitigeoss_api_station_endpoint
self.sensor_endpoint = settings.get_vitigeoss_api_sensor_endpoint
self.sensor_name_id_dict = None
def register_station_sensor_ids(self, station: str, type_keys: list, bearerAuth: BearerAuth=get_bearerAuth(settings.get_api_auth_credentials())):
"""
Retrieve the list of sensors available at the specified station and filter them by their typeKey.
        Save the filtered sensor ids in a dictionary, where the key is the corresponding typeKey
:param station: string containing the name of the weather station whose sensors are to be listed
:param type_keys: List of type keys that identify the type of sensor, e.g. ['temp', 'radiation']
:param bearerAuth: authentication token for the API service
:return: None - The class attribute "sensor_name_id_dict" is initialized
"""
station_url = os.path.join(self.api_base_url, self.station_endpoint(station))
response = requests.get(station_url, auth=bearerAuth)
data = response.json()
if response.status_code not in [200, 201]:
logger.warning(data)
return
self.sensor_name_id_dict = dict()
for sensor in data['sensors']:
if sensor['typeKey'] in type_keys:
self.sensor_name_id_dict[sensor['typeKey']] = sensor['_id']
def get_sensor_data(self, dateStart: str, dateEnd: str, bearerAuth: BearerAuth=get_bearerAuth(settings.get_api_auth_credentials())):
"""
For each type of sensor saved in the class attribute "sensor_name_id_dict" by the "register_station_sensor_ids()" method,
        request the sensor data for the period between dateStart and dateEnd from the Vitigeoss APIs
:returns: list of dictionaries, one for each sensor
"""
if self.sensor_name_id_dict is None:
raise Exception(f'Sensor ids not registered!')
sensors = []
for _, _id in self.sensor_name_id_dict.items():
sensor_url = f'{os.path.join(self.api_base_url, self.sensor_endpoint(_id))}?dateStart={dateStart}&dateEnd={dateEnd}&includeFields=dateStart,measure'
logger.warning(sensor_url)
response = requests.get(sensor_url, auth=bearerAuth)
sensor_data = response.json()
if response.status_code not in [200, 201]:
logger.warning(sensor_data)
continue
sensors.append(sensor_data)
return sensors
@staticmethod
def get_df_from_sensor_data(sensors: list):
""" Support method to organize the messy data coming from sensors in a single and good looking dataframe """
Measurement = make_dataclass("Measurement", [("datetime", str), ("typeKey", str), ("measure", float)])
measurements_list = []
for sensor in sensors:
for sensor_measurement in sensor['measurements']:
measurement = Measurement(datetime.strptime(sensor_measurement['dateStart'], '%Y-%m-%dT%H:%M:%S.000Z'), sensor['typeKey'], sensor_measurement['measure'])
measurements_list.append(measurement)
if len(measurements_list) == 0:
return None
return pd.DataFrame(measurements_list)
class MockedWeatherStationDriver(WeatherStationDriver):
"""
Mocked version of the WeatherStationDriver, used for testing purposes.
Instead of calling the external APIs to retrieve weather station data,
it reads a mocked sample from a json file and returns its content
"""
def __init__(self) -> None:
super().__init__()
def get_sensor_data(self, dateStart: str, dateEnd: str):
if self.sensor_name_id_dict is None:
raise Exception(f'Sensor ids not registered!')
with open(os.path.join(TEST_DATA_DIR, 'mocked_sensor_data.json')) as f:
mocked_sensor_data = json.loads(f.read())
return mocked_sensor_data
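# Illustrative sketch of how the driver classes above are meant to be chained together.
# The station name, type keys and date strings are placeholders, and the calls require
# network access plus valid credentials in `settings`; this is not invoked anywhere.
def _example_fetch_sensor_df():
    driver = WeatherStationDriver()
    auth = get_bearerAuth(settings.get_api_auth_credentials())
    # Resolve the sensor ids of the typeKeys of interest at the chosen station.
    driver.register_station_sensor_ids(station="example-station", type_keys=["temp", "radiation"], bearerAuth=auth)
    # Pull the raw measurements for a date window and reshape them into a tidy DataFrame
    # with one row per (datetime, typeKey, measure) triple.
    sensors = driver.get_sensor_data("2021-06-01T00:00:00.000Z", "2021-06-07T23:59:59.000Z", bearerAuth=auth)
    return WeatherStationDriver.get_df_from_sensor_data(sensors)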
class WeatherStationManager:
def __init__(self, driver=WeatherStationDriver()) -> None:
with open(VITIGEOSS_CONFIG_FILE) as f:
self.config = json.loads(f.read())
self.input_data_features = settings.input_data_features
self.driver = driver
def get_wsdata_df(self, place, weather_station_missing_rows, chunk_days=366):
"""
Retrieve weather data from weather station sensors of the specified place.
        This method only requests data for the dates whose values are missing (not the entire year).
"""
if weather_station_missing_rows.empty:
raise Exception('The Dataframe to be updated is empty!')
self.driver.register_station_sensor_ids(station=self.config['place_to_station'][place], type_keys=self.input_data_features)
weather_station_data_df = None
for dateStart in pd.date_range(weather_station_missing_rows.index[0].to_pydatetime(),
weather_station_missing_rows.index[-1].to_pydatetime(), freq=f'{chunk_days}d'):
# Compute dateEnd, chunk_days - 1 days later than dateStart. maximum dateEnd is the 31st of December of that year
dateEnd = min(dateStart.to_pydatetime() + timedelta(days=chunk_days - 1), datetime(dateStart.year, 12, 31))
dateEnd = dateEnd + timedelta(hours=23, minutes=59, seconds=59)
try:
weekly_sensor_data = self.driver.get_sensor_data(dateStart, dateEnd)
weekly_data_df = self.driver.get_df_from_sensor_data(weekly_sensor_data)
if weather_station_data_df is None:
weather_station_data_df = weekly_data_df
else:
weather_station_data_df = weather_station_data_df.append(weekly_data_df, ignore_index=True)
except Exception as e:
logger.warning(e)
if weather_station_data_df is None:
return None
pheno_phases_df = None
update_df = self.organize_weather_station_data(weather_station_data_df, pheno_phases_df)
update_df = self.feature_engineering(update_df)
update_df = self.manually_fix_df_errors(update_df, place, dateStart.year)
update_df = update_df.interpolate(limit_direction='both') # Interpolate to fill missing data values
return update_df
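    # Worked example of the chunking above (hypothetical dates): if the missing rows span
    # 2021-03-01 .. 2021-06-30 and chunk_days=366, the loop issues a single request with
    # dateStart = 2021-03-01 00:00:00 and dateEnd = 2021-12-31 23:59:59, i.e. the end is
    # capped at Dec 31 of the start year rather than at the last missing row.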
def organize_weather_station_data(self, weather_station_data_df: pd.DataFrame, pheno_phases_df: pd.DataFrame):
"""
Process data and obtain a refined version of the pandas DataFrame to be used as input for the inference models.
        Some further steps still have to be taken afterwards to obtain the final version of the df.
"""
dataframes = []
for ft in self.input_data_features:
dataframes.append(self.transform_df(weather_station_data_df[weather_station_data_df['typeKey'] == ft]))
transformed_station_data_df = | pd.concat(dataframes, axis=1) | pandas.concat |
import pandas as pd
from collections import defaultdict
import datetime
from xlrd import xldate_as_datetime
import os
import sys
import json
from openpyxl import load_workbook
from .os_functions import last_day_of_month,enter_exit, generate_md5
from .regex_functions import replace_re_special, strQ2B, strB2Q,symbol_to_spaces, normalize_punctuations
from .nlp_functions import get_keyword_dict, get_word_freq_dict, convert_key2list, process_text_eng
from .excel_functions import write_format_columns
from .func_classes import DfDict
import gc
import re
import warnings
import traceback
import logging
from pandas.errors import OutOfBoundsDatetime
import swifter
from flashtext import KeywordProcessor
warnings.filterwarnings('ignore')
def read_config_table(file_path, dtype=str):
df = pd.DataFrame([])
    # Read the data file; only read the first visible sheet
if '.xlsx' == file_path[-5:]:
df_workbook = pd.ExcelFile(file_path)
sheets_property_list = df_workbook.book.sheets()
for sheet_property in sheets_property_list:
if sheet_property.visibility == 0:
df = df_workbook.parse(sheet_property.name, dtype=str)
break
else:
try:
df = pd.read_excel(file_path)
except:
df = pd.read_html(file_path,header=0)
if df.empty:
enter_exit(f'Cannot read any visible table in "{file_path}"')
return df
def read_data_file(file_path):
df = pd.DataFrame([])
    # Read the data file; only read the first sheet
if '.csv' == file_path[-4:]:
df = pd.read_csv(file_path)
elif '.xlsx' == file_path[-5:]:
df = pd.read_excel(file_path)
else:
try:
df = pd.read_excel(file_path)
except:
df = | pd.read_html(file_path,header=0) | pandas.read_html |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex,
to_datetime, date_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(tm.TestCase, TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_frame_ctor_datetime64_column(self):
rng = | date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') | pandas.date_range |
import io
import urllib
import pandas as pd
import requests
from bs4 import BeautifulSoup
from django.core.files.images import ImageFile
from selenium import webdriver
from .models import Plot
def find_jobs_from(request, website, job_title, location, desired_characs, filename="results.xls"):
while(True):
if website == 'Indeed':
job_soup = load_indeed_jobs_div(job_title, location)
jobs_list, num_listings, nums = extract_job_information_indeed(
job_soup, desired_characs)
if nums != 0:
break
save_jobs_to_excel(request, num_listings, filename)
return jobs_list
def save_jobs_to_excel(request, jobs_list, filename):
df = | pd.DataFrame(jobs_list) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import scipy.special
import uproot
import oyaml as yaml
from . import geometry
__all__ = [
"create_hdf_from_root",
"convert",
"convert_table_to_fixed",
"basic_query",
"basic_eval",
"rename_column",
"object_cross_cleaning",
"shift_2dvector",
"object_cross_dphi",
"mindphi",
"weight_sigmoid",
"object_groupby",
"histogram",
]
def create_hdf_from_root(path, *cfgs):
for cfg in cfgs:
outpath = os.path.join(
cfg["output"]["direc"],
os.path.splitext(os.path.basename(path))[0]+".h5",
)
if os.path.exists(outpath):
os.remove(outpath)
for cfg in cfgs:
with open(cfg["dataset"]["cfg"], 'r') as f:
dataset_cfg = yaml.safe_load(f)["datasets"]
# find cfg for current path
path_cfg = None
for dataset in dataset_cfg:
if path in dataset["files"]:
path_cfg = dataset
break
outpath = os.path.join(
cfg["output"]["direc"],
os.path.splitext(os.path.basename(path))[0]+".h5",
)
for df in uproot.pandas.iterate(path, cfg["tree"], **cfg["iterate_kwargs"]):
df = df.astype(cfg.get("dtypes", {}))
for key in cfg["dataset"]["keys"]:
df[key] = path_cfg[key]
df.to_hdf(
outpath, cfg["output"]["tree"],
format='table', append=True,
complib='zlib', complevel=9,
)
def convert(path, trees, outdir, kwargs):
for tree in trees:
new_path = os.path.join(
outdir, os.path.basename(path),
)
| pd.read_hdf(path, tree) | pandas.read_hdf |
##########
# Built-in
##########
import glob
import logging
from pathlib import Path
########
# Libs #
########
import pandas as pd
from pandas.api.types import is_numeric_dtype
logger = logging.getLogger(__name__)
class CustomIngestor():
def __init__(self, cfg):
"""Custom ingestor for VAERS dataset.
Takes in 3 raw csv files, converts them to 1 merged DataFrame for initial EDA and subsequent preprocessing.
Gets the following variables from a configuration file, and sets them as class attrbutes:
Args:
cfg (module/object): configuration file. Sets the following configuration variables as class attributes:
- FILEPATH (dict): details on relative input and output filepaths.
- folder_path (str): relative file path to folder containing the 3 raw csv files
- merged_filepath (str): relative file path to save merged DataFrame as csv file
- SYMP_COLS_DICT (dict): a dictionary with the following key-value pairs:
- key: name of newly aggregated column
- values: list of column names to use for aggregation
- SYMP_CSV_COLS (list): list of column names to subset Symptoms DataFrame
- VAX_CSV_COLS (list): list of column names to subset Vaccine DataFrame
- MAX_DATE (str): upper limit of the date range for RECVDATE. Used to filter the dataset.
Only rows with RECVDATE less than max_date will remain
"""
# attributes from config
self.filepath = getattr(cfg, "FILEPATH", None)
self.folder_path = self.filepath.get("folder_path")
self.merged_filepath = self.filepath.get("merged_filepath")
self.symp_cols_dict = getattr(cfg, "SYMP_COLS_DICT", None)
self.symp_csv_cols = getattr(cfg, "SYMP_CSV_COLS", None)
self.vax_csv_cols = getattr(cfg, "VAX_CSV_COLS", None)
self.max_date = getattr(cfg, "MAX_DATE", None)
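    # Illustrative cfg module (hypothetical values) matching the attributes read above:
    #   FILEPATH = {"folder_path": "data/raw", "merged_filepath": "data/processed/merged.csv"}
    #   SYMP_COLS_DICT = {"SYMPTOMS": ["SYMPTOM1", "SYMPTOM2", "SYMPTOM3", "SYMPTOM4", "SYMPTOM5"]}
    #   SYMP_CSV_COLS = ["VAERS_ID", "SYMPTOM1", "SYMPTOM2", "SYMPTOM3", "SYMPTOM4", "SYMPTOM5"]
    #   VAX_CSV_COLS = ["VAERS_ID", "VAX_TYPE", "VAX_MANU", "VAX_DOSE_SERIES"]
    #   MAX_DATE = "2021-07-01"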
##########
# Helpers
##########
def get_df_list(self, folder_path: str) -> list:
"""Get a list of csv filepaths to load as DataFrames
Args:
folder_path (str): relative file path to raw data folder containing csv files
Returns:
df_list (list): a list of absolute filepaths to load as DataFrames
"""
full_folder_path = Path(__file__).parents[2] / folder_path
df_list = [
file
for file in glob.glob(
f'{full_folder_path}/*.csv'
)
]
return df_list
def read_csvs(self, df_list: list) -> pd.DataFrame:
"""Read a list of csv filepaths as pandas DataFrames
Args:
df_list (list): a list of absolute filepaths to load as DataFrames
Returns:
main_df (pd.DataFrame): main raw DataFrame for VAERS dataset, from 2021VAERSDATA.csv
symp_df (pd.DataFrame): raw DataFrame for SYMPTOMS csv file, from 2021VAERSSYMPTOMS.csv
vax_df (pd.DataFrame): raw DataFrame for vaccine csv file, from 2021VAERSVAX.csv
"""
for file_path in df_list:
if 'DATA' in Path(file_path).name:
main_df = pd.read_csv(
file_path,
na_values = [
"n/a", "na", "-", "<NA>", "Na", "None", "none"
],
low_memory = False
)
elif 'SYMPTOMS' in Path(file_path).name:
symp_df = pd.read_csv(file_path)
elif 'VAX' in Path(file_path).name:
vax_df = | pd.read_csv(file_path) | pandas.read_csv |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import corner
import numpy as np
import pandas as pd
import emcee
import sys
import mm_likelihood
from astropy.time import Time
import commentjson as json
import mm_param
import mm_make_geo_pos
from tqdm import tqdm
import matplotlib.colors as colors
import functools
class ReadJson(object):
def __init__(self, filename):
print('Read the runprops.txt file')
self.data = json.load(open(filename))
def outProps(self):
return self.data
#chain = (nwalkers, nlink, ndim)
def sample_deltas(i, draws, names, fixed_df, total_df_names, fit_scale, names_dict, runprops, nobj, obsdf, geo_obj_pos):
paramdf = mm_param.from_fit_array_to_param_df(draws[i,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0]
length = len(paramdf)
dlong = np.zeros((runprops.get('numobjects')-1, length))
dlat = np.zeros((runprops.get('numobjects')-1, length))
resid_long = np.zeros((runprops.get('numobjects')-1, length))
resid_lat = np.zeros((runprops.get('numobjects')-1, length))
drawparams = paramdf.iloc[:,:-nobj].values
DeltaLong_Model, DeltaLat_Model, obsdf = mm_likelihood.mm_chisquare(paramdf, obsdf, runprops, geo_obj_pos, gensynth = True)
chisq_total, residuals = mm_likelihood.mm_chisquare(paramdf, obsdf, runprops, geo_obj_pos)
for j in range(0,runprops.get('numobjects')-1):
dlong[j,:] = DeltaLong_Model[j]
dlat[j,:] = DeltaLat_Model[j]
resid_long[j,:] = residuals[2*j]
resid_lat[j,:] = residuals[2*j+1]
return dlong, dlat, drawparams, resid_long, resid_lat
def posterior(sampler, fit_scale, float_names, obsdf, runprops, geo_obj_pos, fixed_df, total_df_names, pool):
numdraws = 1000
# Getting log likelihood posterior values and flatchain for use throughout
burnin = int(runprops.get('nburnin'))
clusterburn = int(runprops.get('clustering_burnin'))
thin_plots = int(runprops.get('nthinning'))
flatchain = sampler.get_chain(discard=int(burnin/thin_plots+clusterburn/thin_plots),flat = True, thin=thin_plots)
print(flatchain.shape, 'shape')
all_llhoods = sampler.get_log_prob(discard=int(burnin/thin_plots+clusterburn/thin_plots),flat = True, thin=thin_plots)
# Getting parameter names
names = []
for i in float_names:
names.append(i)
names_dict = runprops.get("names_dict")
# Choose random draws from the flatchain
drawsindex = np.random.randint(flatchain.shape[0], size = numdraws)
draws = flatchain[drawsindex,:]
llhoods = all_llhoods[drawsindex]
# Get time arrays
converttimes = ["2021-10-01","2022-09-30"]
t = Time(converttimes)
timesdic = {'start': t.isot[0], 'stop': t.isot[1], 'step': '1d'}
# Make a geocentric position file
#geo_obj_pos = mm_make_geo_pos.mm_make_geo_pos(objname, timesdic, runprops, True)
# Creating a fake observtions data frame
times = geo_obj_pos.values[:,0].flatten()
fakeobsdf = obsdf.loc[[0,1],:]
for i in range(len(times)):
if i == 0 or i == 1:
fakeobsdf.iloc[i,0] = times[i]
fakeobsdf = fakeobsdf.append(fakeobsdf.iloc[-1,:])
fakeobsdf['time'].iloc[-1] = times[i]
fakeobsdf = fakeobsdf.iloc[2:]
# Creating arrays to hold outputs
dlong = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
dlat = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
resid_long = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
resid_lat = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
# Holding paramvalues
nobj = runprops.get('numobjects')
print(mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0])
ndims = mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0].iloc[:,:-nobj].size
print(ndims)
paramnames = mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0].columns.tolist()[0:-nobj]
print(paramnames)
drawparams = np.zeros((ndims, numdraws))
# Looping to get model values
deltas = functools.partial(sample_deltas, draws=draws, names=names, fixed_df=fixed_df, total_df_names=total_df_names, fit_scale=fit_scale, names_dict=names_dict, runprops=runprops, nobj=nobj, obsdf=obsdf, geo_obj_pos=geo_obj_pos)
x = tqdm(range(draws.shape[0]))
data = pool.map(deltas, x)
    # Re-initialise the per-draw arrays with one entry per epoch in `times` (assumed length).
    dlong = np.zeros((draws.shape[0], 2, times.size))
    dlat = np.zeros((draws.shape[0], 2, times.size))
    resid_long = np.zeros((draws.shape[0], 2, times.size))
    resid_lat = np.zeros((draws.shape[0], 2, times.size))
for i in range(len(data)):
dlong[i] = data[i][0]
dlat[i] = data[i][1]
drawparams[:,i] = data[i][2]
resid_long[i] = data[i][3]
resid_lat[i] = data[i][4]
# Now collapse the arrays with a std call
dlongstd = np.std(dlong,axis = 0)
dlatstd = np.std(dlat,axis = 0)
dlongmean = np.mean(dlong,axis = 0)
dlatmean = np.mean(dlat,axis = 0)
print(dlongstd.shape)
print(dlatstd.shape)
totaldf = pd.DataFrame(drawparams.T, columns = paramnames)
# Calculate average (mean for now) error in the real data
name_dict = runprops.get("names_dict")
objectnames = []
for i in name_dict.values():
objectnames.append(i)
typicalerror = np.zeros((2,runprops.get('numobjects')-1))
for i in range(1,runprops.get('numobjects')):
typicalerror[0,i-1] = np.median(obsdf["DeltaLong_" + objectnames[i] + "_err"].values.flatten())
typicalerror[1,i-1] = np.median(obsdf["DeltaLat_" + objectnames[i] + "_err"].values.flatten())
# Plot dlong vs dlat with color for j2
from matplotlib.backends.backend_pdf import PdfPages
#===================================Here we create the residuals heat map========================================
residpdf = PdfPages("resid_map.pdf")
for i in range(1, runprops.get('numobjects')-1):
colorcycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3','#999999', '#e41a1c', '#dede00']
xvals1 = np.linspace(-1.0,1.0,num=1000)
xvals2 = np.linspace(-2.0,2.0,num=1000)
xvals3 = np.linspace(-3.0,3.0,num=1000)
circle1 = np.sqrt(1 - xvals1**2)
circle2 = np.sqrt(4 - xvals2**2)
circle3 = np.sqrt(9 - xvals3**2)
plt.figure()
plt.plot(xvals1, circle1, color = "black")
plt.plot(xvals1,-circle1, color = "black")
plt.plot(xvals2, circle2, color = "black", alpha = 0.5)
plt.plot(xvals2,-circle2, color = "black", alpha = 0.5)
plt.plot(xvals3, circle3, color = "black", alpha = 0.25)
plt.plot(xvals3,-circle3, color = "black", alpha = 0.25)
#print(nobjects, np.array(residuals).shape, objectnames)
print('plotting ', i, ' ',objectnames[i])
plt.hist2d(resid_long[:,1,i], resid_lat[:,1,i], bins=40, range=[[-4.0, 4.0], [-3.0, 3.0]],label = objectnames[i], edgecolors = None)
plt.xlabel("Delta Longitude")
plt.ylabel("Delta Latitude")
plt.axis("equal")
plt.legend()
residpdf.close()
#==============================================We create the posterior.pdf====================================================
predictionspdf = PdfPages("posterior.pdf")
markers = ["o","D","^"]
for i in range(len(dlong[0,0,:])):
plt.figure()
plt.axis("equal")
plt.scatter(0,0, color = "black")
for j in range(runprops.get('numobjects')-1):
plt.scatter(dlong[:,j,i], dlat[:,j,i], c=llhoods, cmap = "coolwarm",marker=markers[j])
plt.errorbar(np.median(dlong[:,j,i]), np.median(dlat[:,j,i]), xerr = typicalerror[0,j], yerr = typicalerror[1,j], ecolor = "red")
plt.xlabel("dLon")
plt.ylabel("dLat")
plt.title("JD "+str(obsdf['time'][i]))
color_bar = plt.colorbar()
color_bar.set_alpha(1)
color_bar.draw_all()
color_bar.set_label('Log-Likelihood')
predictionspdf.savefig()
predictionspdf.close()
#==============================================We create the brightness.pdf====================================================
if runprops.get("photo_offset"):
mass_rat = totaldf['mass_2']/totaldf['mass_1']
bright_rat = abs(totaldf['f_val_1'])*mass_rat**(2/3)
hubble_sep_arc = 2.1*10**5*5.5*10**(-7)/2.4
brightnesspdf = PdfPages("brightness.pdf")
for i in range(len(dlong[0,0,:])):
plt.figure()
plt.axis('equal')
plt.scatter(0,0, color = "black")
plt.scatter(dlong[:,0,i], dlat[:,0,i], c=bright_rat, cmap = "coolwarm", norm=colors.LogNorm())
plt.xlabel("dLon")
plt.ylabel("dLat")
plt.title("JD "+str(obsdf['time'][i]))
color_bar = plt.colorbar()
color_bar.set_alpha(1)
color_bar.draw_all()
color_bar.set_label('Brightness ratio')
brightnesspdf.savefig()
brightnesspdf.close()
#==========================================We create the brightness_seperation.pdf=================================================
brightnessratpdf = PdfPages("brightness_seperation.pdf")
for i in range(len(dlong[0,0,:])):
plt.figure()
plt.axis('equal')
sep = np.sqrt(dlong[:,0,i]**2+dlat[:,0,i]**2)
plt.scatter(sep, bright_rat, c=llhoods, cmap = "coolwarm")
plt.axvline(x=hubble_sep_arc, color='r')
plt.axvline(x=0.2, color='b')
plt.xlabel("total separation")
plt.ylabel("brightness ratio")
plt.title("JD "+str(obsdf['time'][i]))
color_bar = plt.colorbar()
color_bar.set_label('Log-Likelihood')
color_bar.set_alpha(1)
color_bar.draw_all()
brightnessratpdf.savefig()
brightnessratpdf.close()
#Actually build the plots here
#====================================================================================================
import glob, os
if __name__ == '__main__':
from schwimmbad import MPIPool
with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit(0)
if 'results' in os.getcwd():
getData = ReadJson('runprops.txt')
else:
getData = ReadJson('most_recent_runprops.txt')
runprops = getData.outProps()
objname = runprops.get("objectname")
if not 'results' in os.getcwd():
os.chdir('../../../results/'+objname+'/')
results = max(glob.glob(os.path.join(os.getcwd(), '*/')), key=os.path.getmtime)
os.chdir(results)
backend = emcee.backends.HDFBackend('chain.h5')
fit_scale = pd.read_csv('fit_scale.csv',index_col=0)
float_names = runprops.get('float_names')
obsdf = | pd.read_csv(objname+'_obs_df.csv',index_col=0) | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement DataFrame public API as Pandas does.
Almost all docstrings for public and magic methods should be inherited from Pandas
for better maintability. So some codes are ignored in pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manually add documentation for methods which are not presented in pandas.
"""
import pandas
from pandas.core.common import apply_if_callable
from pandas.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.util._validators import validate_bool_kwarg
from pandas.io.formats.printing import pprint_thing
from pandas._libs.lib import no_default
from pandas._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_pandas, hashable
from modin.config import IsExperimental
from .utils import (
from_pandas,
from_non_pandas,
)
from .iterator import PartitionIterator
from .series import Series
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .groupby import DataFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(pandas.DataFrame, excluded=[pandas.DataFrame.__init__])
class DataFrame(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""
Distributed DataFrame object backed by Pandas dataframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Series, arrays, constants, or list-like
objects.
index: pandas.Index, list, ObjectID
The row index for this DataFrame.
columns: pandas.Index
The column names for this DataFrame, in pandas Index object.
dtype: Data type to force.
Only a single dtype is allowed. If None, infer
copy: bool
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
"""
if isinstance(data, (DataFrame, Series)):
self._query_compiler = data._query_compiler.copy()
if index is not None and any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if isinstance(data, Series):
# We set the column name if it is not in the provided Series
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Series, pandas clears
# the DataFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_pandas(
DataFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and any(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_pandas(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = getattr(data, "dtype", None)
values = [
obj._to_pandas() if isinstance(obj, Series) else obj for obj in data
]
if isinstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not isinstance(
data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
):
data = {
k: v._to_pandas() if isinstance(v, Series) else v
for k, v in data.items()
}
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
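    # Illustrative construction calls accepted by __init__ above (hypothetical data),
    # where `pd` is `modin.pandas`:
    #   pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})     # dict of lists
    #   pd.DataFrame(np.arange(6).reshape(2, 3))          # NumPy ndarray
    #   pd.DataFrame(existing_modin_df, columns=["a"])    # copy/subset of another modin DataFrame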
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
width = min(width, len(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_columns(self):
"""
Get the columns for this DataFrame.
Returns
-------
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_columns(self, new_columns):
"""
Set the columns for this DataFrame.
Parameters
----------
new_columns: The new index to set this
"""
self._query_compiler.columns = new_columns
columns = property(_get_columns, _set_columns)
@property
def ndim(self):
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
def drop_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
return super(DataFrame, self).drop_duplicates(
subset=subset, keep=keep, inplace=inplace
)
@property
def dtypes(self):
return self._query_compiler.dtypes
def duplicated(self, subset=None, keep="first"):
import hashlib
df = self[subset] if subset is not None else self
# if the number of columns we are checking for duplicates is larger than 1, we must
# hash them to generate a single value that can be compared across rows.
if len(df.columns) > 1:
hashed = df.apply(
lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
).to_frame()
else:
hashed = df
duplicates = hashed.apply(lambda s: s.duplicated(keep=keep)).squeeze(axis=1)
# remove Series name which was assigned automatically by .apply
duplicates.name = None
return duplicates
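    # Illustrative behaviour of duplicated() above (hypothetical frame):
    #   df = pd.DataFrame({"a": [1, 1, 2], "b": ["x", "x", "y"]})
    #   df.duplicated()              -> [False, True, False]   (row 1 repeats row 0)
    #   df.duplicated(keep=False)    -> [True, True, False]
    #   df.duplicated(subset=["a"])  -> [False, True, False]   (single column, no hashing needed)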
@property
def empty(self):
return len(self.columns) == 0 or len(self.index) == 0
@property
def axes(self):
return [self.index, self.columns]
@property
def shape(self):
return len(self.index), len(self.columns)
def add_prefix(self, prefix):
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, type(self).__name__)(**init_kwargs).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
dropna: bool = True,
):
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
drop = False
if (
not isinstance(by, (pandas.Series, Series))
and is_list_like(by)
and len(by) == 1
):
by = by[0]
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
or hasattr(self.axes[axis], "name")
and self.axes[axis].name == by
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if (
not isinstance(by, Series)
and axis == 0
and all(
(
(isinstance(o, str) and (o in self))
or (isinstance(o, Series) and (o._parent is self))
)
for o in by
)
):
# We can just revert Series back to names because the parent is
# this dataframe:
by = [o.name if isinstance(o, Series) else o for o in by]
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
isinstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch and any(
isinstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if isinstance(o, Series) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
dropna=dropna,
)
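    # Illustrative `by` arguments and how the dispatch above treats them (hypothetical frame):
    #   df.groupby("a")           # column name: the data column is dropped from the groups (drop=True)
    #   df.groupby(df["a"])       # Series owned by this frame: likewise drop=True
    #   df.groupby(["a", "b"])    # list of own columns: fast multi-column path
    #   df.groupby(len)           # callable: mapped over the index first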
def keys(self):
return self.columns
def transpose(self, copy=False, *args):
return DataFrame(query_compiler=self._query_compiler.transpose(*args))
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"add",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=False):
if sort is False:
warnings.warn(
"Due to https://github.com/pandas-dev/pandas/issues/35092, "
"Pandas ignores sort=False; Modin correctly does not sort."
)
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is not None:
# other must have the same index name as self, otherwise
# index name will be reset
name = other.name
# We must transpose here because a Series becomes a new row, and the
# structure of the query compiler is currently columnar
other = other._query_compiler.transpose()
other.index = pandas.Index([name], name=self.index.name)
else:
# See note above about transpose
other = other._query_compiler.transpose()
elif isinstance(other, list):
if not all(isinstance(o, BasePandasDataset) for o in other):
other = DataFrame(pandas.DataFrame(other))._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = (
self.index.append(other.index)
if not isinstance(other, list)
else self.index.append([o.index for o in other])
)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
def assign(self, **kwargs):
df = self.copy()
for k, v in kwargs.items():
if callable(v):
df[k] = v(df)
else:
df[k] = v
return df
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
backend=backend,
**kwargs,
)
def combine(self, other, func, fill_value=None, overwrite=True):
return super(DataFrame, self).combine(
other, func, fill_value=fill_value, overwrite=overwrite
)
def compare(
self,
other: "DataFrame",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> "DataFrame":
return self._default_to_pandas(
pandas.DataFrame.compare,
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def corr(self, method="pearson", min_periods=1):
return self.__constructor__(
query_compiler=self._query_compiler.corr(
method=method,
min_periods=min_periods,
)
)
def corrwith(self, other, axis=0, drop=False, method="pearson"):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop, method=method
)
def cov(self, min_periods=None, ddof: Optional[int] = 1):
numeric_df = self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
is_notna = True
if all(numeric_df.notna().all()):
if min_periods is not None and min_periods > len(numeric_df):
result = np.empty((numeric_df.shape[1], numeric_df.shape[1]))
result.fill(np.nan)
return numeric_df.__constructor__(result)
else:
cols = numeric_df.columns
idx = cols.copy()
numeric_df = numeric_df.astype(dtype="float64")
denom = 1.0 / (len(numeric_df) - ddof)
means = numeric_df.mean(axis=0)
result = numeric_df - means
result = result.T._query_compiler.conj().dot(result._query_compiler)
else:
result = numeric_df._query_compiler.cov(min_periods=min_periods)
is_notna = False
if is_notna:
result = numeric_df.__constructor__(
query_compiler=result, index=idx, columns=cols
)
result *= denom
else:
result = numeric_df.__constructor__(query_compiler=result)
return result
def dot(self, other):
if isinstance(other, BasePandasDataset):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindex(index=common)._query_compiler
if isinstance(other, DataFrame):
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=False
)
)
else:
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=True
)
)
other = np.asarray(other)
if self.shape[1] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".format(self.shape, other.shape)
)
if len(other.shape) > 1:
return self.__constructor__(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
def eq(self, other, axis="columns", level=None):
return self._binary_op(
"eq", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def equals(self, other):
if isinstance(other, pandas.DataFrame):
# Copy into a Modin DataFrame to simplify logic below
other = DataFrame(other)
return (
self.index.equals(other.index)
and self.columns.equals(other.columns)
and self.eq(other).all().all()
)
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
return self._default_to_pandas(
pandas.DataFrame.explode, column, ignore_index=ignore_index
)
def eval(self, expr, inplace=False, **kwargs):
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_update_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
return getattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
def floordiv(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"floordiv",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
@classmethod
def from_dict(
cls, data, orient="columns", dtype=None, columns=None
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_dict`")
return from_pandas(
pandas.DataFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns
)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_records`")
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
return self._binary_op(
"ge", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def gt(self, other, axis="columns", level=None):
return self._binary_op(
"gt", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwds,
): # pragma: no cover
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwds,
)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
def put_str(src, output_len=None, spaces=2):
src = str(src)
return src.ljust(output_len if output_len else len(src)) + " " * spaces
def format_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._summary()
columns = self.columns
columns_len = len(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.format(dtype, count) for dtype, count in dtypes.value_counts().items()])}"
if max_cols is None:
max_cols = 100
exceeds_info_cols = columns_len > max_cols
if buf is None:
buf = sys.stdout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
# We're gonna take items from `non_null_count` in a loop, which
# works kinda slow with `Modin.Series`, that's why we call `_to_pandas()` here
# that will be faster.
non_null_count = self.count()._to_pandas()
if memory_usage is None:
memory_usage = True
def get_header(spaces=2):
output = []
head_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengths = {}
lengths["head"] = max(len(head_label), len(pprint_thing(len(columns))))
lengths["column"] = max(
len(column_label), max(len(pprint_thing(col)) for col in columns)
)
lengths["dtype"] = len(dtype_label)
dtype_spaces = (
max(lengths["dtype"], max(len(pprint_thing(dtype)) for dtype in dtypes))
- lengths["dtype"]
)
header = put_str(head_label, lengths["head"]) + put_str(
column_label, lengths["column"]
)
if null_counts:
lengths["null"] = max(
len(null_label),
max(len(pprint_thing(x)) for x in non_null_count)
+ len(non_null_label),
)
header += put_str(null_label, lengths["null"])
header += put_str(dtype_label, lengths["dtype"], spaces=dtype_spaces)
output.append(header)
delimiters = put_str(delimiter * lengths["head"]) + put_str(
delimiter * lengths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengths["null"])
delimiters += put_str(delimiter * lengths["dtype"], spaces=dtype_spaces)
output.append(delimiters)
return output, lengths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {len(columns)} columns):"
header, lengths = get_header()
output.extend([columns_line, *header])
for i, col in enumerate(columns):
i, col, dtype = map(pprint_thing, [i, col, dtypes[col]])
to_append = put_str(" {}".format(i), lengths["head"]) + put_str(
col, lengths["column"]
)
if null_counts:
non_null = pprint_thing(non_null_count[col])
to_append += put_str(
"{} non-null".format(non_null), lengths["null"]
)
to_append += put_str(dtype, lengths["dtype"], spaces=0)
output.append(to_append)
def non_verbose_repr(output):
output.append(columns._summary(name="Columns"))
if verbose:
verbose_repr(output)
else:
non_verbose_repr(output)
output.append(dtypes_line)
if memory_usage:
deep = memory_usage == "deep"
mem_usage_bytes = self.memory_usage(index=True, deep=deep).sum()
mem_line = f"memory usage: {format_size(mem_usage_bytes)}"
output.append(mem_line)
output.append("")
buf.write("\n".join(output))
def insert(self, loc, column, value, allow_duplicates=False):
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if isinstance(value, Series):
# TODO: Remove broadcast of Series
value = value._to_pandas()
if not self._query_compiler.lazy_execution and len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
elif len(self.columns) == 0 and loc == 0:
new_query_compiler = DataFrame(
data=value, columns=[column], index=self.index
)._query_compiler
else:
if (
is_list_like(value)
and not isinstance(value, pandas.Series)
and len(value) != len(self.index)
):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=None,
**kwargs,
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def iterrows(self):
def iterrow_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
def items_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
return self.items()
def itertuples(self, index=True, name="Pandas"):
def itertuples_builder(s):
return next(s._to_pandas().to_frame().T.itertuples(index=index, name=name))
partition_iterator = PartitionIterator(self, 0, itertuples_builder)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if on is not None:
return self.__constructor__(
query_compiler=self._query_compiler.join(
other._query_compiler,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
new_columns = (
pandas.DataFrame(columns=self.columns)
.join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
)
.columns
)
other = [other]
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
new_columns = (
pandas.DataFrame(columns=self.columns)
.join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
)
.columns
)
new_frame = DataFrame(
query_compiler=self._query_compiler.concat(
1, [obj._query_compiler for obj in other], join=how, sort=sort
)
)
new_frame.columns = new_columns
return new_frame
def le(self, other, axis="columns", level=None):
return self._binary_op(
"le", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
return self._binary_op(
"lt", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
):
return self.__constructor__(
query_compiler=self._query_compiler.melt(
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
)
def memory_usage(self, index=True, deep=False):
if index:
result = self._reduce_dimension(
self._query_compiler.memory_usage(index=False, deep=deep)
)
index_value = self.index.memory_usage(deep=deep)
return Series(index_value, index=["Index"]).append(result)
return super(DataFrame, self).memory_usage(index=index, deep=deep)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
if isinstance(right, Series):
if right.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
right = right.to_frame()
if not isinstance(right, DataFrame):
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(right)} was passed"
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
return self.__constructor__(
query_compiler=self._query_compiler.merge(
right._query_compiler,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
)
def mod(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"mod",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def mul(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"mul",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
rmul = multiply = mul
def ne(self, other, axis="columns", level=None):
return self._binary_op(
"ne", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def nlargest(self, n, columns, keep="first"):
return DataFrame(query_compiler=self._query_compiler.nlargest(n, columns, keep))
def nsmallest(self, n, columns, keep="first"):
return DataFrame(
query_compiler=self._query_compiler.nsmallest(
n=n, columns=columns, keep=keep
)
)
def slice_shift(self, periods=1, axis=0):
if periods == 0:
return self.copy()
if axis == "index" or axis == 0:
if abs(periods) >= len(self.index):
return DataFrame(columns=self.columns)
else:
if periods > 0:
new_index = self.index.drop(labels=self.index[:periods])
new_df = self.drop(self.index[-periods:])
else:
new_index = self.index.drop(labels=self.index[periods:])
new_df = self.drop(self.index[:-periods])
new_df.index = new_index
return new_df
else:
if abs(periods) >= len(self.columns):
return DataFrame(index=self.index)
else:
if periods > 0:
new_columns = self.columns.drop(labels=self.columns[:periods])
new_df = self.drop(self.columns[-periods:], axis="columns")
else:
new_columns = self.columns.drop(labels=self.columns[periods:])
new_df = self.drop(self.columns[:-periods], axis="columns")
new_df.columns = new_columns
return new_df
def unstack(self, level=-1, fill_value=None):
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
return self._reduce_dimension(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
else:
return DataFrame(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
def pivot(self, index=None, columns=None, values=None):
return self.__constructor__(
query_compiler=self._query_compiler.pivot(
index=index, columns=columns, values=values
)
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
):
result = DataFrame(
query_compiler=self._query_compiler.pivot_table(
index=index,
values=values,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
)
return result
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs,
):
return self._to_pandas().plot
def pow(self, other, axis="columns", level=None, fill_value=None):
if isinstance(other, Series):
return self._default_to_pandas(
"pow", other, axis=axis, level=level, fill_value=fill_value
)
return self._binary_op(
"pow",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
axis = self._get_axis_number(axis)
axis_to_apply = self.columns if axis else self.index
if (
skipna is not False
and numeric_only is None
and min_count > len(axis_to_apply)
):
new_index = self.columns if not axis else self.index
return Series(
[np.nan] * len(new_index), index=new_index, dtype=np.dtype("object")
)
data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
if level is not None:
return data.__constructor__(
query_compiler=data._query_compiler.prod_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
if min_count > 1:
return data._reduce_dimension(
data._query_compiler.prod_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
product = prod
radd = add
def query(self, expr, inplace=False, **kwargs):
ErrorMessage.non_verified_udf()
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.query(expr, **kwargs)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
errors="ignore",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
# We have to do this with the args because of how rename handles kwargs. It
# doesn't ignore None values passed in, so we have to filter them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items() if v is not None and k != "self"}
# inplace should always be true because this is just a copy, and we will use the
# results after.
kwargs["inplace"] = False
if axis is not None:
axis = self._get_axis_number(axis)
if index is not None or (mapper is not None and axis == 0):
new_index = pandas.DataFrame(index=self.index).rename(**kwargs).index
else:
new_index = None
if columns is not None or (mapper is not None and axis == 1):
new_columns = (
pandas.DataFrame(columns=self.columns).rename(**kwargs).columns
)
else:
new_columns = None
if inplace:
obj = self
else:
obj = self.copy()
if new_index is not None:
obj.index = new_index
if new_columns is not None:
obj.columns = new_columns
if not inplace:
return obj
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.replace(
to_replace=to_replace,
value=value,
inplace=False,
limit=limit,
regex=regex,
method=method,
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def rfloordiv(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rfloordiv",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def rmod(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rmod",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def rpow(self, other, axis="columns", level=None, fill_value=None):
if isinstance(other, Series):
return self._default_to_pandas(
"rpow", other, axis=axis, level=level, fill_value=fill_value
)
return self._binary_op(
"rpow",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def rsub(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rsub",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rtruediv",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
rdiv = rtruediv
def select_dtypes(self, include=None, exclude=None):
# Validates arguments for whether both include and exclude are None or
# if they are disjoint. Also invalidates string dtypes.
pandas.DataFrame().select_dtypes(include, exclude)
if include and not is_list_like(include):
include = [include]
elif include is None:
include = []
if exclude and not | is_list_like(exclude) | pandas.core.dtypes.common.is_list_like |
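# The DataFrame code above (join, rename, select_dtypes) leans on one trick that
# is easy to miss: axis metadata (suffixed column names, renamed index labels,
# argument validation) is computed by running the same pandas operation on
# *empty* pandas objects, which is cheap and reuses pandas' own error checking.
# A minimal, hedged sketch of that idea with plain pandas; the toy frames and
# the suffix/label names below are this sketch's own choices:
import pandas as pd

left = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
right = pd.DataFrame({"b": [5, 6], "c": [7, 8]})

# Joining empty shells yields the suffixed columns without touching any data.
new_columns = (
    pd.DataFrame(columns=left.columns)
    .join(pd.DataFrame(columns=right.columns), lsuffix="_x", rsuffix="_y")
    .columns
)
print(list(new_columns))  # ['a', 'b_x', 'b_y', 'c']

# The same idea drives the rename path: rename an empty frame that only carries
# the axis labels, then copy the resulting labels back onto the real object.
new_index = pd.DataFrame(index=left.index).rename(index={0: "row0"}).index
print(list(new_index))  # ['row0', 1]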
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index= | pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64') | pandas.Float64Index |
# Question: Please concatenate this file with this one into a single text file.
# The content of the output file should look like the sample below.
# http://www.pythonhow.com/data/sampledata.txt
# http://pythonhow.com/data/sampledata_x_2.txt
# Expected output:
# x,y
# 3,5
# 4,9
# 6,10
# 7,11
# 8,12
# 6,10
# 8,18
# 12,20
# 14,22
# 16,24
# Answer:
import pandas as pd
df1 = pd.read_csv('http://www.pythonhow.com/data/sampledata.txt')
df2 = | pd.read_csv('http://pythonhow.com/data/sampledata_x_2.txt') | pandas.read_csv |
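# A complete, hedged version of the exercise above: the two URLs come from the
# question text, while the output filename ("combined.txt") and the use of
# pandas.concat are this sketch's own choices rather than the original answer.
import pandas as pd

df1 = pd.read_csv("http://www.pythonhow.com/data/sampledata.txt")
df2 = pd.read_csv("http://pythonhow.com/data/sampledata_x_2.txt")

combined = pd.concat([df1, df2], ignore_index=True)
# index=False keeps the file to just the x,y columns shown in the expected output.
combined.to_csv("combined.txt", index=False)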
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
import numpy as np
import pandas as pd
import os
import cv2
def create_embeddings():
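    # Build one embedding per person folder under images_cropped/: for each
    # folder, try numbered crops (0.jpg, 1.jpg, ...) until cv2 can read the
    # file and MTCNN detects a face, embed that crop with InceptionResnetV1,
    # and finally print the pairwise embedding distances as a DataFrame.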
workers = 0 if os.name == 'nt' else 4
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
mtcnn = MTCNN(
image_size=160, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
device=device
)
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)
pictures=[]
directory=os.listdir('images_cropped')
for i in range(len(directory)):
count = 0
flag = False
while(flag==False):
path='images_cropped'+'/'+str(directory[i])+'/'+str(count)+'.jpg'
pic=cv2.imread(path)
print(count)
if pic is not None:
tens=mtcnn(pic)
if tens is not None:
pictures.append(tens)
flag=True
break
count=0
print(directory[i])
print('here')
else:
count=count+1
else:
print(directory[i])
count=count+1
print(pictures)
embeddings=[]
for x in pictures:
embeddings.append(resnet(x.unsqueeze(0)).detach().cpu())
dists = [[(e1 - e2).norm().item() for e2 in embeddings] for e1 in embeddings]
print( | pd.DataFrame(dists, columns=directory, index=directory) | pandas.DataFrame |
##############################################################
# #
# <NAME> and <NAME> (2017) #
# Machine Learning for the Quantified Self #
# Springer #
# Chapter 3 #
# #
##############################################################
import scipy.special
import scipy.spatial.distance
import math
from sklearn.mixture import GaussianMixture
import numpy as np
import pandas as pd
import util.util as util
import copy
from tqdm import tqdm
# Class for outlier detection algorithms based on some distribution of the data. They
# all consider only single points per row (i.e. one column).
class DistributionBasedOutlierDetection:
# Finds outliers in the specified column of datatable and adds a binary column with
# the same name extended with '_outlier' that expresses the result per data point.
def chauvenet(self, data_table, col, C=2):
# Taken partly from: https://www.astro.rug.nl/software/kapteyn/
        # Compute the mean and standard deviation.
mean = data_table[col].mean()
std = data_table[col].std()
N = len(data_table.index)
criterion = 1.0/(C*N)
# Consider the deviation for the data points.
deviation = abs(data_table[col] - mean)/std
# Express the upper and lower bounds.
low = -deviation/math.sqrt(2)
high = deviation/math.sqrt(2)
prob = []
mask = []
# Pass all rows in the dataset.
for i in range(0, len(data_table.index)):
# Determine the probability of observing the point
prob.append(1.0 - 0.5 * (scipy.special.erf(high[i]) - scipy.special.erf(low[i])))
# And mark as an outlier when the probability is below our criterion.
mask.append(prob[i] < criterion)
data_table[col + '_outlier'] = mask
return data_table
# Fits a mixture model towards the data expressed in col and adds a column with the probability
# of observing the value given the mixture model.
def mixture_model(self, data_table, col, n=3):
print('Applying mixture models')
# Fit a mixture model to our data.
data = data_table[data_table[col].notnull()][col]
data_table = data_table[data_table[col].notnull()]
g = GaussianMixture(n_components=n, max_iter=100, n_init=1)
reshaped_data = np.array(data.values.reshape(-1,1))
g.fit(reshaped_data)
# Predict the probabilities
probs = g.score_samples(reshaped_data)
# Create the right data frame and concatenate the two.
data_probs = pd.DataFrame(np.power(10, probs), index=data.index, columns=[col+'_mixture'])
data_table = pd.concat([data_table, data_probs], axis=1)
return data_table
# Class for distance based outlier detection.
class DistanceBasedOutlierDetection:
# Create distance table between rows in the data table. Here, only cols are considered and the specified
# distance function is used to compute the distance.
def distance_table(self, data_table, cols, d_function):
data_table[cols] = data_table.loc[:, cols].astype('float32')
return pd.DataFrame(scipy.spatial.distance.squareform(util.distance(data_table.loc[:, cols], d_function)),
columns=data_table.index, index=data_table.index).astype('float32')
    # The simplest distance-based algorithm. We assume a distance function (e.g. 'euclidean'),
    # a minimum distance dmin, and a fraction fmin: a point is marked as an outlier when more
    # than fmin of all points lie farther than dmin away from it.
def simple_distance_based(self, data_table, cols, d_function, dmin, fmin):
print('Calculating simple distance-based criterion.')
# Normalize the dataset first.
new_data_table = util.normalize_dataset(data_table.dropna(axis=0, subset=cols), cols)
# Create the distance table first between all instances:
self.distances = self.distance_table(new_data_table, cols, d_function)
mask = []
# Pass the rows in our table.
for i in tqdm(range(0, len(new_data_table.index))):
            # Check what fraction of the points lies beyond dmin.
frac = (float(sum([1 for col_val in self.distances.iloc[i,:].tolist() if col_val > dmin]))/len(new_data_table.index))
# Mark as an outlier if beyond the minimum frequency.
mask.append(frac > fmin)
data_mask = | pd.DataFrame(mask, index=new_data_table.index, columns=['simple_dist_outlier']) | pandas.DataFrame |
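# A self-contained sketch of the Chauvenet criterion implemented above, on toy
# data (the column name and values below are made up for illustration). The
# idea: flag a point when the two-sided tail probability of seeing a deviation
# at least that large falls below 1/(C*N), i.e. fewer than 1/C such points
# would be expected in a sample of size N.
import numpy as np
import pandas as pd
import scipy.special

data = pd.DataFrame({"acc_x": [0.10, 0.20, 0.15, 0.12, 0.18, 5.0]})
col, C = "acc_x", 2

mean, std, N = data[col].mean(), data[col].std(), len(data.index)
deviation = (data[col] - mean).abs() / std
# Two-sided tail probability under a normal distribution: erfc(|z| / sqrt(2)).
prob = scipy.special.erfc(deviation / np.sqrt(2))
data[col + "_outlier"] = prob < 1.0 / (C * N)
print(data)  # only the 5.0 reading should be flagged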
#!/usr/bin/python
from pathlib import Path
import numpy as np
import pandas as pd
from bgcArgo import sprof
sprof.set_dirs(
argo_path='/Users/gordonc/Documents/data/Argo',
woa_path='/Users/gordonc/Documents/data/WOA18',
ncep_path='/Users/gordonc/Documents/data/NCEP'
)
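# Recompute per-cycle gains for each float with bgcArgo.sprof (against the WOA
# reference, and against NCEP where the calculation succeeds) and collect them
# next to the SAGE gains stored in the paired *guidata* / *floatdata* HDF files.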
sagepath = Path('/Users/gordonc/Documents/data/Argo/sage/')
gui_files = sagepath.glob('*guidata*.h5')
flt_files = sagepath.glob('*floatdata*.h5')
df = pd.DataFrame(columns=['SDN', 'CYCLE', 'LAT', 'LON', 'SURF_SAT', 'REF_SAT', 'GAINS', 'pyGAIN', 'WMO'])
for gfn, ffn in zip(gui_files, flt_files):
gdf = pd.read_hdf(gfn)
fdf = pd.read_hdf(ffn)
wmo = int(str(ffn).split('\\')[-1].split('_')[-1].split('.')[0])
syn = sprof(wmo)
woa_gains = syn.calc_gains(ref='WOA')
try:
print(wmo)
ncep_gains = syn.calc_gains()
ncep_flag = True
except:
print('shoot {}'.format(wmo))
ncep_flag = False
pyWOAGAIN = np.nan*np.ones((gdf.shape[0]))
pyAIRGAIN = np.nan*np.ones((gdf.shape[0]))
for i,c in enumerate(gdf.CYCLE):
if c in syn.CYCLE:
ix = syn.CYCLE == c
g = woa_gains[ix]
if ncep_flag:
a = ncep_gains[ix]
if g.shape[0] > 1:
g = np.nanmean(g)
if ncep_flag:
a = np.nanmean(a)
else:
g = g[0]
if ncep_flag:
a = a[0]
pyWOAGAIN[i] = g
if ncep_flag:
pyAIRGAIN[i] = a
gdf['pyWOAGAIN'] = pyWOAGAIN
gdf['pyAIRGAIN'] = pyAIRGAIN
gdf['WMO'] = np.array(gdf.shape[0]*[wmo])
df = | pd.concat([df, gdf]) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old-style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
        # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
        # Only categoricals with the same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
import json, time
from decimal import Decimal
import pandas as pd
import numpy as np
import requests
class API():
def __init__(self, base_url):
self.base_url = base_url
@staticmethod
def _http_error_message(e, r):
response_text = json.loads(r.text)['message']
return f'\n\nRequests HTTP error: {e}\n\n\tUrl: {r.url}\n\tStatus Code: {r.status_code}\n\tResponse Text: {response_text}\n\tNote: Check the url and endpoint\n'
@staticmethod
def _random_float_between_zero_one():
rand_int_below_ten = Decimal(str(np.random.randint(11)))
return float(rand_int_below_ten / Decimal('10'))
def get(self, endpoint, params={}, auth=None):
url = f'{self.base_url}{endpoint}'
try:
r = requests.get(url=url, auth=auth, params=params)
r.raise_for_status()
except requests.ConnectionError as e:
raise e
except requests.HTTPError as e:
raise requests.HTTPError(self._http_error_message(e, r))
else:
return r
def post(self, endpoint, params={}, data={}, auth=None):
url = f'{self.base_url}{endpoint}'
data = json.dumps(data)
try:
r = requests.post(url=url, auth=auth, params=params, data=data)
r.raise_for_status()
except requests.HTTPError as e:
raise requests.HTTPError(self._http_error_message(e, r))
except requests.ConnectTimeout as e:
raise e
except requests.ConnectionError as e:
raise e
else:
return r
def handle_page_nation(self, endpoint, start_date, date_field='created_at', params={}, auth=None):
all_results = []
def make_request(after=None):
response = self.get(endpoint, params={**params, 'after':after}, auth=auth)
end_cursor = response.headers.get('cb-after', None) # end of page index; used for older results
data = response.json()
number_of_results = len(data)
if number_of_results == 0:
# no data available in this page (request)
return
# flatten data;
df = pd.json_normalize(data, sep='.')
df[date_field] = pd.to_datetime(df[date_field])
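# Hypothetical usage sketch for the API wrapper above (not part of the original file).
# The base URL, endpoint and params are placeholder assumptions; any JSON REST API reachable
# through the wrapped requests calls would work the same way.
def _example_api_usage():
    client = API("https://api.exchange.example.com")  # placeholder base URL
    response = client.get("/products", params={"limit": 100})
    return response.json()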
'''
Created on 22-Feb-2019
@author: <NAME>,
Junior Research Fellow (PhD Student),
National Centre fo Microbial Resource,
National Centre for Cell Science,
Pune, Maharshtra, India.
'''
import pandas as pd
import numpy as np
import subprocess
import os
import re
import sys
def baseDir():
"""Return baseDir of installation
"""
return os.path.dirname(os.path.abspath(__file__))
def blast(query,db,in_numCores,cwd):
"""
Do BLAST with provided query and database
Args:
query: Query file
db: db path/name for doing blast
in_numCores: Number of cores to use for BLAST
cwd: Current working directory
Returns:
None
"""
if(sys.platform in ['darwin','linux','cygwin']):
cmd = "blastn -out " + os.path.join(cwd,"out.blast") + " -outfmt 6 -query " + query + " -db " + db + " -num_threads " + in_numCores + " -max_target_seqs 1"
elif(sys.platform == 'win32'):
cmd = "blastn.exe -out " + os.path.join(cwd,"out.blast") + " -outfmt 6 -query " + query + " -db " + db + " -num_threads " + in_numCores + " -max_target_seqs 1"
subBlast = subprocess.Popen(cmd,stdin=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
output,error = subBlast.communicate()
if(subBlast.returncode != 0):
exit(error)
return None
def selectBlastHits_assignGenus_subsetOtuTable(blastOut,otuTable,blastcutoff):
"""
Select best hits from BLAST output and assign taxonmy with respect to given identity cutoff
Args:
blastOut: Filtered blast output file (outfmt 6)
otuTable: OTU table
blastcutoff: percent identity cutoff to assign genus to the sequences
Returns:
DataFrame: OTU/ASV table containing OTUs/ASVs which have been assigned taxonomy through BLAST
"""
df = pd.read_csv(blastOut,sep='\t',index_col=0,header=None)
df = df.groupby(level=0).max()
df = df.loc[df[2]>=blastcutoff]
if(df.empty):
exit('None of OTU/ASV sequences passed the given percent identity cut-off')
otuId_tax_dict = dict(zip(df.index,df[1].str.split("_",expand=True)[1]))
df = pd.read_csv(otuTable,sep="\t",index_col=0)
df.columns = df.columns.astype(str)
df = df.reindex(list(otuId_tax_dict.keys()))
df["taxonomy"] = list(otuId_tax_dict.values())
df = df.groupby(["taxonomy"]).sum()
return otuId_tax_dict,df
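# Hypothetical usage sketch (not part of the original script). The FASTA file, database path,
# OTU table and the 97% identity cut-off below are placeholder assumptions.
def _example_blast_and_assign(cwd):
    blast("asv_sequences.fasta", "db/16S_genus_db", "4", cwd)
    otu_tax_map, genus_abund = selectBlastHits_assignGenus_subsetOtuTable(
        os.path.join(cwd, "out.blast"), "otu_table.tsv", blastcutoff=97)
    # genus_abund is the OTU table collapsed to genus level via the best BLAST hit per OTU
    return otu_tax_map, genus_abund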
def makeTable16S(df,func,taxonomyList):
"""
Consolidate the 16S rRNA copy number table with respect to taxonomy
Args:
df: Dataframe of 16S rRNA copy number
func: mean/mode/median to be taken
taxonomyList: list of taxonomy
Returns:
DataFrame: consolidated dataframe for organisms provided in taxonomy list
"""
#make table
temp_dict = dict()
for tax in taxonomyList:
num = round(float(df[df.index.str.contains(tax,na=False)].mean()),2)
if(num == 0):
temp_dict[tax] = 1
else:
temp_dict[tax] = num
df_consolidated = pd.DataFrame.from_dict(temp_dict,orient="index")
del(temp_dict)
return df_consolidated
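# Hypothetical illustration of makeTable16S (not part of the original script). 'df_16S' stands
# in for the bundled 16S copy-number table indexed by organism name; note that the current
# implementation averages copy numbers regardless of the 'func' argument.
def _example_copy_number_table(df_16S):
    copy_numbers = makeTable16S(df_16S, "mean", ["Escherichia", "Bacillus"])
    return copy_numbers  # one row per genus; a mean of 0 falls back to a copy number of 1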
def makeKOTable(df,abundData,coreNum):
"""
Consolidate the KO copy number table with respect to OTU table
Args:
df: Gene copy number table (dataframe)
abundData: OTU abundance table (output of selectBlastHits_assignGenus_subsetOtuTable)
coreNum: value in the range 0 to 1. A gene present in at least coreNum fraction of the genomes matching a genus is considered a core gene.
Returns:
DataFrame: Consolidated gene copy number table (dataframe)
"""
# old
taxonomyList = list(abundData.index)
dfToReturn = pd.DataFrame()
for tax in taxonomyList:
temp_df = df[df.index.str.contains(tax,na=False)]
n = round(temp_df.shape[0]*coreNum)
temp_df = temp_df[temp_df.columns[temp_df.astype(bool).sum()>=n]]
median_series = temp_df.mean()
median_series[median_series.between(0,1,False)] = 1
#median_df = pd.Series.to_frame(median_series).transpose().round()
median_df = pd.Series.to_frame(median_series).transpose()
dfToReturn = dfToReturn.append(median_df, ignore_index = True,sort=False)
dfToReturn.index = taxonomyList
#replace NA with 0
dfToReturn = dfToReturn.fillna(0)
return dfToReturn
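# Hypothetical illustration of the core-gene cut-off (not part of the original script).
# With coreNum=0.9 a KO is kept for a genus only when it occurs in at least 90% of the
# reference genomes matching that genus; mean copy numbers strictly between 0 and 1 are
# rounded up to 1.
def _example_core_ko_table(df_ko_copy_numbers, genus_abund):
    return makeKOTable(df_ko_copy_numbers, genus_abund, coreNum=0.9)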
def addAnnotations(metagenomeDf,keggFile):
"""
Add Kegg Annotations to predicted metagenome profile
Args:
metagenomeDf: predicted metagenome profile DataFrame
Returns: Predicted metagenome with KEGG annotations
"""
# read kegg annotations
kodf = pd.read_csv(keggFile, sep="\t", index_col=0,engine='python')
metagenomeDf = metagenomeDf.join(kodf)
return metagenomeDf
def summarizeByFun(metagenomeDf,group):
"""
Consolidate on the basis of group
Args:
metagenomeDf: Annotated Metagenme matrix
group: string by which dataFrame is to be categorized
Returns: Consolidated DataFrame
"""
return metagenomeDf.groupby(group).mean()
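# Hypothetical usage sketch chaining addAnnotations and summarizeByFun (not part of the
# original script); the KO metagenome table and the annotation file path are placeholders.
def _example_pathway_summary(ko_metagenome):
    annotated = addAnnotations(ko_metagenome,
                               os.path.join(baseDir(), "data", "ko_annotations.txt"))
    return summarizeByFun(annotated, "C")  # mean abundance per KEGG level-C category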
def runMinPath(metagenomeDf,funpredPath,outPath,typeOfPrediction):
"""
Run MinPath
Args:
metagenomeDf: Predicted Metagenome matrix
funpredPath: Path of funPred
outPath: path to store files
typeOfPrediction: kegg or metacyc depending on type of input KO or EC
Returns:
DataFrame: Pruned metagenome content dataframe
"""
#make input for minPath and run MinPath
if(typeOfPrediction == "kegg"):
minpathOutFiles = []
for sampleName in metagenomeDf.columns.difference(['Pathway_Module','A','B','C','EC']):
minPtahInFile = os.path.join(outPath,sampleName + '_minpath_in.txt')
minpathOutFiles.append(os.path.join(outPath,sampleName + '_minpath.out.details'))
#create input file and run MinPath
minPtahInFile_fh = open(minPtahInFile,"w")
minPtahInFile_fh.writelines([str(i) + "\t" + str(j) + "\n" for i,j in enumerate(list(metagenomeDf[metagenomeDf[sampleName]>0].index))])
if(sys.platform == 'linux'):
cmd = "python3 " + os.path.join(funpredPath,'MinPath1.4_micfunpred.py') + " " + funpredPath + " " + outPath + " -ko " + minPtahInFile + " -report " + os.path.join(outPath,sampleName + '_minpath.out') + " -details " + os.path.join(outPath,sampleName + '_minpath.out.details')
elif(sys.platform == 'win32'):
cmd = "python.exe " + os.path.join(funpredPath,'MinPath1.4_micfunpred.py') + " " + funpredPath + " " + outPath + " -ko " + minPtahInFile + " -report " + os.path.join(outPath,sampleName + '_minpath.out') + " -details " + os.path.join(outPath,sampleName + '_minpath.out.details')
a = os.popen(cmd).read()
#create pathway abundance dataframe from all files
metagenomeDf_reindexed = metagenomeDf.copy()
metagenome_daraframes = []
sampleName_list = []
annotation_dataframe = pd.DataFrame(columns=['KO','Pathway'])
for minpath_out in minpathOutFiles:
kos_present = []
sampleName = os.path.basename(minpath_out).split('_minpath')[0]
sampleName_list.append(sampleName)
# read KOs present as per minpath
iFH = open(minpath_out,"r")
for line in iFH.readlines():
# pathways
matchObj = re.match("^path.*\#\s(.*)",line)
if(matchObj):
pathway = matchObj.group(1)
# KOs
matchObj = re.search("(K\d+)",line)
if(matchObj):
ko = matchObj.group(1)
# create a lsit of KOs present and annotation dataframe
kos_present.append(ko)
annotation_dataframe = annotation_dataframe.append({'KO':ko,'Pathway':pathway},ignore_index=True)
kos_present = set(kos_present)
# append dataframe to dataframe list
df_temp = metagenomeDf_reindexed.loc[kos_present][sampleName]
metagenome_daraframes.append(df_temp)
# merge all dataframes
df_kos = pd.concat(metagenome_daraframes,axis=1).fillna(0)
# add annotation
annotation_dataframe = annotation_dataframe.drop_duplicates()
df_kos_annotated = pd.merge(df_kos,annotation_dataframe,left_index=True,right_on='KO')
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/30 11:31
Desc: Stock data - overview - market overview
Stock data - overview - trading summary
http://www.szse.cn/market/overview/index.html
http://www.sse.com.cn/market/stockdata/statistic/
"""
import warnings
from io import BytesIO
from akshare.utils import demjson
import pandas as pd
import requests
warnings.filterwarnings('ignore')
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
Shenzhen Stock Exchange - market overview
http://www.szse.cn/market/overview/index.html
:param date: most recent completed trading day
:type date: str
:return: Shenzhen Stock Exchange market overview
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = [
'证券类别',
'数量',
'成交金额',
'成交量',
'总股本',
'总市值',
'流通股本',
'流通市值']
temp_df['数量'] = pd.to_numeric(temp_df['数量'])
temp_df['成交金额'] = pd.to_numeric(temp_df['成交金额'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['总股本'] = pd.to_numeric(temp_df['总股本'], errors="coerce")
temp_df['总市值'] = pd.to_numeric(temp_df['总市值'], errors="coerce")
temp_df['流通股本'] = pd.to_numeric(temp_df['流通股本'], errors="coerce")
temp_df['流通市值'] = pd.to_numeric(temp_df['流通市值'], errors="coerce")
return temp_df
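# Hypothetical usage sketch (not part of the original akshare source); the date is illustrative
# and must be a completed trading day.
def _example_szse_summary():
    szse_df = stock_szse_summary(date="20200619")
    return szse_df[["证券类别", "总市值", "流通市值"]]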
def stock_sse_summary() -> pd.DataFrame:
"""
Shanghai Stock Exchange - market overview
http://www.sse.com.cn/market/stockdata/statistic/
:return: Shanghai Stock Exchange market overview
:rtype: pandas.DataFrame
"""
url = "http://query.sse.com.cn/commonQuery.do"
params = {
'sqlId': 'COMMON_SSE_SJ_GPSJ_GPSJZM_TJSJ_L',
'PRODUCT_NAME': '股票,主板,科创板',
'type': 'inParams',
'_': '1640855495128',
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
data_json.keys()
temp_df = pd.DataFrame(data_json['result']).T
temp_df.reset_index(inplace=True)
temp_df['index'] = [
"流通股本",
"总市值",
"平均市盈率",
"上市公司",
"上市股票",
"流通市值",
"报告时间",
"-",
"总股本",
"项目",
]
temp_df = temp_df[temp_df['index'] != '-'].iloc[:-1, :]
temp_df.columns = [
'项目',
'股票',
'科创板',
'主板',
]
return temp_df
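# Hypothetical usage sketch (not part of the original akshare source): pick the total market
# value row out of the SSE overview table returned above.
def _example_sse_summary():
    sse_df = stock_sse_summary()
    return sse_df.set_index("项目").loc["总市值"]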
def stock_sse_deal_daily(date: str = "20220225") -> pd.DataFrame:
"""
Shanghai Stock Exchange - data - stock data - trading overview - daily stock trading summary
http://www.sse.com.cn/market/stockdata/overview/day/
:return: daily stock trading summary
:rtype: pandas.DataFrame
"""
if int(date) <= 20211224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"searchDate": "-".join([date[:4], date[4:6], date[6:]]),
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_DAYCJGK_C",
"stockType": "90",
"_": "1616744620492",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"股票",
"主板B",
"_",
"股票回购",
"科创板",
]
temp_df = temp_df[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_df["单日情况"] = [
"流通市值",
"流通换手率",
"平均市盈率",
"_",
"市价总值",
"_",
"换手率",
"_",
"挂牌数",
"_",
"_",
"_",
"_",
"_",
"成交笔数",
"成交金额",
"成交量",
"次新股换手率",
"_",
"_",
]
temp_df = temp_df[temp_df["单日情况"] != "_"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"成交笔数",
"平均市盈率",
"换手率",
"次新股换手率",
"流通换手率",
]
temp_df["单日情况"] = temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(drop=True, inplace=True)
temp_df['股票'] = pd.to_numeric(temp_df['股票'], errors="coerce")
temp_df['主板A'] = pd.to_numeric(temp_df['主板A'], errors="coerce")
temp_df['主板B'] = pd.to_numeric(temp_df['主板B'], errors="coerce")
temp_df['科创板'] = pd.to_numeric(temp_df['科创板'], errors="coerce")
temp_df['股票回购'] = pd.to_numeric(temp_df['股票回购'], errors="coerce")
return temp_df
elif int(date) <= 20220224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
'sqlId': 'COMMON_SSE_SJ_GPSJ_CJGK_MRGK_C',
'SEARCH_DATE': "-".join([date[:4], date[4:6], date[6:]]),
'_': '1640836561673',
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"主板B",
"科创板",
"-",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[[
"单日情况",
"主板A",
"主板B",
"科创板",
]]
temp_df["单日情况"] = [
"市价总值",
"成交量",
"平均市盈率",
"换手率",
"成交金额",
"-",
"流通市值",
"流通换手率",
"报告日期",
"挂牌数",
"-",
]
temp_df = temp_df[temp_df["单日情况"] != "-"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"平均市盈率",
"换手率",
"流通换手率",
]
temp_df["单日情况"] = temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df['主板A'] = pd.to_numeric(temp_df['主板A'], errors="coerce")
temp_df['主板B'] = pd.to_numeric(temp_df['主板B'], errors="coerce")
from os import path
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pathlib import Path
import ptitprince as pt
# ----------
# Loss Plots
# ----------
def save_loss_plot(path, loss_function, v_path=None, show=True):
df = pd.read_csv(path)
if v_path is not None:
vdf = pd.read_csv(v_path)
else:
vdf = None
p = Path(path)
n = p.stem
d = p.parents[0]
out_path = os.path.join(d, n + '_loss.png')
fig, ax = plot_loss(df, vdf=vdf, x_lab='Iteration', y_lab=loss_function, save=out_path, show=show)
def plot_loss(df, vdf=None, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True):
x = df['Unnamed: 0'].values
y = df['loss'].values
epochs = len(df['epoch'].unique())
no_batches = int(len(x) / epochs)
epoch_ends = np.array([((i + 1) * no_batches) - 1 for i in range(epochs)])
epoch_end_x = x[epoch_ends]
epoch_end_y = y[epoch_ends]
fig, ax = plt.subplots()
leg = ['loss',]
ax.plot(x, y, linewidth=2)
ax.scatter(epoch_end_x, epoch_end_y)
title = 'Training loss'
if vdf is not None:
if len(vdf) > epochs:
vy = vdf.groupby('batch_id').mean()['validation_loss'].values
vx = vdf['batch_id'].unique()
else:
vy = vdf['validation_loss'].values
vx = epoch_end_x
title = title + ' with validation loss'
leg.append('validation loss')
if len(vdf) > epochs:
#vy_err = v_df.groupby('batch_id').sem()['validation_loss'].values
#ax.errorbar(vx, vy, vy_err, marker='.')
ax.plot(vx, vy, linewidth=2, marker='o')
else:
ax.plot(vx, vy, linewidth=2, marker='o')
ax.set(xlabel=x_lab, ylabel=y_lab)
ax.set_title(title)
ax.legend(leg)
fig.set_size_inches(13, 9)
if save is not None:
plt.savefig(save, dpi=300)
if show:
plt.show()
return fig, ax
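# Hypothetical usage sketch (not part of the original module). The CSV paths are placeholders
# and are expected to contain the columns written by the training loop ('loss', 'epoch', and,
# for the validation file, 'validation_loss'/'batch_id').
def _example_loss_plot():
    save_loss_plot("runs/exp1/loss.csv", "BCE Loss",
                   v_path="runs/exp1/validation_loss.csv", show=False)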
def save_channel_loss_plot(path, show=True):
df = pd.read_csv(path)
p = Path(path)
n = p.stem
d = p.parents[0]
out_path = os.path.join(d, n + '_channel-loss.png')
fig, ax = plot_channel_losses(df, save=out_path, show=show)
def plot_channel_losses(df, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True):
cols = list(df.columns)
x = df['Unnamed: 0'].values
non_channel_cols = ['Unnamed: 0', 'epoch', 'batch_num', 'loss', 'data_id']
channel_losses = [col for col in cols if col not in non_channel_cols]
#print(channel_losses)
if len(channel_losses) > 5:
#print('four plots')
fig, axs = plt.subplots(2, 2)
zs, ys, xs, cs = [], [], [], []
for col in channel_losses:
y = df[col].values
if col.startswith('z'):
ls = _get_linestyle(zs)
axs[0, 0].plot(x, y, linewidth=1, linestyle=ls)
zs.append(col)
if col.startswith('y'):
ls = _get_linestyle(ys)
axs[0, 1].plot(x, y, linewidth=1, linestyle=ls)
ys.append(col)
if col.startswith('x'):
ls = _get_linestyle(xs)
axs[1, 0].plot(x, y, linewidth=1, linestyle=ls)
xs.append(col)
if col.startswith('cent') or col == 'mask':
ls = _get_linestyle(cs)
axs[1, 1].plot(x, y, linewidth=1, linestyle=ls)
cs.append(col)
axs[0, 0].set_title('Z affinities losses')
axs[0, 0].legend(zs)
axs[0, 1].set_title('Y affinities losses')
axs[0, 1].legend(ys)
axs[1, 0].set_title('X affinities losses')
axs[1, 0].legend(xs)
axs[1, 1].set_title('Object interior losses')
axs[1, 1].legend(cs)
fig.set_size_inches(13, 9)
elif len(channel_losses) <= 5:
#print('two plots')
fig, axs = plt.subplots(2, 1)
affs, cs = [], []
for col in channel_losses:
y = df[col].values
if col.startswith('z') or col.startswith('y') or col.startswith('x'):
ls = _get_linestyle(affs)
axs[0].plot(x, y, linewidth=2, linestyle=ls)
affs.append(col)
if col.startswith('cent') or col == 'mask':
axs[1].plot(x, y, linewidth=2)
cs.append(col)
axs[0].set_title('Affinities losses')
axs[0].legend(affs)
axs[1].set_title('Object interior losses')
axs[1].legend(cs)
fig.set_size_inches(14, 14)
for ax in axs.flat:
ax.set(xlabel=x_lab, ylabel=y_lab)
if save is not None:
plt.savefig(save, dpi=300)
if show:
plt.show()
return fig, axs
def _get_linestyle(lis):
if len(lis) == 0:
ls = '-'
elif len(lis) == 1:
ls = '--'
else:
ls = ':'
return ls
# --------
# VI Plots
# --------
def VI_plot(
path,
cond_ent_over="GT | Output",
cond_ent_under="Output | GT",
lab="",
save=False,
show=True):
df = pd.read_csv(path)
overseg = df[cond_ent_over].values
o_groups = [cond_ent_over] * len(overseg)
underseg = df[cond_ent_under].values
u_groups = [cond_ent_under] * len(underseg)
groups = o_groups + u_groups
x = 'Variation of information'
y = 'Conditional entropy'
data = {
x : groups,
y : np.concatenate([overseg, underseg])
}
data = pd.DataFrame(data)
o = 'h'
pal = 'Set2'
sigma = .2
f, ax = plt.subplots(figsize=(12, 10))
pt.RainCloud(x = x, y = y, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax, orient = o)
p = Path(path)
plt.title(p.stem)
if save:
save_path = os.path.join(p.parents[0], p.stem + lab + '_VI_raincloud_plot.png')
plt.savefig(save_path, bbox_inches='tight')
if show:
plt.show()
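# Hypothetical usage sketch (not part of the original module). The CSV path is a placeholder and
# must contain the "GT | Output" and "Output | GT" conditional-entropy columns.
def _example_vi_plot():
    VI_plot("runs/exp1/VI_scores.csv", lab="_affinity-unet", save=True, show=False)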
def experiment_VI_plots(
paths,
names,
title,
out_name,
out_dir,
cond_ent_over="GT | Output",
cond_ent_under="Output | GT",
show=True
):
plt.rcParams.update({'font.size': 16})
groups = []
ce0 = []
ce1 = []
for i, p in enumerate(paths):
df = pd.read_csv(p)
ce0.append(df[cond_ent_over].values)
ce1.append(df[cond_ent_under].values)
groups += [names[i]] * len(df)
x = 'Experiment'
data = {
x : groups,
cond_ent_over : np.concatenate(ce0),
cond_ent_under : np.concatenate(ce1)
}
data = pd.DataFrame(data)
o = 'h'
pal = 'Set2'
sigma = .2
f, axs = plt.subplots(1, 2, figsize=(14, 10)) #, sharex=True) #, sharey=True)
ax0 = axs[0]
ax1 = axs[1]
pt.RainCloud(x = x, y = cond_ent_over, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax0, orient = o)
ax0.set_title('Over-segmentation conditional entropy')
pt.RainCloud(x = x, y = cond_ent_under, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax1, orient = o)
ax1.set_title('Under-segmentation conditional entropy')
f.suptitle(title)
os.makedirs(out_dir, exist_ok=True)
save_path = os.path.join(out_dir, out_name + '_VI_raincloud_plots.png')
plt.savefig(save_path, bbox_inches='tight')
if show:
plt.show()
# -----------------------
# Average Precision Plots
# -----------------------
def plot_experiment_APs(paths, names, title, out_dir, out_name, show=True):
dfs = [pd.read_csv(path) for path in paths]
os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(out_dir, out_name)
plot_AP(dfs, names, out_path, title, show=show)
def plot_AP(dfs, names, out_path, title, thresh_name='threshold', ap_name='average_precision', show=True):
plt.rcParams.update({'font.size': 16})
plt.rcParams["figure.figsize"] = (10,10)
fig = plt.figure()
for df in dfs:
plt.plot(df[thresh_name].values, df[ap_name].values)
plt.xlabel('IoU threshold')
plt.ylabel('Average precision')
plt.title(title)
plt.legend(names)
fig.savefig(out_path)
if show:
plt.show()
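# Hypothetical usage sketch (not part of the original module); paths, names and the output
# location are placeholders.
def _example_ap_comparison():
    plot_experiment_APs(
        ["runs/exp1/AP.csv", "runs/exp2/AP.csv"],
        ["baseline", "with-centroids"],
        "Average precision vs IoU threshold",
        "plots", "ap_comparison", show=False)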
# ------------------------------
# Object Number Difference Plots
# ------------------------------
def plot_experiment_no_diff(paths, names, title, out_dir, out_name, col_name='n_diff', show=True):
dfs = [pd.read_csv(path) for path in paths]
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
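            # adding an integer offsets by whole multiples of the index's
            # frequency ('H' here), not by nanoseconds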
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
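            # comparisons against pd.NaT never match: == and the ordering
            # comparisons are all-False, while != is all-True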
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
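            # value_counts sorts by count, so the most-repeated (latest)
            # timestamp comes first, giving a descending hourly index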
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
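                # the indexer holds the original positions in sorted order;
                # the duplicated '2011-01-01' entries keep their stable order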
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
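            # unevenly spaced selections cannot preserve a frequency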
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
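        # box=False returns the raw int64 (iNaT) values instead of an Index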
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
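        # the datetime64 values (nanoseconds since the epoch) are
        # reinterpreted as timedeltas, so 2016-01-15 becomes '16815 days'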
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
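        # dividing '1 days' .. '10 days' by a 2-hour offset
        # yields the integers 12, 24, ..., 120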
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
                               '2 hour', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
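        # only the idx1/exp1 pair is exercised below; the idx2/idx3 fixtures
        # above are commented out, so the loop repeats the same case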
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'],
                                freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with | tm.assertRaisesRegexp(period.IncompatibleFrequency, msg) | pandas.util.testing.assertRaisesRegexp |
import os
import numpy as np
import argparse
import json
import pandas as pd
import time
import shutil
import zipfile
from azureml.core import Run
import requests
import threading
import psutil
def refine_image_path(image_name):
global image_dir
for prefix in ['xray-testing-data', 'Phoi_Labelling_Data', 'vinmec/anonymized-images']:
path = os.path.join(image_dir, prefix, image_name)
if os.path.exists(path):
return path
return image_name
class MyThread(threading.Thread):
def run(self):
os.system('/bin/bash -c "source /opt/intel/openvino_2021/bin/setupvars.sh && cd /code/build/chest-detection && ./chest-detection"')
def convert_json_predict(ret_obs, threshold = 0.75, type_disease = 'TUB'):
ret_obs = ret_obs['observations']
tub_obs = list(filter(lambda x:x["code"] == type_disease, ret_obs))[0]
confidence_score = tub_obs["confidentScore"]
threshold = tub_obs["threshold"]
print("Value threshold", threshold)
if confidence_score >= threshold:
predict = 1
else:
predict = 0
return predict
def predict(image_path):
headers = {
'Content-Type': 'application/json',
'x-auth-token': '123<PASSWORD>'
}
url = "http://127.0.0.1:8080/images"
data = {"imageUrl": image_path}
try:
response = requests.post(url, json=data, headers=headers)
except Exception as e:
        print(e)
        return None
    return response
def call_api(image_path):
header = {'x-auth-token': '1<PASSWORD>', 'Content-Type': 'application/json'}
try:
raw = requests.post("http://127.0.0.1:8080/images",
data=json.dumps({'imageUrl': image_path}), headers=header)
if raw.status_code == 200:
# print('ok')
if json.loads(raw.text) is None:
return raw.json()
return json.loads(raw.text)
except:
raise ValueError(image_path)
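# NOTE: `call_api` is defined again below; at import time the later definition
# replaces this one, so only the second version is actually used by the script.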
def call_api(image_path):
# image_path = "https://api-int.draid.ai/dicom/studies/2.25.160091743131929387903451525446410949934/series/2.25.148244682741390089451659848730132728740/instances/2.25.97039210792747508295850276140512446062/frames/1/rendered?tenantCode=VB&accept=image/png"
headers = {
'Content-Type': 'application/json',
# 'x-auth-token': '<PASSWORD>'
}
url = "http://127.0.0.1:8080/images"
data = {
"imageUrl": image_path,
"mode": "detection",
"additional": {
"histogram_adjustment": False
}
}
try:
raw = requests.post("http://127.0.0.1:8080/images",
json=data, headers=headers)
print(raw)
if raw.status_code == 200:
# print('ok')
if json.loads(raw.text) is None:
return raw.json()
return json.loads(raw.text)
except Exception as e:
# raise ValueError(image_path)
print('image_path', image_path)
print('Exception', e)
total_image = pd.read_csv("/home/haiduong/Documents/VIN_BRAIN/Measurement/Tuberculosis/Convert_data/testing_tuberculosis.csv")
l_total_image = []
for index, row in total_image.iterrows():
name_image = row["Images"].split("/")[-1]
l_total_image.append(name_image)
done_image = | pd.read_csv("/home/haiduong/Documents/VIN_BRAIN/Measurement/Tuberculosis/Convert_data/Done/Done_09_07.csv") | pandas.read_csv |
import sys
import pandas
FILENAME = sys.argv[1] #"20220121 Overview CG plates and compounds _consolidated RTG.xlsx"
dfs_per_sheetname = pandas.read_excel(FILENAME, sheet_name=None)
assert "experiments" in dfs_per_sheetname
df = dfs_per_sheetname["experiments"]
assert "experiment ID" in df.columns
assert "compound map see corresponding excel table" in df.columns
assert df["experiment ID"].is_unique
## non-limited list
assert "imaging campaigns" in dfs_per_sheetname
df = dfs_per_sheetname["imaging campaigns"]
assert "imaging campaign ID" in df.columns
assert "experiment ID" in df.columns
assert "timepoint in hours" in df.columns
assert "raw data available in zip file" in df.columns
assert "processed images available in folder" in df.columns
assert "cq1 analysis available in folder" in df.columns
assert "incucyte analyzed data available in csv file" in df.columns
##
assert df["imaging campaign ID"].is_unique
#assert "incucyte timestamp" in df.columns
assert "compounds" in dfs_per_sheetname
df = dfs_per_sheetname["compounds"]
assert "compound ID" in df.columns
assert "SMILES" in df.columns
df2 = df[df.duplicated(subset=["SMILES"], keep=False)]
if len(df2) > 0:
print("Sheet 'compounds': The following groups of entries have the same SMILES but different compound IDs:")
for g, s in df2.groupby("SMILES"):
print(f"{g} : ")
print(s)
print("---")
df2 = df[df.duplicated(subset=["compound ID"], keep=False)]
if len(df2) > 0:
print("Sheet 'compounds': The following groups of entries have the same compound ID but different SMILES:")
for g, s in df2.groupby("compound ID"):
print(f"{g} : ")
print(s)
print("---")
assert df["compound ID"].is_unique
assert df["SMILES"].is_unique
assert not df["SMILES"].str.contains("\n").any()
assert "compound batches" in dfs_per_sheetname
df = dfs_per_sheetname["compound batches"]
assert "compound batch ID" in df.columns
assert "compound ID" in df.columns
df2 = df[df.duplicated(subset=["compound batch ID"], keep=False)]
if len(df2) > 0:
print("Sheet 'compound batches': The following groups of entries have the same compound batch ID:")
for g, s in df2.groupby("compound batch ID"):
print(f"{g} : ")
print(s)
print("---")
assert df["compound batch ID"].is_unique
mapping_tables_to_check = list( [s for s in dfs_per_sheetname if "compound map" in s] )
for mapping_table_name in mapping_tables_to_check:
assert mapping_table_name in dfs_per_sheetname
df = dfs_per_sheetname[mapping_table_name]
assert "well ID" in df.columns
assert "well name" in df.columns
assert "compound batch ID" in df.columns
assert "concentration uM" in df.columns
assert "experimental type" in df.columns
## complex tests follow...
acceptable_experimental_types = ["chemogenomic candidate", "unrelated to this experiment", "blank", "control", "cells only"]
for mapping_table_name in mapping_tables_to_check:
df = dfs_per_sheetname[mapping_table_name]
## check that all rows contain one of the allowed values above
assert df["experimental type"].isin(acceptable_experimental_types).all()
# concentration should be only nan if experimental type is one of the below
cond1 = df["experimental type"] == "blank"
cond2 = df["concentration uM"].isna()
cond3 = df["experimental type"] == "cells only"
cond3b = df["experimental type"] == "unrelated to this experiment"
assert df[cond1].equals(df[(cond1) & (cond2)])
assert df[cond3].equals(df[(cond3) & (cond2)])
assert df[cond3b].equals(df[(cond3b) & (cond2)])
assert df[cond2].equals(df[(cond1) | (cond3) | (cond3b)])
# concentration should be >0 if experimental type is different than the ones above
df_out = df[~((cond1)|(cond3)|(cond3b))].query("not `concentration uM` > 0")
if len( df_out ) > 0:
print(f"Concentrations in table '{mapping_table_name}' are not in the expected range:")
print(df_out)
print("---")
# compound batch should be only nan if experimental type is one of the above
cond4 = df["compound batch ID"].isna()
assert df[cond1].equals(df[(cond4) & (cond1)])
assert df[cond3].equals(df[(cond4) & (cond3)])
assert df[cond3b].equals(df[(cond4) & (cond3b)])
assert df[cond4].equals(df[(cond1) | (cond3) | (cond3b)])
## ID reference tests
foo = dfs_per_sheetname["experiments"]["experiment ID"]
bar = dfs_per_sheetname["imaging campaigns"]["experiment ID"]
assert foo.isin(bar.values).all()
assert bar.isin(foo.values).all()
foo = dfs_per_sheetname["compound batches"]["compound ID"]
bar = dfs_per_sheetname["compounds"]["compound ID"]
bar_foo = set(bar) - set(foo)
if len(bar_foo) > 0:
print("INFO: There are compound IDs in table 'compounds', which are not referenced in table 'compound batches':")
print(bar_foo)
print("---")
foo_bar = set(foo) - set(bar)
if len(foo_bar) > 0:
print("There are compound IDs in table 'compound batches', which cannot be resolved from table 'compounds':")
print(foo_bar)
print("---")
assert foo.isin(bar.values).all()
assert bar.isin(foo.values).all()
for mapping_table_name in mapping_tables_to_check:
foo = dfs_per_sheetname["compound batches"]["compound batch ID"].unique()
bar = dfs_per_sheetname[mapping_table_name]
bar = bar[ bar["experimental type"] != "cells only" ]
bar = bar[ bar["experimental type"] != "blank"]
bar = bar[ bar["experimental type"] != "unrelated to this experiment"]
bar = bar["compound batch ID"].unique()
bar_foo = set(bar) - set(foo)
if len(bar_foo) > 0:
print(f"There are compound batches in table '{mapping_table_name}', which cannot be resolved from table 'compound batches':")
print(bar_foo)
print("---")
print("Done.")
## BLOCK to replace dummy values in the whole excel file
if True:
did_i_change_anything = False
mapping = {
# "old" : "new",
"dummy1" : "dummy1",
"dummy2" : "EUB0001080a",
"dummy3" : "DP000007a",
"dummy4" : "EUB0001108a",
"EUB0000500a" : "EUB0000871a",
"EUB0000528a" : "EUB0000841a",
"EUB0000543aCl" : "EUB0000213bCl",
"EUB0000550aCl" : "EUB0000196bCl",
"EUB0000657aPO4" : "EUB0000140bPO4",
"EUB0000667aCit" : "EUB0000286bCit",
"EUB0000675aCl" : "EUB0000130bCl",
"EUB0000092a" : "EUB0000092b"
}
import openpyxl
wb = openpyxl.load_workbook(FILENAME)
for sheetname in wb.sheetnames:
ws = wb[sheetname]
dimension = ws.calculate_dimension()
for row in ws[dimension]:
for cell in row:
if cell.value in mapping:
print(f"Changing cell {cell} from value {cell.value} to {mapping[cell.value]}")
cell.value = mapping[cell.value]
did_i_change_anything = True
if did_i_change_anything:
wb.save(FILENAME + ".changed.xlsx")
## ... end of BLOCK.
## BLOCK to check the whole excel file for trailing spaces in the fields
if True:
import openpyxl
wb = openpyxl.load_workbook(FILENAME)
for sheetname in wb.sheetnames:
ws = wb[sheetname]
dimension = ws.calculate_dimension()
for row in ws[dimension]:
for cell in row:
if type(cell.value) == str and cell.value.strip() != cell.value:
print(f"Sheet '{sheetname}', cell {cell.coordinate} contains undesired whitespace: '{cell.value}'")
## ... end of BLOCK.
## BLOCK to condense a list of superfluous entries in table 'compounds' vs correct table 'compound batches'
if False:
foo = dfs_per_sheetname["compound batches"]["compound ID"]
bar = dfs_per_sheetname["compounds"]["compound ID"]
bar_foo = set(bar) - set(foo)
dfs_per_sheetname["compounds"][~bar.isin(bar_foo)].to_excel("2022-02-03-new-compounds-sheet.xlsx")
## ... end of BLOCK.
## BLOCK to check for expected pattern in compound concentrations in one plate...
if False:
for mapping_table_name in mapping_tables_to_check:
foo = dfs_per_sheetname[mapping_table_name]
foo = foo[foo["experimental type"]=="chemogenomic candidate"]
print(mapping_table_name)
print("total len:",len(foo))
counter=0
for groupname, series in foo.groupby("eubopen ID"):
if len(series)!=2:
if len(series)==1:
if series["concentration uM"].item() == 10.0:
counter+=1
continue
print("potential ERROR:")
print(series)
else:
if sorted(series["concentration uM"].values) == [1.0, 10.0]:
counter+=2
else:
print("potential ERROR:")
print(series)
print("rather unsuspicious:", counter)
## ... end of BLOCK.
### BLOCK to check for consistency in data and produce condensed output, if EUbOPEN, SGC IDs, and compound names are given in the compound maps ...
if False:
collect_mappings_between_sgc_and_eubopen_id = {}
collect_mappings_between_compound_names_and_eubopen_id = {}
for mapping_table_name in mapping_tables_to_check:
spam = dfs_per_sheetname[mapping_table_name][["SGC ID", "eubopen ID"]].dropna().drop_duplicates()
spam = dfs_per_sheetname[mapping_table_name][["SGC ID", "eubopen ID"]].drop_duplicates()
same_sgc_different_eubopen = spam[spam.duplicated(subset="SGC ID", keep=False)]
same_eubopen_different_sgc = spam[spam.duplicated(subset="eubopen ID", keep=False)]
if len(same_eubopen_different_sgc)>0:
print(f"There are compound batches in table '{mapping_table_name}', which have different SGC IDs, but the same EUbOPEN ID:")
print(same_eubopen_different_sgc)
print("---")
if len(same_sgc_different_eubopen)>0:
print(f"There are compound batches in table '{mapping_table_name}', which have the same SGC ID, but different EUbOPEN IDs:")
print(same_sgc_different_eubopen)
print("---")
#assert len(same_sgc_different_eubopen) == 0
#assert len(same_eubopen_different_sgc) == 0
for sgc_id, s in spam.groupby("SGC ID"):
if sgc_id in collect_mappings_between_sgc_and_eubopen_id:
value = s["eubopen ID"].item()
if value != collect_mappings_between_sgc_and_eubopen_id[sgc_id] and not (pandas.isna(value) and pandas.isna(collect_mappings_between_sgc_and_eubopen_id[sgc_id])):
print(f"ERROR for {sgc_id}: {repr(value)} != {repr(collect_mappings_between_sgc_and_eubopen_id[sgc_id])}")
else:
collect_mappings_between_sgc_and_eubopen_id.update( {sgc_id: s["eubopen ID"].item()} )
spam2 = dfs_per_sheetname[mapping_table_name][["compound ID", "eubopen ID"]].drop_duplicates()
for compound_name, s in spam2.groupby("compound ID"):
if pandas.isna(compound_name) or len(s)>1:
print(f"compound name is nan: {s}")
if compound_name in collect_mappings_between_compound_names_and_eubopen_id:
value = s["eubopen ID"].item()
if value != collect_mappings_between_compound_names_and_eubopen_id[compound_name] and not (pandas.isna(value) and pandas.isna(collect_mappings_between_compound_names_and_eubopen_id[compound_name])):
print(f"ERROR for {compound_name}: {repr(value)} != {repr(collect_mappings_between_compound_names_and_eubopen_id[compound_name])}")
else:
collect_mappings_between_compound_names_and_eubopen_id.update( {compound_name: s["eubopen ID"].item()} )
print(collect_mappings_between_sgc_and_eubopen_id)
print(collect_mappings_between_compound_names_and_eubopen_id)
df1 = pandas.DataFrame.from_dict(collect_mappings_between_compound_names_and_eubopen_id, orient="index", columns=["eubopen ID"])
df1["compound ID"] = df1.index
#print(df1[df1.duplicated("eubopen ID", keep=False)])
df2 = | pandas.DataFrame.from_dict(collect_mappings_between_sgc_and_eubopen_id, orient="index", columns=["eubopen ID"]) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
# # Plagiarism Detection, Feature Engineering
#
# In this project, you will build a plagiarism detector that examines an answer text file and performs binary classification: labeling that file as plagiarized or not, depending on how similar it is to a provided source text.
#
# Your first task is to create some features that can be used to train a classification model. This task is broken down into the following steps:
#
# * Clean and pre-process the data.
# * Define features for comparing the similarity of an answer text and a source text, and extract those similarity features.
# * Select "good" features by analyzing the correlations between different features.
# * Create train/test `.csv` files that hold the relevant features and class labels for the train/test data points.
#
# In the next notebook (Notebook 3), you will use the features and `.csv` files created in this notebook to train a binary classification model in a SageMaker notebook instance.
#
# You will define several different similarity features, following the instructions in [this paper](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf), and these features will help you build a robust plagiarism detector.
#
# To complete this notebook, you will need to complete all of the exercises and answer all of the questions in it.
# > All tasks are marked with an **EXERCISE** and all questions are marked with a **QUESTION**.
#
# It is up to you to decide which features to include in the final training and test data.
#
# ---
# ## Read in the data
#
# The cell below will download the necessary project data and extract the files into the folder `data/`.
#
# This data is a slightly modified version of a dataset created by <NAME> (Information Studies) and <NAME> (Computer Science) at the University of Sheffield. To learn more about the data collection and corpus information, visit the [University of Sheffield website](https://ir.shef.ac.uk/cloughie/resources/plagiarism_corpus.html).
#
# > **Citation for data**: <NAME>. and <NAME>. Developing A Corpus of Plagiarised Short Answers, Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, In Press. [Download]
# In[1]:
# NOTE:
# you only need to run this cell if you have not yet downloaded the data
# otherwise you may skip this cell or comment it out
get_ipython().system('wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip')
get_ipython().system('unzip data')
# In[4]:
# import libraries
import pandas as pd
import numpy as np
import os
# This plagiarism dataset is made up of multiple text files; the characteristics of each file are summarized in a `.csv` file named `file_information.csv`, which we can read in using `pandas`.
# In[3]:
csv_file = 'data/file_information.csv'
plagiarism_df = pd.read_csv(csv_file)
# print out the first few rows of data info
plagiarism_df.head()
# ## 剽窃类型
#
# 每个文本文件都有一个相关**任务**(任务 A-E)和一个剽窃**类别**,从上述 DataFrame 中可以看出。
#
# ### A-E 五种任务
#
# 每个文本文件都包含一个简短问题的答案;这些问题标为任务 A-E。例如任务 A 的问题是:“面向对象编程中的继承是什么意思?”
#
# ### 剽窃类别
#
# 每个文本文件都有相关的剽窃标签/类别:
#
# **1. 剽窃类别:`cut`、`light` 和 `heavy`。**
# * 这些类别表示不同级别的剽窃答案文本。`cut` 类答案直接复制了原文,`light` 类答案在原文的基础上稍微改写了一下,而 `heavy` 类答案以原文为基础,但是改写程度很大(可能是最有挑战性的剽窃检测类型)。
#
# **2. 非剽窃类别:`non`。**
# * `non` 表示答案没有剽窃,没有参考维基百科原文。
#
# **3. 特殊的原文类别:`orig`。**
# * 这是原始维基百科文本对应的一种类别。这些文件仅作为比较基准。
# ---
# ## 预处理数据
#
# 在下面的几个单元格中,你将创建一个新的 DataFrame,其中包含关于 `data/` 目录下所有文件的相关信息。该 DataFrame 可以为特征提取和训练二元剽窃分类器准备好数据。
# ### 练习:将类别转换为数值数据
#
# 你将发现数据集中的 `Category` 列包含字符串或类别值,要为特征提取准备这些特征,我们需要将类别转换为数值。此外,我们的目标是创建一个二元分类器,所以我们需要一个二元类别标签,可以表示答案文本是剽窃文件 (1) 还是非剽窃文件 (0)。请完成以下函数 `numerical_dataframe`,它会根据名称读取 `file_information.csv`,并返回新的 DataFrame,其中包含一个数值 `Category` 列,以及新的 `Class` 列,该列会将每个答案标为剽窃文件或非剽窃文件。
#
# 你的函数应该返回一个具有以下属性的新 DataFrame:
#
# * 4 列:`File`、`Task`、`Category`、`Class`。`File` 和 `Task` 列可以与原始 `.csv` 文件一样。
# * 根据以下规则将所有 `Category` 标签转换为数值标签(更高的值表示更高级别的剽窃行为):
# * 0 = `non`
# * 1 = `heavy`
# * 2 = `light`
# * 3 = `cut`
# * -1 = `orig`,这是表示原始文件的特殊值。
# * 对于新的 `Class` 列
# * 任何非剽窃 (`non`) 答案文本的类别标签都应为 `0`。
# * 任何剽窃类答案文本的类别标签都应为 `1`。
# * 任何 `orig` 文本都对应特殊标签 `-1`。
#
# ### 预期输出
#
# 运行函数后,应该获得行如下所示的 DataFrame:
# ```
#
# File Task Category Class
# 0 g0pA_taska.txt a 0 0
# 1 g0pA_taskb.txt b 3 1
# 2 g0pA_taskc.txt c 2 1
# 3 g0pA_taskd.txt d 1 1
# 4 g0pA_taske.txt e 0 0
# ...
# ...
# 99 orig_taske.txt e -1 -1
#
# ```
# In[5]:
# Read in a csv file and return a transformed dataframe
def numerical_dataframe(csv_file='data/file_information.csv'):
'''Reads in a csv file which is assumed to have `File`, `Category` and `Task` columns.
This function does two things:
1) converts `Category` column values to numerical values
2) Adds a new, numerical `Class` label column.
The `Class` column will label plagiarized answers as 1 and non-plagiarized as 0.
Source texts have a special label, -1.
:param csv_file: The directory for the file_information.csv file
:return: A dataframe with numerical categories and a new `Class` label column'''
# your code here
df = pd.read_csv(csv_file)
df.loc[:,'Class'] = df.loc[:,'Category'].map({'non': 0, 'heavy': 1, 'light': 1, 'cut': 1, 'orig': -1})
df.loc[:,'Category'] = df.loc[:,'Category'].map({'non': 0, 'heavy': 1, 'light': 2, 'cut': 3, 'orig': -1})
return df
# ### 测试单元格
#
# 下面是几个测试单元格。第一个是非正式测试,你可以通过调用你的函数并输出返回的结果,检查代码是否符合预期。
#
# 下面的**第二个**单元格是更严格的测试单元格。这样的单元格旨在确保你的代码能按预期运行,并形成可能会在后面的测试/代码中使用的任何变量,在这里是指 dataframe `transformed_df`。
#
# > 你应该按前后顺序(出现在 notebook 中的顺序)运行此 notebook 中的单元格。对于测试单元格来说,这一点很重要。
#
# 通常,后面的单元格依赖于在之前的单元格中定义的函数、导入项或变量。例如,某些测试需要依赖于前面的测试才能运行。
#
# 这些测试并不能测试所有情况,但是可以很好地检查你的代码是否正确。
# In[6]:
# informal testing, print out the results of a called function
# create new `transformed_df`
transformed_df = numerical_dataframe(csv_file ='data/file_information.csv')
# check work
# check that all categories of plagiarism have a class label = 1
transformed_df.head(10)
# In[7]:
# test cell that creates `transformed_df`, if tests are passed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# importing tests
import problem_unittests as tests
# test numerical_dataframe function
tests.test_numerical_df(numerical_dataframe)
# if above test is passed, create NEW `transformed_df`
transformed_df = numerical_dataframe(csv_file ='data/file_information.csv')
# check work
print('\nExample data: ')
transformed_df.head()
# ## 文本处理和拆分数据
#
# 这个项目的目标是构建一个剽窃分类器。本质上这个任务是比较文本;查看给定答案和原文,比较二者并判断答案是否剽窃了原文。要有效地进行比较并训练分类器,我们需要完成几项操作:预处理所有文本数据并准备文本文件(在此项目中有 95 个答案文件和 5 个原始文件),使文件更容易比较,并将数据划分为 `train` 和 `test` 集合,从而分别可以用于训练分类器和评估分类器。
#
# 为此,我们向你提供了可以向上面的 `transformed_df` 添加额外信息的代码。下面的两个单元格不需要更改;它们会向 `transformed_df` 添加两列:
#
# 1. 一个 `Text` 列;此列包含 `File` 的所有小写文本,并删除了多余的标点。
# 2. 一个 `Datatype` 列;它是一个字符串值 `train`、`test` 或 `orig`,将数据点标记为训练集或测试集。
#
# 你可以在项目目录的 `helpers.py` 文件中找到如何创建这些额外列的详细信息。建议通读该文件,了解文本是如何处理的,以及数据是如何拆分的。
#
# 请运行以下单元格以获得 `complete_df`,其中包含剽窃检测和特征工程所需的所有信息。
# In[8]:
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import helpers
# create a text column
text_df = helpers.create_text_column(transformed_df)
text_df.head()
# In[9]:
# after running the cell above
# check out the processed text for a single file, by row index
row_idx = 0 # feel free to change this index
sample_text = text_df.iloc[row_idx]['Text']
print('Sample processed text:\n\n', sample_text)
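# In[ ]:
# The real processing lives in `helpers.create_text_column`; the sketch below is only
# an illustration of the kind of normalization described above (lowercasing and
# replacing punctuation with spaces) -- it is not the actual helpers.py code.
import re

def process_text_example(text):
    """Lowercase text and replace every non-alphanumeric character with a space."""
    text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())
    # collapse the repeated whitespace left over from the substitution
    return re.sub(r'\s+', ' ', text).strip()

print(process_text_example("What is 'inheritance', in Object-Oriented Programming?"))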
# ## 将数据拆分成训练集和测试集
#
# 下个单元格将向给定的 DataFrame 添加一个 `Datatype` 列,表示记录是否是:
# * `train` - 训练数据,用于训练模型。
# * `test` - 测试数据,用于评估模型。
# * `orig` - 任务的原始维基百科文件。
#
# ### 分层抽样
#
# 给定的代码使用了辅助函数,你可以在主项目目录的 `helpers.py` 文件中查看该函数。该函数实现了[分层随机抽样](https://en.wikipedia.org/wiki/Stratified_sampling),可以按照任务和剽窃量随机拆分数据。分层抽样可以确保获得在任务和剽窃组合之间均匀分布的训练和测试数据。约 26% 的数据作为测试集,约 74% 的数据作为训练集。
#
# 函数 **train_test_dataframe** 接受一个 DataFrame,并假设该 DataFrame 具有 `Task` 和 `Category` 列,然后返回一个修改过的 DataFrame,表示文件属于哪种 `Datatype`(训练、测试或原始文件)。抽样方式将根据传入的 *random_seed* 稍微不同。犹豫样本量比较小,所以这种分层随机抽样可以为二元剽窃分类器提供更稳定的结果。稳定性是指在给定随机 seed 后,分类器的准确率方差更小。
# In[10]:
random_seed = 1 # can change; set for reproducibility
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import helpers
# create new df with Datatype (train, test, orig) column
# pass in `text_df` from above to create a complete dataframe, with all the information you need
complete_df = helpers.train_test_dataframe(text_df, random_seed=random_seed)
# check results
complete_df.head(10)
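# In[ ]:
# `helpers.train_test_dataframe` performs the actual stratified split; the function
# below is only a rough sketch of the idea (sampling a test fraction within each
# Task/Category group), not the real helper implementation.
def stratified_split_example(df, test_frac=0.26, random_state=1):
    df = df.copy()
    df['Datatype'] = 'train'
    answers = df[df['Class'] > -1]
    # sample a test fraction inside every (Task, Category) group
    test_idx = (answers.groupby(['Task', 'Category'], group_keys=False)
                       .apply(lambda g: g.sample(frac=test_frac, random_state=random_state))
                       .index)
    df.loc[test_idx, 'Datatype'] = 'test'
    df.loc[df['Class'] == -1, 'Datatype'] = 'orig'
    return df

print(stratified_split_example(text_df)['Datatype'].value_counts())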
# # 判断剽窃行为
#
# 准备好数据并创建了包含信息的 `complete_df`(包括与每个文件相关的文本和类别)后,可以继续完成下个任务了,即提取可以用于剽窃分类的相似性特征。
#
# > 注意:以下代码练习不会修改假设现在存在的 `complete_df` 的现有列。
#
# `complete_df` 应该始终包含以下列:`['File', 'Task', 'Category', 'Class', 'Text', 'Datatype']`。你可以添加其他列,并且可以通过复制 `complete_df` 的部分内容创建任何新的 DataFrames,只要不直接修改现有值即可。
#
# ---
#
# # 相似性特征
#
# 剽窃检测的一种方式是计算**相似性特征**,这些特征可以衡量给定文本与原始维基百科原文之间的相似性(对于特定的任务 A-E 来说)。你可以根据[这篇剽窃检测论文](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf)创建相似性特征。
# > 在这篇论文中,研究人员创建了叫做**包含系数**和**最长公共子序列**的特征。
#
# 你将使用这些特征作为输入,训练模型区分剽窃文本和非剽窃文本。
#
# ## 特征工程
#
# 下面深入讨论下我们要包含在剽窃检测模型中的特征,以及如何计算这些特征。在下面的解释部分,我将提交的文本文件称为**学员答案文本 (A)**,并将原始维基百科文件(我们要将答案与之比较的文件)称为**维基百科原文 (S)**。
#
# ### 包含系数
#
# 你的第一个任务是创建**包含系数特征**。为了理解包含系数,我们先回顾下 [n-gram](https://en.wikipedia.org/wiki/N-gram) 的定义。*n-gram* 是一个序列字词组合。例如,在句子“bayes rule gives us a way to combine prior knowledge with new information”中,1-gram 是一个字词,例如“bayes”。2-gram 可以是“bayes rule”,3-gram 可以是“combine prior knowledge”。
#
# > 包含系数等于维基百科原文 (S) 的 n-gram 字词计数与学员答案文本 (S) 的 n-gram 字词计数之间的**交集**除以学员答案文本的 n-gram 字词计数。
#
# $$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$
#
# 如果两段文本没有公共的 n-gram,那么包含系数为 0,如果所有 n-gram 都有交集,那么包含系数为 1。如果有更长的 n-gram 是一样的,那么可能存在复制粘贴剽窃行为。在此项目中,你需要决定在最终项目中使用什么样的 `n` 或多个 `n`。
#
# ### 练习:创建包含系数特征
#
# 根据你创建的 `complete_df`,你应该获得了比较学员答案文本 (A) 与对应的维基百科原文 (S) 所需的所有信息。任务 A 的答案应该与任务 A 的原文进行比较,并且任务 B、C、D 和 E 的答案应该与对应的原文进行比较。
#
# 在这道练习中,你需要完成函数 `calculate_containment`,它会根据以下参数计算包含系数:
# * 给定 DataFrame `df`(假设为上面的 `complete_df`)
# * `answer_filename`,例如 'g0pB_taskd.txt'
# * n-gram 长度 `n`
#
# ### 计算包含系数
#
# 完成此函数的一般步骤如下所示:
# 1. 根据给定 `df` 中的所有文本文件创建一个 n-gram 计数数组;建议使用 [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html)。
# 2. 获得给定 `answer_filename` 的已处理答案和原文。
# 3. 根据以下公式计算答案和原文之间的包含系数。
#
# >$$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$
#
# 4. 返回包含系数值。
#
# 在完成以下函数时,可以编写任何辅助函数。
# In[13]:
from sklearn.feature_extraction.text import CountVectorizer
# Calculate the ngram containment for one answer file/source file pair in a df
def calculate_containment(df, n, answer_filename):
'''Calculates the containment between a given answer text and its associated source text.
This function creates a count of ngrams (of a size, n) for each text file in our data.
Then calculates the containment by finding the ngram count for a given answer text,
and its associated source text, and calculating the normalized intersection of those counts.
:param df: A dataframe with columns,
'File', 'Task', 'Category', 'Class', 'Text', and 'Datatype'
:param n: An integer that defines the ngram size
:param answer_filename: A filename for an answer text in the df, ex. 'g0pB_taskd.txt'
:return: A single containment value that represents the similarity
between an answer text and its source text.
'''
answer_text, answer_task = df[df.File == answer_filename][['Text', 'Task']].iloc[0]
source_text = df[(df.Task == answer_task) & (df.Class == -1)]['Text'].iloc[0]
counts = CountVectorizer(analyzer='word', ngram_range=(n,n))
ngrams_array = counts.fit_transform([answer_text, source_text]).toarray()
containment = (np.minimum(ngrams_array[0],ngrams_array[1]).sum())/(ngrams_array[0].sum())
return containment
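# In[ ]:
# Quick sanity check of the containment formula on two short strings, independent of
# the DataFrame-based function above; the sentences are just made-up toy examples.
toy_answer = 'bayes rule combines prior knowledge with new information'
toy_source = 'bayes rule gives us a way to combine prior knowledge with new information'
toy_counts = CountVectorizer(analyzer='word', ngram_range=(1, 1))
toy_ngrams = toy_counts.fit_transform([toy_answer, toy_source]).toarray()
# intersection of the two count vectors, normalized by the answer's n-gram count
print('toy 1-gram containment: ',
      np.minimum(toy_ngrams[0], toy_ngrams[1]).sum() / toy_ngrams[0].sum())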
# ### 测试单元格
#
# 实现了包含系数函数后,你可以测试函数行为。
#
# 以下单元格将遍历前几个文件,并根据指定的 n 和文件计算原始类别和包含系数值。
#
# >如果你正确实现了该函数,你应该看到非剽窃类别的包含系数值很低或接近 0,剽窃示例的包含系数更高,或接近 1。
#
# 注意当 n 的值改变时会发生什么。建议将代码应用到多个文件上,并比较生成的包含系数。你应该看到,最高的包含系数对应于最高剽窃级别 (`cut`) 的文件。
# In[14]:
# select a value for n
n = 3
# indices for first few files
test_indices = range(5)
# iterate through files and calculate containment
category_vals = []
containment_vals = []
for i in test_indices:
# get level of plagiarism for a given file index
category_vals.append(complete_df.loc[i, 'Category'])
# calculate containment for given file and n
filename = complete_df.loc[i, 'File']
c = calculate_containment(complete_df, n, filename)
containment_vals.append(c)
# print out result, does it make sense?
print('Original category values: \n', category_vals)
print()
print(str(n)+'-gram containment values: \n', containment_vals)
# In[15]:
# run this test cell
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test containment calculation
# params: complete_df from before, and containment function
tests.test_containment(complete_df, calculate_containment)
# ### Question 1: Why can we calculate containment features on all of our data (training and test) before splitting the DataFrame for modeling? That is, why does calculating containment on the test data not influence the training data, and vice versa?
# **Answer:**
#
# No model has been built yet at this pre-processing stage, and containment is a feature computed independently for each text, so the test data and the training data do not influence each other.
# ---
# ## 最长公共子序列
#
# 包含系数是检测两个文档用词重叠现象的很好方式;还有助于发现剪切-粘贴和改写级别的剽窃行为。因为剽窃是一项很复杂的任务,有多种不同的级别,所以通常有必要包含其他相似性特征。这篇论文还讨论了**最长公共子序列**特征。
#
# > 最长公共子序列是指维基百科原文 (S) 和学员答案文本 (A) 之间一样的最长字词(或字母)字符串。该值也会标准化,即除以学员答案文本中的总字词(字母)数量。
#
# 在这道练习中,你的任务是计算两段文本之间的最长公共字词子序列。
#
# ### 练习:计算最长公共子序列
#
# 请完成函数 `lcs_norm_word`;它应该会计算学员答案文本与维基百科原文之间的最长公共子序列。
#
# 举个具体的例子比较好理解。最长公共子序列 (LCS) 问题可能如下所示:
# * 假设有两段文本:长度为 n 的文本 A(答案)和长度为 m 的字符串 S(原文)。我们的目标是生成最长公共子序列:在两段文本中都出现过的从左到右最长公共子序列(字词不需要连续出现)。
# * 有两句话:
# * A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents"
# * S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"
#
# * 在此示例中,我们发现每句话的开头比较相似,有重叠的一串字词“pagerank is a link analysis algorithm used by”,然后稍微有所变化。我们**继续从左到右地在两段文本中移动**,直到看到下个公共序列;在此例中只有一个字词“google”。接着是“that”和“a”,最后是相同的结尾“to each element of a hyperlinked set of documents”。
# * 下图演示了这些序列是如何在每段文本中按顺序发现的。
#
# <img src='notebook_ims/common_subseq_words.png' width=40% />
#
# * 这些字词按顺序在每个文档中从左到右地出现,虽然中间夹杂着一些字词,我们也将其看做两段文本的最长公共子序列。
# * 统计出公共字词的数量为 20。**所以,LCS 的长度为 20**。
# * 接下来标准化该值,即除以学员答案的总长度;在此例中,长度仅为 27。**所以,函数 `lcs_norm_word` 应该返回值 `20/27` 或约 `0.7408`**。
#
# 所以,LCS 可以很好地检测剪切-粘贴剽窃行为,或检测某人是否在答案中多次参考了相同的原文。
# ### LCS,动态规划
#
# 从上面的示例可以看出,这个算法需要查看两段文本并逐词比较。你可以通过多种方式解决这个问题。首先,可以使用 `.split()` 将每段文本拆分成用逗号分隔的字词列表,以进行比较。然后,遍历文本中的每个字词,并在比较过程中使 LCS 递增。
#
# 在实现有效的 LCS 算法时,建议采用矩阵和动态规划法。**动态规划**是指将更大的问题拆分成一组更小的子问题,并创建一个完整的解决方案,不需要重复解决子问题。
#
# 这种方法假设你可以将大的 LCS 任务拆分成更小的任务并组合起来。举个简单的字母比较例子:
#
# * A = "ABCD"
# * S = "BD"
#
# 一眼就能看出最长的字母子序列是 2(B 和 D 在两个字符串中都按顺序出现了)。我们可以通过查看两个字符串 A 和 S 中每个字母之间的关系算出这个结果。
#
# 下图是一个矩阵,A 的字母位于顶部,S 的字母位于左侧:
#
# <img src='notebook_ims/matrix_1.png' width=40% />
#
# 这个矩阵的列数和行数为字符串 S 和 A 中的字母数量再加上一行和一列,并在顶部和左侧填充了 0。所以现在不是 2x4 矩阵,而是 3x5 矩阵。
#
# 下面将问题拆分成更小的 LCS 问题并填充矩阵。例如,先查看最短的子字符串:A 和 S 的起始字母。“A”和“B”这两个字母之间最长的公共子序列是什么?
#
# **答案是 0,在相应的单元格里填上 0。**
#
# <img src='notebook_ims/matrix_2.png' width=30% />
#
# 然后看看下个问题,“AB”和“B”之间的 LCS 是多少?
#
# **现在 B 和 B 匹配了,在相应的单元格中填上值 1**。
#
# <img src='notebook_ims/matrix_3_match.png' width=25% />
#
# 继续下去,最终矩阵如下所示,在右下角有个 **2**。
#
# <img src='notebook_ims/matrix_6_complete.png' width=25% />
#
# 最终的 LCS 等于值 **2** 除以 A 中的 n-gram 数量。所以标准化值为 2/4 = **0.5**。
#
# ### 矩阵规则
#
# 要注意的一点是,你可以一次一个单元格地填充该矩阵。每个网格的值仅取决于紧挨着的顶部和左侧网格中的值,或者对角线/左上角的值。规则如下所示:
# * 首先是有一个多余行和列(填充 0)的矩阵。
# * 在遍历字符串时:
# * 如果有匹配,则用左上角的值加一后填充该单元格。在此示例中,当我们发现匹配的 B-B 时,将匹配单元格左上角的值 0 加 1 后填充到该单元格。
# * 如果没有匹配,将紧挨着的左侧和上方单元格中的值之最大值填充到非匹配单元格中。
#
# <img src='notebook_ims/matrix_rules.png' width=50% />
#
# 填完矩阵后,**右下角的单元格将包含非标准化 LCS 值**。
#
# 这种矩阵方法可以应用到一组字词上,而非仅仅是字母上。你的函数应该将此矩阵应用到两个文本中的字词上,并返回标准化 LCS 值。
# In[16]:
# Compute the normalized LCS given an answer text and a source text
def lcs_norm_word(answer_text, source_text):
'''Computes the longest common subsequence of words in two texts; returns a normalized value.
:param answer_text: The pre-processed text for an answer text
:param source_text: The pre-processed text for an answer's associated source text
:return: A normalized LCS value'''
a_text = answer_text.split()
s_text = source_text.split()
lcs_matrix = np.zeros((len(s_text) + 1, len(a_text) + 1), dtype=int)
for i in range(1, len(s_text)+1):
for j in range(1, len(a_text)+1):
if s_text[i-1] == a_text[j-1]:
lcs_matrix[i][j] = lcs_matrix[i-1][j-1] + 1
else:
lcs_matrix[i][j] = max(lcs_matrix[i-1][j], lcs_matrix[i][j-1])
lcs = lcs_matrix[len(s_text)][len(a_text)]
return lcs / len(a_text)
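# In[ ]:
# Character-level illustration of the matrix rules described above, using the
# "ABCD" / "BD" example; `lcs_norm_word` applies the same dynamic program to words.
def lcs_matrix_example(a_chars, s_chars):
    matrix = np.zeros((len(s_chars) + 1, len(a_chars) + 1), dtype=int)
    for i in range(1, len(s_chars) + 1):
        for j in range(1, len(a_chars) + 1):
            if s_chars[i - 1] == a_chars[j - 1]:
                matrix[i][j] = matrix[i - 1][j - 1] + 1                  # match: diagonal + 1
            else:
                matrix[i][j] = max(matrix[i - 1][j], matrix[i][j - 1])   # no match: max(top, left)
    return matrix

example_matrix = lcs_matrix_example('ABCD', 'BD')
print(example_matrix)                          # bottom-right cell holds the raw LCS length
print(example_matrix[-1, -1] / len('ABCD'))    # normalized by the answer length: 2/4 = 0.5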
# ### 测试单元格
#
# 首先用在开头的描述中提供的示例测试你的代码。
#
# 在以下单元格中,我们指定了字符串 A(答案文本)和 S(原文)。我们知道这两段文本有 20 个公共字词,提交的答案文本长 27,所以标准化的 LCS 应为 20/27。
#
# In[17]:
# Run the test scenario from above
# does your function return the expected value?
A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents"
S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"
# calculate LCS
lcs = lcs_norm_word(A, S)
print('LCS = ', lcs)
# expected value test
assert lcs==20/27., "Incorrect LCS value, expected about 0.7408, got "+str(lcs)
print('Test passed!')
# 下个单元格会运行更严格的测试。
# In[18]:
# run test cell
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test lcs implementation
# params: complete_df from before, and lcs_norm_word function
tests.test_lcs(complete_df, lcs_norm_word)
# 最后,看看 `lcs_norm_word` 生成的几个值。与之前一样,你应该看到更高的值对应于高更级别的剽窃。
# In[19]:
# test on your own
test_indices = range(5) # look at first few files
category_vals = []
lcs_norm_vals = []
# iterate through first few docs and calculate LCS
for i in test_indices:
category_vals.append(complete_df.loc[i, 'Category'])
# get texts to compare
answer_text = complete_df.loc[i, 'Text']
task = complete_df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = complete_df[(complete_df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate lcs
lcs_val = lcs_norm_word(answer_text, source_text)
lcs_norm_vals.append(lcs_val)
# print out result, does it make sense?
print('Original category values: \n', category_vals)
print()
print('Normalized LCS values: \n', lcs_norm_vals)
# ---
# # 创建所有特征
#
# 完成了特征计算函数后,下面开始创建多个特征,并判断要在最终模型中使用哪些特征。在以下单元格中,我们提供了两个辅助函数,帮助你创建多个特征并将这些特征存储到 DataFrame `features_df` 中。
#
# ### 创建多个包含系数特征
#
# 你完成的 `calculate_containment` 函数将在下个单元格中被调用,该单元格定义了辅助函数 `create_containment_features`。
#
# > 此函数返回了一个包含系数特征列表,并根据给定的 `n` 和 df(假设为 `complete_df`)中的所有文件计算而出。
#
# 对于原始文件,包含系数值设为特殊值 -1。
#
# 你可以通过该函数轻松地针对每个文本文件创建多个包含系数特征,每个特征的 n-gram 长度都不一样。
# In[20]:
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Function returns a list of containment features, calculated for a given n
# Should return a list of length 100 for all files in a complete_df
def create_containment_features(df, n, column_name=None):
containment_values = []
if(column_name==None):
column_name = 'c_'+str(n) # c_1, c_2, .. c_n
# iterates through dataframe rows
for i in df.index:
file = df.loc[i, 'File']
# Computes features using calculate_containment function
if df.loc[i,'Category'] > -1:
c = calculate_containment(df, n, file)
containment_values.append(c)
# Sets value to -1 for original tasks
else:
containment_values.append(-1)
print(str(n)+'-gram containment features created!')
return containment_values
# ### 创建 LCS 特征
#
# 在以下单元格中,你完成的 `lcs_norm_word` 函数将用于为给定 DataFrame 中的所有答案文件创建一个 LCS 特征列表(同样假设你传入的是 `complete_df`)。它会为原文分配特殊值 -1。
#
# In[21]:
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Function creates lcs feature and add it to the dataframe
def create_lcs_features(df, column_name='lcs_word'):
lcs_values = []
# iterate through files in dataframe
for i in df.index:
# Computes LCS_norm words feature using function above for answer tasks
if df.loc[i,'Category'] > -1:
# get texts to compare
answer_text = df.loc[i, 'Text']
task = df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = df[(df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate lcs
lcs = lcs_norm_word(answer_text, source_text)
lcs_values.append(lcs)
# Sets to -1 for original tasks
else:
lcs_values.append(-1)
print('LCS features created!')
return lcs_values
# ## 练习:通过选择 `ngram_range` 创建特征 DataFrame
#
# 论文建议计算以下特征:*1-gram 到 5-gram* 包含系数和*最长公共子序列*。
# > 在这道练习中,你可以选择创建更多的特征,例如 *1-gram 到 7-gram* 包含系数特征和*最长公共子序列*。
#
# 你需要创建至少 6 个特征,并从中选择一些特征添加到最终的分类模型中。定义和比较至少 6 个不同的特征使你能够丢弃似乎多余的任何特征,并选择用在最终模型中的最佳特征。
#
# 在以下单元格中,请**定义 n-gram 范围**;你将使用这些 n 创建 n-gram 包含系数特征。我们提供了剩余的特征创建代码。
# In[22]:
# Define an ngram range
ngram_range = range(1,7)
# The following code may take a minute to run, depending on your ngram_range
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
features_list = []
# Create features in a features_df
all_features = np.zeros((len(ngram_range)+1, len(complete_df)))
# Calculate features for containment for ngrams in range
i=0
for n in ngram_range:
column_name = 'c_'+str(n)
features_list.append(column_name)
# create containment features
all_features[i]=np.squeeze(create_containment_features(complete_df, n))
i+=1
# Calculate features for LCS_Norm Words
features_list.append('lcs_word')
all_features[i]= np.squeeze(create_lcs_features(complete_df))
# create a features dataframe
features_df = pd.DataFrame(np.transpose(all_features), columns=features_list)
# Print all features/columns
print()
print('Features: ', features_list)
print()
# In[23]:
# print some results
features_df.head(10)
# ## 相关特征
#
# 你应该检查整个数据集的特征相关性,判断哪些特征**过于高度相似**,没必要都包含在一个模型中。在分析过程中,你可以使用整个数据集,因为我们的样本量很小。
#
# 所有特征都尝试衡量两段文本之间的相似性。因为特征都是为了衡量相似性,所以这些特征可能会高度相关。很多分类模型(例如朴素贝叶斯分类器)都要求特征不高度相关;高度相关的特征可能会过于突出单个特征的重要性。
#
# 所以你在选择特征时,需要选择相关性低的几个特征。相关系数值的范围从 0 到 1,表示从低到高,如以下[相关性矩阵](https://www.displayr.com/what-is-a-correlation-matrix/)所示。
# In[24]:
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Create correlation matrix for just Features to determine different models to test
corr_matrix = features_df.corr().abs().round(2)
# display shows all of a dataframe
display(corr_matrix)
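# In[ ]:
# One possible way to shortlist features: greedily keep a feature only if its absolute
# correlation with every feature kept so far stays below a chosen boundary. The 0.97
# boundary here is just an example value, not a prescribed setting.
def select_low_correlation_features(corr_matrix, boundary=0.97):
    kept = []
    for col in corr_matrix.columns:
        if all(corr_matrix.loc[col, k] < boundary for k in kept):
            kept.append(col)
    return kept

print(select_low_correlation_features(corr_matrix))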
# ## 练习:创建选择的训练/测试数据
#
# 请在下方完成函数 `train_test_data`。此函数的参数应该包括:
# * `complete_df`:一个 DataFrame,其中包含所有处理过的文本数据、文件信息、数据类型和类别标签
# * `features_df`:一个 DataFrame,其中包含所有计算的特征,例如 ngram (n= 1-5) 的包含系数,以及 `complete_df`(在之前的单元格中创建的 df)中列出的每个文本文件的 LCS 值。
# * `selected_features`:一个特征列名称列表,例如 `['c_1', 'lcs_word']`,将用于在创建训练/测试数据集时选择最终特征。
#
# 它应该返回两个元组:
# * `(train_x, train_y)`,所选的训练特征及其对应的类别标签 (0/1)
# * `(test_x, test_y)`,所选的测试特征及其对应的类别标签 (0/1)
#
# ** 注意:x 和 y 应该分别是特征值和数值类别标签数组,不是 DataFrame。**
#
# 在看了上述相关性矩阵后,你应该设置一个小于 1.0 的相关性**边界**值,判断哪些特征过于高度相关,不适合包含在最终训练和测试数据中。如果你找不到相关性比边界值更低的特征,建议增加特征数量(更长的 n-gram)并从中选择特征,或者在最终模型中仅使用一两个特征,避免引入高度相关的特征。
#
# `complete_df` 有一个 `Datatype` 列,表示数据应该为 `train` 或 `test` 数据;它可以帮助你相应地拆分数据。
# In[25]:
# Takes in dataframes and a list of selected features (column names)
# and returns (train_x, train_y), (test_x, test_y)
def train_test_data(complete_df, features_df, selected_features):
'''Gets selected training and test features from given dataframes, and
returns tuples for training and test features and their corresponding class labels.
:param complete_df: A dataframe with all of our processed text data, datatypes, and labels
:param features_df: A dataframe of all computed, similarity features
:param selected_features: An array of selected features that correspond to certain columns in `features_df`
:return: training and test features and labels: (train_x, train_y), (test_x, test_y)'''
df = | pd.concat([complete_df, features_df[selected_features]], axis=1) | pandas.concat |
"""Base Constraint class."""
import copy
import importlib
import inspect
import logging
import pandas as pd
LOGGER = logging.getLogger(__name__)
def get_qualified_name(_object):
"""Return the Fully Qualified Name from an instance or class."""
module = _object.__module__
if hasattr(_object, '__name__'):
_class = _object.__name__
else:
_class = _object.__class__.__name__
return module + '.' + _class
def import_object(obj):
"""Import an object from its qualified name."""
if isinstance(obj, str):
package, name = obj.rsplit('.', 1)
return getattr(importlib.import_module(package), name)
return obj
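# Illustrative example only: the two helpers above are inverses of each other,
# mapping an object to its fully qualified name and back.
#
#     >>> get_qualified_name(pd.DataFrame)
#     'pandas.core.frame.DataFrame'
#     >>> import_object('pandas.core.frame.DataFrame') is pd.DataFrame
#     True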
class ConstraintMeta(type):
"""Metaclass for Constraints.
This metaclass replaces the ``__init__`` method with a new function
that stores the arguments passed to the __init__ method in a dict
as the attribute ``__kwargs__``.
This allows us to later on dump the class definition as a dict.
"""
def __init__(self, name, bases, attr):
super().__init__(name, bases, attr)
old__init__ = self.__init__
signature = inspect.signature(old__init__)
arg_names = list(signature.parameters.keys())[1:]
def __init__(self, *args, **kwargs):
class_name = self.__class__.__name__
if name == class_name:
self.__kwargs__ = copy.deepcopy(kwargs)
self.__kwargs__.update(dict(zip(arg_names, args)))
old__init__(self, *args, **kwargs)
__init__.__doc__ = old__init__.__doc__
__init__.__signature__ = signature
self.__init__ = __init__
class Constraint(metaclass=ConstraintMeta):
"""Constraint base class."""
_handling_strategy = 'all'
def _identity(self, table_data):
return table_data
def __init__(self):
if self._handling_strategy == 'transform':
self.filter_valid = self._identity
elif self._handling_strategy == 'reject_sampling':
self.transform = self._identity
self.reverse_transform = self._identity
def fit(self, table_data):
"""No-op method."""
pass
def transform(self, table_data):
"""Identity method for completion. To be optionally overwritten by subclasses.
Args:
table_data (pandas.DataFrame):
Table data.
Returns:
pandas.DataFrame:
Input data unmodified.
"""
return table_data
def fit_transform(self, table_data):
"""Fit this Constraint to the data and then transform it.
Args:
table_data (pandas.DataFrame):
Table data.
Returns:
pandas.DataFrame:
Transformed data.
"""
self.fit(table_data)
return self.transform(table_data)
def reverse_transform(self, table_data):
"""Identity method for completion. To be optionally overwritten by subclasses.
Args:
table_data (pandas.DataFrame):
Table data.
Returns:
pandas.DataFrame:
Input data unmodified.
"""
return table_data
def is_valid(self, table_data):
"""Say whether the given table rows are valid.
Args:
table_data (pandas.DataFrame):
Table data.
Returns:
pandas.Series:
Series of ``True`` values
"""
return | pd.Series(True, index=table_data.index) | pandas.Series |
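# Usage sketch (illustrative only, not part of the library): a hypothetical subclass
# showing how ConstraintMeta captures the __init__ arguments in ``__kwargs__`` and how
# a reject-sampling constraint only needs to implement ``is_valid``.
#
#     class GreaterThan(Constraint):
#         _handling_strategy = 'reject_sampling'
#
#         def __init__(self, low, high):
#             self.low = low
#             self.high = high
#             super().__init__()
#
#         def is_valid(self, table_data):
#             return table_data[self.high] > table_data[self.low]
#
#     constraint = GreaterThan('age', 'retirement_age')
#     constraint.__kwargs__            # {'low': 'age', 'high': 'retirement_age'}
#     get_qualified_name(constraint)   # qualified class name, usable with import_object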
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from utils import linear_regression
def remove_outliers(time_df, q):
"""
New: Remove values outside q to (100-q) quantiles.
Old: Only keep cases where the time taken is less than n times as long for one language compared to the other.
Also drop cases where the difference in days is larger than 8*n. (20 days for n=2.5)
"""
# old version
# # remove outliers based on ratio and minimum difference
# time_df = time_df.loc[(time_df["DAYS FRENCH"] / time_df["DAYS SPANISH"]) < n]
# time_df = time_df.loc[(time_df["DAYS SPANISH"] / time_df["DAYS FRENCH"]) < n]
# time_df = time_df.loc[abs(time_df["DAYS FRENCH"] - time_df["DAYS SPANISH"]) < 8*n]
# remove outliers based on quantiles
perday_ratio = time_df['DAYS FRENCH'] / time_df['DAYS SPANISH']
uq = perday_ratio.quantile(q=(100-q)/100)
lq = perday_ratio.quantile(q=q/100)
perday_ratio = perday_ratio.loc[perday_ratio < uq]
perday_ratio = perday_ratio.loc[perday_ratio > lq]
time_df = time_df.loc[perday_ratio.index]
return time_df
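# Example (illustrative): with q=10, cases whose FRENCH/SPANISH per-day ratio lies in
# the bottom or top ~10% of that ratio's distribution are dropped, so roughly the most
# extreme 20% of cases are removed before fitting the models below.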
features = pd.read_csv("data/wto/biber_wto.dat", sep='\t')
tr = pd.read_csv("data/wto/wto_timed.csv")
df = pd.concat([features, tr], axis=1)
df = remove_outliers(df, q=10)
y_fr = df["PERDAY FRENCH"]
y_sp = df["PERDAY SPANISH"]
X = df.iloc[:, :-7]
"""
French only
"""
y = y_fr
ols, scaler, X_test, y_test = linear_regression(X, y, test_size=0.1, random_state=210, plots=False)
X_test_s = scaler.transform(X_test)
y_pred = ols.predict(X_test_s)
ols_residuals = y_test - y_pred
#print("Coefficients: \n{}".format(ols.coef_))
print("French only")
print(ols.score(X_test_s, y_test))
plt.figure()
plt.scatter(y_test, y_pred)
plt.plot(range(100, 5000), range(100, 5000), 'k-')
plt.xlabel('True values')
plt.ylabel('Predicted values')
plt.title('OLS - French')
"""
Spanish only
"""
y = y_sp
ols, scaler, X_test, y_test = linear_regression(X, y, test_size=0.1, random_state=210, plots=False)
X_test_s = scaler.transform(X_test)
y_pred = ols.predict(X_test_s)
ols_residuals = y_test - y_pred
#print("Coefficients: \n{}".format(ols.coef_))
print("\nSpanish only")
print(ols.score(X_test_s, y_test))
plt.figure()
plt.scatter(y_test, y_pred)
plt.plot(range(100, 5000), range(100, 5000), 'k-')
plt.xlabel('True values')
plt.ylabel('Predicted values')
plt.title('OLS - Spanish')
# #Residuals
# plt.figure()
# plt.plot(y_test, ols_residuals, '.')
# plt.xlabel("Real value")
# plt.ylabel("Residual")
# plt.title("OLS Residuals - Spanish")
# plt.show()
"""
Train with French, test with Spanish
"""
X_train = X
X_test = X
y_train = y_fr
y_test = y_sp
scaler = preprocessing.StandardScaler().fit(X_train)
X_train_s = scaler.transform(X_train)
X_test_s = scaler.transform(X_test)
ols = linear_model.LinearRegression()
ols.fit(X_train_s, y_train)
y_pred = ols.predict(X_test_s)
ols_residuals = y_test - y_pred
#print("Coefficients: \n{}".format(ols.coef_))
print("\nTrain with French, test with Spanish")
print(ols.score(X_test_s, y_test))
plt.figure()
plt.scatter(y_test, y_pred)
plt.plot(range(100, 5000), range(100, 5000), 'k-')
plt.plot(np.unique(y_test), np.poly1d(np.polyfit(y_test, y_pred, 1))(np.unique(y_test)), 'r--')
plt.xlabel('True values')
plt.ylabel('Predicted values')
plt.title('OLS - Train French, test Spanish')
#plt.show()
"""
Train with Spanish, test with French
"""
X_train = X
X_test = X
y_train = y_sp
y_test = y_fr
scaler = preprocessing.StandardScaler().fit(X_train)
X_train_s = scaler.transform(X_train)
X_test_s = scaler.transform(X_test)
ols = linear_model.LinearRegression()
ols.fit(X_train_s, y_train)
y_pred = ols.predict(X_test_s)
ols_residuals = y_test - y_pred
#print("Coefficients: \n{}".format(ols.coef_))
print("\nTrain with Spanish, test with French")
print(ols.score(X_test_s, y_test))
plt.figure()
plt.scatter(y_test, y_pred)
plt.plot(range(100, 5000), range(100, 5000), 'k-')
plt.plot(np.unique(y_test), np.poly1d(np.polyfit(y_test, y_pred, 1))(np.unique(y_test)), 'r--')
plt.xlabel('True values')
plt.ylabel('Predicted values')
plt.title('OLS - Train Spanish, test French')
#plt.show()
"""
Combine French and Spanish
"""
X_combined = pd.concat([X, X], axis=0, ignore_index=True)
y_combined = | pd.concat([y_fr, y_sp], axis=0, ignore_index=True) | pandas.concat |
import os
import numpy as np
import numpy.random
import pandas as pd
from data_algebra.data_ops import *
import data_algebra.SQLite
import data_algebra.test_util
import vtreat
from vtreat.vtreat_db_adapter import as_data_algebra_pipeline
def test_db_adapter_1_cdata():
# Example from:
# https://github.com/WinVector/pyvtreat/blob/main/Examples/Database/vtreat_db_adapter.ipynb
# Data from:
# https://archive.ics.uci.edu/ml/datasets/Diabetes+130-US+hospitals+for+years+1999-2008
# data_all = pd.read_csv("diabetes_head.csv")
dir_path = os.path.dirname(os.path.realpath(__file__))
data_all = pd.read_csv(os.path.join(dir_path, "diabetes_head.csv"))
n = data_all.shape[0]
data_all["orig_index"] = range(n)
d_train = data_all.loc[range(n - 5), :].reset_index(inplace=False, drop=True)
d_app = data_all.loc[range(n - 5, n)].reset_index(inplace=False, drop=True)
#%%
outcome_name = "readmitted"
cols_to_copy = ["orig_index", "encounter_id", "patient_nbr"] + [outcome_name]
vars = ["time_in_hospital", "weight"]
columns = vars + cols_to_copy
# d_train.loc[:, columns]
#%%
treatment = vtreat.BinomialOutcomeTreatment(
cols_to_copy=cols_to_copy,
outcome_name=outcome_name,
outcome_target=True,
params=vtreat.vtreat_parameters(
{"sparse_indicators": False, "filter_to_recommended": False,}
),
)
d_train_treated = treatment.fit_transform(d_train.loc[:, columns])
d_app_treated = treatment.transform(d_app.loc[:, columns])
# d_app_treated
#%%
transform_as_data = treatment.description_matrix()
# transform_as_data
#%%
ops = as_data_algebra_pipeline(
source=descr(d_app=d_app.loc[:, columns]),
vtreat_descr=transform_as_data,
treatment_table_name="transform_as_data",
row_keys=['orig_index'],
)
# print(ops)
#%%
transformed = ops.eval(
{"d_app": d_app.loc[:, columns], "transform_as_data": transform_as_data}
)
# transformed
#%%
assert data_algebra.test_util.equivalent_frames(transformed, d_app_treated)
#%%
db_handle = data_algebra.SQLite.example_handle()
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
# print(sql)
#%%
db_handle.insert_table(d_app.loc[:, columns], table_name="d_app")
db_handle.insert_table(transform_as_data, table_name="transform_as_data")
db_handle.execute("CREATE TABLE res AS " + sql)
res_db = db_handle.read_query("SELECT * FROM res ORDER BY orig_index LIMIT 10")
# res_db
#%%
assert data_algebra.test_util.equivalent_frames(res_db, d_app_treated)
#%%
db_handle.close()
def test_db_adapter_general():
# set up example data
def mk_data(
n_rows: int = 100,
*,
outcome_name: str = "y",
n_cat_vars: int = 5,
n_num_vars: int = 5,
add_unknowns: bool = False,
):
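        # Build a synthetic regression frame: categorical and numeric
        # predictors each contribute to y, with missing values injected.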
step = 1 / np.sqrt(n_cat_vars + n_num_vars)
cols = dict()
y = np.random.normal(size=n_rows)
for i in range(n_cat_vars):
vname = f"vc_{i}"
levels = ["a", "b", "c", "none"]
if add_unknowns:
levels = levels + ["d"]
level_values = {v: step * np.random.normal(size=1)[0] for v in levels}
v = np.random.choice(levels, replace=True, size=n_rows)
y = y + np.array([level_values[vi] for vi in v])
v = np.array([vi if vi != "none" else None for vi in v])
cols[vname] = v
for i in range(n_num_vars):
vname = f"vn_{i}"
v = np.random.normal(size=n_rows)
y = y + step * v
v[np.random.uniform(size=n_rows) < 0.24] = None
cols[vname] = v
vars = list(cols.keys())
vars.sort()
cols[outcome_name] = y
d = pd.DataFrame(cols)
d["orig_index"] = range(d.shape[0])
return d, outcome_name, vars
d, outcome_name, vars = mk_data(100)
d_app, _, _ = mk_data(50, add_unknowns=True)
cols_to_copy = [outcome_name, "orig_index"]
columns = vars + cols_to_copy
# get reference result
treatment = vtreat.NumericOutcomeTreatment(
cols_to_copy=cols_to_copy,
outcome_name=outcome_name,
params=vtreat.vtreat_parameters(
{"sparse_indicators": False, "filter_to_recommended": False,}
),
)
d_train_treated = treatment.fit_transform(d)
assert isinstance(d_train_treated, pd.DataFrame)
d_app_treated = treatment.transform(d_app)
# test ops path
transform_as_data = treatment.description_matrix()
ops = as_data_algebra_pipeline(
source=descr(d_app=d),
vtreat_descr=transform_as_data,
treatment_table_name="transform_as_data",
row_keys=["orig_index"],
)
ops_source = str(ops)
assert isinstance(ops_source, str)
d_app_res = ops.eval({"d_app": d_app, "transform_as_data": transform_as_data})
assert data_algebra.test_util.equivalent_frames(d_app_treated, d_app_res)
# test ops db path
source_descr = TableDescription(table_name="d_app", column_names=columns,)
db_handle = data_algebra.SQLite.example_handle()
db_handle.insert_table(d_app.loc[:, columns], table_name="d_app")
db_handle.insert_table(transform_as_data, table_name="transform_as_data")
db_handle.execute("CREATE TABLE res AS " + db_handle.to_sql(ops))
res_db = db_handle.read_query("SELECT * FROM res ORDER BY orig_index")
assert data_algebra.test_util.equivalent_frames(res_db, d_app_treated)
db_handle.close()
def test_db_adapter_monster():
outcome_name = "y"
row_id_name = 'row_id'
n_vars = 5
def mk_data(n_rows: int = 100):
step = 1 / np.sqrt(n_vars)
cols = dict()
y = np.random.normal(size=n_rows)
for i in range(n_vars):
vname = f"v_{i}"
v = np.random.choice(["a", "b"], replace=True, size=n_rows)
y = y + np.where(v == "a", step, -step)
cols[vname] = v
vars = list(cols.keys())
vars.sort()
cols[outcome_name] = y
cols[row_id_name] = range(n_rows)
d = | pd.DataFrame(cols) | pandas.DataFrame |
from os import listdir
from os.path import isfile, join
from typing import Dict, List, Tuple
import pandas as pd
episodes = 1000
letters = ['A', 'B', 'C', 'D', 'E']
get_min = {
'A': 4,
'B': 5,
'C': 5,
'D': 6,
'E': 6,
}
def sort_files_by_depth(files: List) -> Tuple[List, List, List, List, List]:
"""
    Sorts files by depth into separate lists
"""
depth_0 = []
depth_1 = []
depth_2 = []
depth_3 = []
depth_4 = []
for f in files:
d0 = f.find('depth=')
d = f[d0 + 6: d0 + 7] # extracts depth from file name
if d == '0':
depth_0.append(f)
elif d == '1':
depth_1.append(f)
elif d == '2':
depth_2.append(f)
elif d == '3':
depth_3.append(f)
else:
depth_4.append(f)
return depth_0, depth_1, depth_2, depth_3, depth_4
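# Illustrative example (file names here are hypothetical; only the 'depth='
# naming convention parsed above is assumed):
#   sort_files_by_depth(['run_depth=0_A.csv', 'run_depth=2_B.csv'])
#   -> (['run_depth=0_A.csv'], [], ['run_depth=2_B.csv'], [], [])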
def read_files(files: List) -> Dict:
"""
Reads list of file names into dictionary
:param files: list of file names
:return: Dict where key is a file name and
value is a pandas dataframe
"""
dfs = {}
for f in files:
# print('Reading', f)
df = pd.read_csv(f)
dfs[f] = df
return dfs
def get_file_dictionaries(d0, d1, d2, d3, d4) -> Tuple[Dict, Dict, Dict, Dict,
Dict]:
"""
Reads files into dictionaries
"""
dfs_0 = read_files(d0)
dfs_1 = read_files(d1)
dfs_2 = read_files(d2)
dfs_3 = read_files(d3)
dfs_4 = read_files(d4)
return dfs_0, dfs_1, dfs_2, dfs_3, dfs_4
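# Typical usage (sketch; the 'results' directory is hypothetical):
#   files = [f for f in listdir('results') if isfile(join('results', f))]
#   dfs_0, dfs_1, dfs_2, dfs_3, dfs_4 = \
#       get_file_dictionaries(*sort_files_by_depth(files))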
def combine_results(dfs: Dict) -> pd.DataFrame:
"""
Combines all DataFrames in dictionary into
one pd.DataFrame
"""
frame = | pd.DataFrame() | pandas.DataFrame |
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat
import math
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pymaid
from pymaid_creds import url, name, password, token
# convert pair-sorted brain/sensories matrix to binary matrix based on synapse threshold
matrix_ad = pd.read_csv('data/axon-dendrite.csv', header=0, index_col=0)
matrix_dd = | pd.read_csv('data/dendrite-dendrite.csv', header=0, index_col=0) | pandas.read_csv |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.index import _ensure_index_from_sequences
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas import compat
from pandas.compat import lzip, string_types, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
is_bool_dtype,
is_list_like,
is_numeric_dtype,
is_timedelta64_dtype)
from pandas.core.indexing import check_bool_indexer
import warnings
import numpy as np
import ray
import itertools
import io
import sys
import re
from .groupby import DataFrameGroupBy
from .utils import (
_deploy_func,
_map_partitions,
_partition_pandas_dataframe,
to_pandas,
_blocks_to_col,
_blocks_to_row,
_create_block_partitions,
_inherit_docstrings,
_reindex_helper,
_co_op_helper)
from . import get_npartitions
from .index_metadata import _IndexMetadata
@_inherit_docstrings(pd.DataFrame)
class DataFrame(object):
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False, col_partitions=None, row_partitions=None,
block_partitions=None, row_metadata=None, col_metadata=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index or list): The row index for this dataframe.
columns (pandas.Index): The column names for this dataframe, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input
col_partitions ([ObjectID]): The list of ObjectIDs that contain
the column dataframe partitions.
row_partitions ([ObjectID]): The list of ObjectIDs that contain the
row dataframe partitions.
block_partitions: A 2D numpy array of block partitions.
row_metadata (_IndexMetadata):
Metadata for the new dataframe's rows
col_metadata (_IndexMetadata):
Metadata for the new dataframe's columns
"""
self._row_metadata = self._col_metadata = None
# Check type of data and use appropriate constructor
if data is not None or (col_partitions is None and
row_partitions is None and
block_partitions is None):
pd_df = pd.DataFrame(data=data, index=index, columns=columns,
dtype=dtype, copy=copy)
# TODO convert _partition_pandas_dataframe to block partitioning.
row_partitions = \
_partition_pandas_dataframe(pd_df,
num_partitions=get_npartitions())
self._block_partitions = \
_create_block_partitions(row_partitions, axis=0,
length=len(pd_df.columns))
# Set in case we were only given a single row/column for below.
axis = 0
columns = pd_df.columns
index = pd_df.index
else:
# created this invariant to make sure we never have to go into the
# partitions to get the columns
assert columns is not None, \
"Columns not defined, must define columns for internal " \
"DataFrame creations"
if block_partitions is not None:
# put in numpy array here to make accesses easier since it's 2D
self._block_partitions = np.array(block_partitions)
axis = 0
else:
if row_partitions is not None:
axis = 0
partitions = row_partitions
elif col_partitions is not None:
axis = 1
partitions = col_partitions
self._block_partitions = \
_create_block_partitions(partitions, axis=axis,
length=len(columns))
if row_metadata is not None:
self._row_metadata = row_metadata.copy()
if col_metadata is not None:
self._col_metadata = col_metadata.copy()
# Sometimes we only get a single column or row, which is
# problematic for building blocks from the partitions, so we
# add whatever dimension we're missing from the input.
if self._block_partitions.ndim < 2:
self._block_partitions = np.expand_dims(self._block_partitions,
axis=axis ^ 1)
assert self._block_partitions.ndim == 2, "Block Partitions must be 2D."
# Create the row and column index objects for using our partitioning.
# If the objects haven't been inherited, then generate them
if self._row_metadata is None:
self._row_metadata = _IndexMetadata(self._block_partitions[:, 0],
index=index, axis=0)
if self._col_metadata is None:
self._col_metadata = _IndexMetadata(self._block_partitions[0, :],
index=columns, axis=1)
def _get_row_partitions(self):
return [_blocks_to_row.remote(*part)
for part in self._block_partitions]
def _set_row_partitions(self, new_row_partitions):
self._block_partitions = \
_create_block_partitions(new_row_partitions, axis=0,
length=len(self.columns))
_row_partitions = property(_get_row_partitions, _set_row_partitions)
def _get_col_partitions(self):
return [_blocks_to_col.remote(*self._block_partitions[:, i])
for i in range(self._block_partitions.shape[1])]
def _set_col_partitions(self, new_col_partitions):
self._block_partitions = \
_create_block_partitions(new_col_partitions, axis=1,
length=len(self.index))
_col_partitions = property(_get_col_partitions, _set_col_partitions)
def __str__(self):
return repr(self)
def _repr_helper_(self):
if len(self._row_metadata) <= 60 and \
len(self._col_metadata) <= 20:
return to_pandas(self)
def head(df, n, get_local_head=False):
"""Compute the head for this without creating a new DataFrame"""
if get_local_head:
return df.head(n)
new_dfs = _map_partitions(lambda df: df.head(n),
df)
index = self.index[:n]
pd_head = pd.concat(ray.get(new_dfs), axis=1, copy=False)
pd_head.index = index
pd_head.columns = self.columns
return pd_head
def tail(df, n, get_local_tail=False):
"""Compute the tail for this without creating a new DataFrame"""
if get_local_tail:
return df.tail(n)
new_dfs = _map_partitions(lambda df: df.tail(n),
df)
index = self.index[-n:]
pd_tail = pd.concat(ray.get(new_dfs), axis=1, copy=False)
pd_tail.index = index
pd_tail.columns = self.columns
return pd_tail
def front(df, n):
"""Get first n columns without creating a new Dataframe"""
cum_col_lengths = self._col_metadata._lengths.cumsum()
            index = np.argmax(cum_col_lengths >= n)
pd_front = pd.concat(ray.get(x[:index+1]), axis=1, copy=False)
pd_front = pd_front.iloc[:, :n]
pd_front.index = self.index
pd_front.columns = self.columns[:n]
return pd_front
def back(df, n):
"""Get last n columns without creating a new Dataframe"""
cum_col_lengths = np.flip(self._col_metadata._lengths,
axis=0).cumsum()
            index = np.argmax(cum_col_lengths >= n)
pd_back = pd.concat(ray.get(x[-(index+1):]), axis=1, copy=False)
pd_back = pd_back.iloc[:, -n:]
pd_back.index = self.index
pd_back.columns = self.columns[-n:]
return pd_back
x = self._col_partitions
get_local_head = False
# Get first and last 10 columns if there are more than 20 columns
if len(self._col_metadata) >= 20:
get_local_head = True
front = front(x, 10)
back = back(x, 10)
col_dots = pd.Series(["..."
for _ in range(len(self.index))])
col_dots.index = self.index
col_dots.name = "..."
x = pd.concat([front, col_dots, back], axis=1)
# If less than 60 rows, x is already in the correct format.
if len(self._row_metadata) < 60:
return x
head = head(x, 30, get_local_head)
tail = tail(x, 30, get_local_head)
# Make the dots in between the head and tail
row_dots = pd.Series(["..."
for _ in range(len(head.columns))])
row_dots.index = head.columns
row_dots.name = "..."
# We have to do it this way or convert dots to a dataframe and
# transpose. This seems better.
result = head.append(row_dots).append(tail)
return result
def __repr__(self):
# We use pandas repr so that we match them.
if len(self._row_metadata) <= 60 and \
len(self._col_metadata) <= 20:
return repr(self._repr_helper_())
# The split here is so that we don't repr pandas row lengths.
result = self._repr_helper_()
final_result = repr(result).rsplit("\n\n", maxsplit=1)[0] + \
"\n\n[{0} rows x {1} columns]".format(len(self.index),
len(self.columns))
return final_result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
if len(self._row_metadata) <= 60 and \
len(self._col_metadata) <= 20:
return self._repr_helper_()._repr_html_()
# We split so that we insert our correct dataframe dimensions.
result = self._repr_helper_()._repr_html_()
return result.split("<p>")[0] + \
"<p>{0} rows x {1} columns</p>\n</div>".format(len(self.index),
len(self.columns))
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._row_metadata.index
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._row_metadata.index = new_index
index = property(_get_index, _set_index)
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._col_metadata.index
def _set_columns(self, new_index):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._col_metadata.index = new_index
columns = property(_get_columns, _set_columns)
def _arithmetic_helper(self, remote_func, axis, level=None):
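        # Applies remote_func to every column partition (axis=0) or row
        # partition (axis=1) and concatenates the per-partition Series
        # results into one Series.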
# TODO: We don't support `level` right now
if level is not None:
raise NotImplementedError("Level not yet supported.")
axis = pd.DataFrame()._get_axis_number(axis) if axis is not None \
else 0
oid_series = ray.get(_map_partitions(remote_func,
self._col_partitions if axis == 0
else self._row_partitions))
if axis == 0:
            # Pair each partition's result with its partition number so its
            # positional labels can be mapped back to the real column labels.
oid_series = [(oid_series[i], i) for i in range(len(oid_series))]
if len(oid_series) > 1:
for df, partition in oid_series:
this_partition = \
self._col_metadata.partition_series(partition)
df.index = \
this_partition[this_partition.isin(df.index)].index
result_series = pd.concat([obj[0] for obj in oid_series],
axis=0, copy=False)
else:
result_series = pd.concat(oid_series, axis=0, copy=False)
result_series.index = self.index
return result_series
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == '':
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and '@' in expr:
raise NotImplementedError("Local variables not yet supported in "
"eval.")
if isinstance(expr, str) and 'not' in expr:
if 'parser' in kwargs and kwargs['parser'] == 'python':
raise NotImplementedError("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# The number of dimensions is common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ndim,
self._row_partitions[0]))
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
result = ray.get(_deploy_func.remote(lambda df: df.ftypes,
self._row_partitions[0]))
result.index = self.columns
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
# The dtypes are common across all partitions.
# The first partition will be enough.
result = ray.get(_deploy_func.remote(lambda df: df.dtypes,
self._row_partitions[0]))
result.index = self.columns
return result
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
all_empty = ray.get(_map_partitions(
lambda df: df.empty, self._row_partitions))
return False not in all_empty
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return np.concatenate(ray.get(_map_partitions(
lambda df: df.values, self._row_partitions)))
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, row_partitions=None, col_partitions=None,
block_partitions=None, columns=None, index=None,
col_metadata=None, row_metadata=None):
"""Updates the current DataFrame inplace.
Behavior should be similar to the constructor, given the corresponding
arguments. Note that len(columns) and len(index) should match the
corresponding dimensions in the partition(s) passed in, otherwise this
function will complain.
Args:
row_partitions ([ObjectID]):
The new partitions to replace self._row_partitions directly
col_partitions ([ObjectID]):
The new partitions to replace self._col_partitions directly
columns (pd.Index):
Index of the column dimension to replace existing columns
index (pd.Index):
Index of the row dimension to replace existing index
Note:
If `columns` or `index` are not supplied, they will revert to
default columns or index respectively, as this function does
not have enough contextual info to rebuild the indexes
correctly based on the addition/subtraction of rows/columns.
"""
assert row_partitions is not None or col_partitions is not None\
or block_partitions is not None, \
"To update inplace, new column or row partitions must be set."
if block_partitions is not None:
self._block_partitions = block_partitions
elif row_partitions is not None:
self._row_partitions = row_partitions
elif col_partitions is not None:
self._col_partitions = col_partitions
if col_metadata is not None:
self._col_metadata = col_metadata
else:
assert columns is not None, \
"Columns must be passed without col_metadata"
self._col_metadata = _IndexMetadata(
self._block_partitions[0, :], index=columns, axis=1)
if row_metadata is not None:
self._row_metadata = row_metadata
else:
# Index can be None for default index, so we don't check
self._row_metadata = _IndexMetadata(
self._block_partitions[:, 0], index=index, axis=0)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
return DataFrame(block_partitions=self._block_partitions,
columns=new_cols,
index=self.index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
return DataFrame(block_partitions=self._block_partitions,
columns=new_cols,
index=self.index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError(
"\'{0}\' object is not callable".format(type(func)))
new_block_partitions = np.array([
_map_partitions(lambda df: df.applymap(func), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(block_partitions=self._block_partitions,
columns=self.columns,
index=self.index)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
            A DataFrameGroupBy object for this grouping.
"""
axis = pd.DataFrame()._get_axis_number(axis)
if callable(by):
by = by(self.index)
elif isinstance(by, compat.string_types):
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
mismatch = len(by) != len(self) if axis == 0 \
else len(by) != len(self.columns)
if all([obj in self for obj in by]) and mismatch:
raise NotImplementedError(
"Groupby with lists of columns not yet supported.")
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
return DataFrameGroupBy(self, by, axis, level, as_index, sort,
group_keys, squeeze, **kwargs)
def sum(self, axis=None, skipna=True, level=None, numeric_only=None):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
def remote_func(df):
return df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only)
return self._arithmetic_helper(remote_func, axis, level)
def abs(self):
"""Apply an absolute value function to all numberic columns.
Returns:
A new DataFrame with the applied absolute value.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
new_block_partitions = np.array([_map_partitions(lambda df: df.abs(),
block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.isin(values), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.isna(), block) for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index,
row_metadata=self._row_metadata,
col_metadata=self._col_metadata)
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.isnull(), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
        # The info axis of a DataFrame is its columns.
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.T, block) for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions.T,
columns=self.index,
index=self.columns)
T = property(transpose)
    def dropna(self, axis, how, thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
raise NotImplementedError("Not yet")
def add(self, other, axis='columns', level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
            other: What to add to this DataFrame.
            axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
return self._operator_helper(pd.DataFrame.add, other, axis, level,
fill_value)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pd.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop('is_transform', None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
elif is_list_like(arg):
from .concat import concat
x = [self._aggregate(func, *args, **kwargs)
for func in arg]
new_dfs = [x[i] if not isinstance(x[i], pd.Series)
else pd.DataFrame(x[i], columns=[arg[i]]).T
for i in range(len(x))]
return concat(new_dfs)
elif callable(arg):
            return self._callable_function(arg, _axis, *args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, compat.string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert len([kwarg
for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, func, None)
if f is not None:
raise NotImplementedError("Numpy aggregates not yet supported.")
raise ValueError("{} is an unknown string function".format(func))
def _callable_function(self, func, axis, *args, **kwargs):
if axis == 0:
partitions = self._col_partitions
else:
partitions = self._row_partitions
if axis == 1:
kwargs['axis'] = axis
kwargs['temp_columns'] = self.columns
else:
kwargs['temp_index'] = self.index
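        # The remote partition frames do not carry the DataFrame's real
        # index/columns, so they are shipped through kwargs and restored
        # inside agg_helper before aggregating.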
def agg_helper(df, arg, *args, **kwargs):
if 'temp_index' in kwargs:
df.index = kwargs.pop('temp_index', None)
else:
df.columns = kwargs.pop('temp_columns', None)
is_transform = kwargs.pop('is_transform', False)
new_df = df.agg(arg, *args, **kwargs)
is_series = False
if isinstance(new_df, pd.Series):
is_series = True
index = None
columns = None
else:
index = new_df.index \
if not isinstance(new_df.index, pd.RangeIndex) \
else None
columns = new_df.columns
new_df.columns = pd.RangeIndex(0, len(new_df.columns))
new_df.reset_index(drop=True, inplace=True)
if is_transform:
if is_scalar(new_df) or len(new_df) != len(df):
raise ValueError("transforms cannot produce "
"aggregated results")
return is_series, new_df, index, columns
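        # agg_helper returns a 4-tuple, so each remote call is split into
        # four separate object IDs via num_return_vals=4.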
remote_result = \
[_deploy_func._submit(args=(lambda df: agg_helper(df,
func,
*args,
**kwargs),
part), num_return_vals=4)
for part in partitions]
# This magic transposes the list comprehension returned from remote
is_series, new_parts, index, columns = \
[list(t) for t in zip(*remote_result)]
# This part is because agg can allow returning a Series or a
# DataFrame, and we have to determine which here. Shouldn't add
# too much to latency in either case because the booleans can
# be returned immediately
is_series = ray.get(is_series)
if all(is_series):
new_series = pd.concat(ray.get(new_parts))
new_series.index = self.columns if axis == 0 else self.index
return new_series
# This error is thrown when some of the partitions return Series and
# others return DataFrames. We do not allow mixed returns.
elif any(is_series):
raise ValueError("no results.")
# The remaining logic executes when we have only DataFrames in the
# remote objects. We build a Ray DataFrame from the Pandas partitions.
elif axis == 0:
new_index = ray.get(index[0])
columns = ray.get(columns)
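            # Each partition produced its own column Index; append stitches
            # them back into a single Index for the result.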
columns = columns[0].append(columns[1:])
return DataFrame(col_partitions=new_parts,
columns=columns,
index=self.index if new_index is None
else new_index)
else:
new_index = ray.get(index[0])
columns = ray.get(columns)
columns = columns[0].append(columns[1:])
return DataFrame(row_partitions=new_parts,
columns=columns,
index=self.index if new_index is None
else new_index)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def all(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
def remote_func(df):
return df.all(axis=axis, bool_only=bool_only, skipna=skipna,
level=level, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def any(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether any elements are True over requested axis
Note:
            If axis=None or axis=0, this call operates on the column
            partitions; otherwise it operates on the row partitions.
"""
def remote_func(df):
return df.any(axis=axis, bool_only=bool_only, skipna=skipna,
level=level, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def append(self, other, ignore_index=False, verify_integrity=False):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pd.Series, dict)):
if isinstance(other, dict):
other = pd.Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pd.Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = pd.DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = pd.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from .concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pd.DataFrame()._get_axis_number(axis)
if is_list_like(func) and not all([isinstance(obj, str)
for obj in func]):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
if axis == 0 and is_list_like(func):
return self.aggregate(func, axis, *args, **kwds)
if isinstance(func, compat.string_types):
if axis == 1:
kwds['axis'] = axis
return getattr(self, func)(*args, **kwds)
elif callable(func):
return self._callable_function(func, axis=axis, *args, **kwds)
else:
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_blocks(self, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_matrix(self, columns=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asof(self, where, subset=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def assign(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def astype(self, dtype, copy=True, errors='raise', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at_time(self, time, asof=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')
"""
new_df = self.fillna(method='bfill',
axis=axis,
limit=limit,
downcast=downcast,
inplace=inplace)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError("""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all().""")
else:
return to_pandas(self).bool()
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None,
**kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_lower(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_upper(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine(self, other, func, fill_value=None, overwrite=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine_first(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def compound(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def consolidate(self, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corr(self, method='pearson', min_periods=1):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corrwith(self, other, axis=0, drop=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
def remote_func(df):
return df.count(axis=axis, level=level, numeric_only=numeric_only)
return self._arithmetic_helper(remote_func, axis, level)
def cov(self, min_periods=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def _cumulative_helper(self, func, axis):
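        # Cumulative ops preserve shape, so the function is simply mapped
        # over the partitions along the requested axis.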
axis = pd.DataFrame()._get_axis_number(axis) if axis is not None \
else 0
if axis == 0:
new_cols = _map_partitions(func, self._col_partitions)
return DataFrame(col_partitions=new_cols,
columns=self.columns,
index=self.index)
else:
new_rows = _map_partitions(func, self._row_partitions)
return DataFrame(row_partitions=new_rows,
columns=self.columns,
index=self.index)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
def remote_func(df):
return df.cummax(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
def remote_func(df):
return df.cummin(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
def remote_func(df):
return df.cumprod(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
def remote_func(df):
return df.cumsum(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
def describe_helper(df):
"""This to ensure nothing goes on with non-numeric columns"""
try:
return df.select_dtypes(exclude='object').describe(
percentiles=percentiles,
include=include,
exclude=exclude)
# This exception is thrown when there are only non-numeric columns
# in this partition
except ValueError:
return pd.DataFrame()
# Begin fixing index based on the columns inside.
parts = ray.get(_map_partitions(describe_helper, self._col_partitions))
        # Pair each partition's result with its partition number so its
        # positional columns can be mapped back to the real column labels.
parts = [(parts[i], i) for i in range(len(parts))]
for df, partition in parts:
this_partition = self._col_metadata.partition_series(partition)
df.columns = this_partition[this_partition.isin(df.columns)].index
# Remove index from tuple
result = pd.concat([obj[0] for obj in parts], axis=1, copy=False)
return result
def diff(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def div(self, other, axis='columns', level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
        return self._operator_helper(pd.DataFrame.div, other, axis, level,
fill_value)
def divide(self, other, axis='columns', level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
raise NotImplementedError("Level not yet supported for drop")
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and "
"'index'/'columns'")
axis = pd.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pd.DataFrame()._construct_axes_from_arguments((index,
columns),
{})
else:
raise ValueError("Need to specify at least one of 'labels', "
"'index' or 'columns'")
obj = self.copy()
def drop_helper(obj, axis, label):
# TODO(patyang): If you drop from the index first, you can do it
# in batch by returning the dropped items. Likewise coords.drop
# leaves the coords df in an inconsistent state.
if axis == 'index':
try:
coords = obj._row_metadata[label]
if isinstance(coords, pd.DataFrame):
partitions = list(coords['partition'])
indexes = list(coords['index_within_partition'])
else:
partitions, indexes = coords
partitions = [partitions]
indexes = [indexes]
for part, index in zip(partitions, indexes):
x = _deploy_func.remote(
lambda df: df.drop(labels=index, axis=axis,
errors='ignore'),
obj._row_partitions[part])
obj._row_partitions = \
[obj._row_partitions[i] if i != part
else x
for i in range(len(obj._row_partitions))]
# The decrement here is because we're dropping one at a
# time and the index is automatically updated when we
# convert back to blocks.
obj._row_metadata.squeeze(part, index)
obj._row_metadata.drop(labels=label)
except KeyError:
return obj
else:
try:
coords = obj._col_metadata[label]
if isinstance(coords, pd.DataFrame):
partitions = list(coords['partition'])
indexes = list(coords['index_within_partition'])
else:
partitions, indexes = coords
partitions = [partitions]
indexes = [indexes]
for part, index in zip(partitions, indexes):
x = _deploy_func.remote(
lambda df: df.drop(labels=index, axis=axis,
errors='ignore'),
obj._col_partitions[part])
obj._col_partitions = \
[obj._col_partitions[i] if i != part
else x
for i in range(len(obj._col_partitions))]
# The decrement here is because we're dropping one at a
# time and the index is automatically updated when we
# convert back to blocks.
obj._col_metadata.squeeze(part, index)
obj._col_metadata.drop(labels=label)
except KeyError:
return obj
return obj
for axis, labels in axes.items():
if labels is None:
continue
if is_list_like(labels):
for label in labels:
if errors != 'ignore' and label and \
label not in getattr(self, axis):
raise ValueError("The label [{}] is not in the [{}]",
label, axis)
else:
obj = drop_helper(obj, axis, label)
else:
if errors != 'ignore' and labels and \
labels not in getattr(self, axis):
raise ValueError("The label [{}] is not in the [{}]",
labels, axis)
else:
obj = drop_helper(obj, axis, labels)
if not inplace:
return obj
else:
self._row_metadata = obj._row_metadata
self._col_metadata = obj._col_metadata
self._block_partitions = obj._block_partitions
def drop_duplicates(self, subset=None, keep='first', inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def duplicated(self, subset=None, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def eq(self, other, axis='columns', level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.eq, other, axis, level)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
# TODO(kunalgosar): Implement Copartition and use to implement equals
def helper(df, index, other_series):
return df.iloc[index['index_within_partition']] \
.equals(other_series)
results = []
other_partition = None
other_df = None
# TODO: Make the appropriate coord df accessor methods for this fxn
for i, idx in other._row_metadata._coord_df.iterrows():
if idx['partition'] != other_partition:
other_df = ray.get(other._row_partitions[idx['partition']])
other_partition = idx['partition']
# TODO: group series here into full df partitions to reduce
# the number of remote calls to helper
other_series = other_df.iloc[idx['index_within_partition']]
curr_index = self._row_metadata._coord_df.iloc[i]
curr_df = self._row_partitions[int(curr_index['partition'])]
results.append(_deploy_func.remote(helper,
curr_df,
curr_index,
other_series))
for r in results:
if not ray.get(r):
return False
return True
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
columns = self.columns
def eval_helper(df):
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
# If result is a series, expr was not an assignment expression.
if not isinstance(result, pd.Series):
result.columns = pd.RangeIndex(0, len(result.columns))
return result
inplace = validate_bool_kwarg(inplace, "inplace")
new_rows = _map_partitions(eval_helper, self._row_partitions)
result_type = ray.get(_deploy_func.remote(lambda df: type(df),
new_rows[0]))
if result_type is pd.Series:
new_series = pd.concat(ray.get(new_rows), axis=0)
new_series.index = self.index
return new_series
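        # Run the expression against the (transposed) column metadata too,
        # so a column created by an assignment expression shows up in the
        # new column Index.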
columns_copy = self._col_metadata._coord_df.copy().T
columns_copy.eval(expr, inplace=True, **kwargs)
columns = columns_copy.columns
if inplace:
self._update_inplace(row_partitions=new_rows, columns=columns)
else:
return DataFrame(columns=columns, row_partitions=new_rows)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(method='ffill',
axis=axis,
limit=limit,
downcast=downcast,
inplace=inplace)
if not inplace:
return new_df
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
            method: Method to use for filling holes in reindexed Series.
                pad / ffill: propagate last valid observation forward to
                    next valid.
                backfill / bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pd.DataFrame):
raise NotImplementedError("Passing a DataFrame as the value for "
"fillna is not yet supported.")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = pd.DataFrame()._get_axis_number(axis) \
if axis is not None \
else 0
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
if value is None and method is None:
raise ValueError('must specify a fill method or value')
if value is not None and method is not None:
raise ValueError('cannot specify both a fill method and value')
if method is not None and method not in ['backfill', 'bfill', 'pad',
'ffill']:
expecting = 'pad (ffill) or backfill (bfill)'
msg = 'Invalid fill method. Expecting {expecting}. Got {method}'\
.format(expecting=expecting, method=method)
raise ValueError(msg)
if inplace:
new_obj = self
else:
new_obj = self.copy()
parts, coords_obj = (new_obj._col_partitions,
new_obj._col_metadata) if axis == 0 else \
(new_obj._row_partitions,
new_obj._row_metadata)
if isinstance(value, (pd.Series, dict)):
new_vals = {}
value = dict(value)
for val in value:
# Get the local index for the partition
try:
part, index = coords_obj[val]
# Pandas ignores these errors so we will suppress them too.
except KeyError:
continue
new_vals[val] = _deploy_func.remote(lambda df: df.fillna(
value={index: value[val]},
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs), parts[part])
# Not every partition was changed, so we put everything back that
# was not changed and update those that were.
new_parts = [parts[i] if coords_obj.index[i] not in new_vals
else new_vals[coords_obj.index[i]]
for i in range(len(parts))]
else:
new_parts = _map_partitions(lambda df: df.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs), parts)
if axis == 0:
new_obj._update_inplace(col_partitions=new_parts,
columns=self.columns,
index=self.index)
else:
new_obj._update_inplace(row_partitions=new_parts,
columns=self.columns,
index=self.index)
if not inplace:
return new_obj
def filter(self, items=None, like=None, regex=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._row_metadata.first_valid_index()
def floordiv(self, other, axis='columns', level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self._operator_helper(pd.DataFrame.floordiv, other, axis, level,
fill_value)
@classmethod
def from_csv(self, path, header=0, sep=', ', index_col=0,
parse_dates=True, encoding=None, tupleize_cols=None,
infer_datetime_format=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_dict(self, data, orient='columns', dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_items(self, items, columns=None, orient='columns'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_records(self, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ge(self, other, axis='columns', level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
            axis: The axis to perform the ge over.
            level: The Multilevel index level to apply ge over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.ge, other, axis, level)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
return ray.get(_deploy_func.remote(lambda df: df.get_dtype_counts(),
self._row_partitions[0]))
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return ray.get(_deploy_func.remote(lambda df: df.get_ftype_counts(),
self._row_partitions[0]))
def get_value(self, index, col, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get_values(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def gt(self, other, axis='columns', level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.gt, other, axis, level)
def head(self, n=5):
"""Get the first n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the first n rows of the dataframe.
"""
if n >= len(self._row_metadata):
return self.copy()
new_dfs = _map_partitions(lambda df: df.head(n),
self._col_partitions)
index = self._row_metadata.index[:n]
return DataFrame(col_partitions=new_dfs,
columns=self.columns,
index=index)
def hist(self, data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all([d != np.dtype('O') for d in self.dtypes]):
raise TypeError(
"reduction operation 'argmax' not allowed for this dtype")
def remote_func(df):
return df.idxmax(axis=axis, skipna=skipna)
internal_indices = self._arithmetic_helper(remote_func, axis)
# do this to convert internal indices to correct index
return internal_indices.apply(lambda x: self.index[x])
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all([d != np.dtype('O') for d in self.dtypes]):
raise TypeError(
"reduction operation 'argmin' not allowed for this dtype")
def remote_func(df):
return df.idxmin(axis=axis, skipna=skipna)
internal_indices = self._arithmetic_helper(remote_func, axis)
# do this to convert internal indices to correct index
return internal_indices.apply(lambda x: self.index[x])
def infer_objects(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
def info_helper(df):
output_buffer = io.StringIO()
df.info(verbose=verbose,
buf=output_buffer,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts)
return output_buffer.getvalue()
# Combine the per-partition info and split into lines
result = ''.join(ray.get(_map_partitions(info_helper,
self._col_partitions)))
lines = result.split('\n')
# Class denoted in info() output
class_string = '<class \'ray.dataframe.dataframe.DataFrame\'>\n'
# Create the Index info() string by parsing self.index
index_string = self.index.summary() + '\n'
# A column header is needed in the info() output
col_header = 'Data columns (total {0} columns):\n'.format(
len(self.columns))
# Parse the per-partition values to get the per-column details
# Find all the lines in the output that start with integers
prog = re.compile('^[0-9]+.+')
col_lines = [prog.match(line) for line in lines]
cols = [c.group(0) for c in col_lines if c is not None]
# replace the partition column names with real column names
columns = ["{0}\t{1}\n".format(self.columns[i],
cols[i].split(" ", 1)[1])
for i in range(len(cols))]
col_string = ''.join(columns) + '\n'
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in self.dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + '\n'
# Compute the memory usage by summing per-partitions return values
# Parse lines for memory usage number
prog = re.compile('^memory+.+')
mems = [prog.match(line) for line in lines]
mem_vals = [float(re.search(r'\d+', m.group(0)).group())
for m in mems if m is not None]
memory_string = ""
if len(mem_vals) != 0:
# Sum memory usage from each partition
if memory_usage != 'deep':
memory_string = 'memory usage: {0}+ bytes'.format(
sum(mem_vals))
else:
memory_string = 'memory usage: {0} bytes'.format(sum(mem_vals))
# Combine all the components of the info() output
result = ''.join([class_string, index_string, col_header,
col_string, dtypes_string, memory_string])
# Write to specified output buffer
if buf:
buf.write(result)
else:
sys.stdout.write(result)
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if not is_list_like(value):
value = np.full(len(self.index), value)
if len(value) != len(self.index):
raise ValueError(
"Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError(
"cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)))
if loc < 0:
raise ValueError("unbounded slice")
partition, index_within_partition = \
self._col_metadata.insert(column, loc)
# Deploy insert function to specific column partition, and replace that
# column
def insert_col_part(df):
df.insert(index_within_partition, column, value, allow_duplicates)
return df
new_obj = _deploy_func.remote(insert_col_part,
self._col_partitions[partition])
new_cols = [self._col_partitions[i]
if i != partition
else new_obj
for i in range(len(self._col_partitions))]
new_col_names = self.columns.insert(loc, column)
self._update_inplace(col_partitions=new_cols, columns=new_col_names)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled, so in the remote function
we expand the generator into a list before getting it.
This is not ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
def update_iterrow(series, i):
"""Helper function to correct the columns + name of the Series."""
series.index = self.columns
series.name = list(self.index)[i]
return series
iters = ray.get([_deploy_func.remote(
lambda df: list(df.iterrows()), part)
for part in self._row_partitions])
iters = itertools.chain.from_iterable(iters)
series = map(lambda s: update_iterrow(s[1][1], s[0]), enumerate(iters))
return zip(self.index, series)
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled, so in the remote function
we expand the generator into a list before getting it.
This is not ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
iters = ray.get([_deploy_func.remote(
lambda df: list(df.items()), part)
for part in self._row_partitions])
def concat_iters(iterables):
for partitions in enumerate(zip(*iterables)):
series = | pd.concat([_series for _, _series in partitions[1]]) | pandas.concat |
import sys
import pandas as pd
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
from sqlalchemy import create_engine
def tokenize(text):
"""
Function to tokenize text using NLP pipeline with lemmatization
Args:
text (str): original text
Returns:
list of str: tokens of text
"""
text = re.sub("[^a-zA-Z0-9]"," ",text)
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
stopwords_list = stopwords.words("english")
for token in tokens:
clean_token = lemmatizer.lemmatize(token).lower().strip()
if (clean_token not in stopwords_list): clean_tokens.append(clean_token)
return clean_tokens
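# Hypothetical usage sketch (added for illustration; not part of the original pipeline).
# It shows the kind of output tokenize() produces; the expected tokens in the comment
# are approximate, since exact lemmas depend on the installed WordNet/stopword data.
def _tokenize_demo():
    sample = "people are reading the books"
    return tokenize(sample)  # e.g. ['people', 'reading', 'book']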
def load_data(messages_filepath, categories_filepath):
"""
Function to load datasets from filepaths
Returns:
dataframe: merged dataframe from two datasets
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories,on="id")
return df
def clean_data(df):
"""
Transform categories labels to columns and clean data errors
Args:
df (dataframe): merged dataframe containing message and categories
Returns:
df (dataframe): clean dataframe
"""
categories = df["categories"].str.split(";",expand=True)
row = categories.iloc[0]
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].apply(lambda x: x[-1])
categories[column] = | pd.to_numeric(categories[column]) | pandas.to_numeric |
import pandas as pd
from pathlib import Path
import argparse
def select_target_from_sf(csv_path):
sf_id = csv_path.stem
df = | pd.read_csv(csv_path, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import pymysql
import datetime
import glob
'''
Program description:
1. Read AIS data from a MySQL database into a local pandas DataFrame;
2. Read CSV files locally to make data processing easier;
3. Use datetime functions to convert the timestamps stored in the database into concrete datetimes, to make them easier to read;
4. Clean the trajectory data, reconstruct it, and output trajectory data that meets the requirements;
5. Produce the final per-MMSI trajectory motion-feature data for later use in deep learning.
'''
# Statement for connecting to the database
# dbconn = pymysql.connect(host = '127.0.0.1',user = 'root', passwd='<PASSWORD>',db= 'ais_dynamic',charset = 'utf8')
# SQL query statement
# sqlcmd = "select * from ais_dynamic.ais_dynamic limit 100"
# Read data from a CSV file for processing
# ais_file= pd.read_csv(r'C:\Users\cege-user\Desktop\dataset-ais\1-1000000-ais.csv',header = 0,sep = ' ',names = list('Record_Datetime','MMSI','Longitude','Latitude','Direction',
# 'Heading','Speed','Status','ROT','Position_Accuracy','UTC_Hour',
# 'UTC_Minute','UTC_Second','Message_ID','Rec_Datetime','Source_ID'))
ais_file1 = | pd.read_csv(r'D:\Data store file\dataset-ais\1-1000000-ais.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import time
import os
from sklearn.svm import SVR
import joblib
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit_utils import smiles_dataset
from utils import save_dataset
model_load = joblib.load('./models/model.pkl')
database = | pd.read_csv('./screening_base/in-vitro_zinc/in-vitro.csv') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The following request patterns trigger Interactive Brokers historical-data pacing violations and should be avoided:
Making identical historical data requests within 15 seconds.
Making six or more historical data requests for the same Contract, Exchange and Tick Type within two seconds.
Making more than 60 requests within any ten minute period.
"""
import os
import argparse
import time
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from signal import signal, SIGINT, SIG_DFL
import pickle
from quanttrader.event.event import EventType
from quanttrader.event.live_event_engine import LiveEventEngine
from quanttrader.brokerage.ib_brokerage import InteractiveBrokers
signal(SIGINT, SIG_DFL)
df = pd.DataFrame(columns=['Open', 'High', 'Low', 'Close', 'Volume'])
def log_event_handler(log_event):
print(f'{log_event.timestamp}: {log_event.content}')
def historical_event_handler(bar_event):
"""
local timezone based on tws setting
"""
global df
row_dict = {}
row_dict['Open'] = bar_event.open_price
row_dict['High'] = bar_event.high_price
row_dict['Low'] = bar_event.low_price
row_dict['Close'] = bar_event.close_price
row_dict['Volume'] = bar_event.volume
df1 = | pd.DataFrame(row_dict, index=[bar_event.bar_start_time]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import math
import sys
import numpy as np
import pandas as pd
import sample_functions
from sklearn import metrics, model_selection, svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
# Set y_name below to either 'pIC50_class' or 'pIGC50_class'.
# A file named descriptors_with_[y_name].csv is read in as the dataset and used in the calculation.
# Furthermore, if you change y_name to another name and prepare a descriptors_with_[y_name].csv file yourself,
# in the same format as the other files, with sample_program_6_8_0_csv.py or
# sample_program_6_8_0_sdf.py, you can run the same calculation.
y_name = 'pIC50_class'
# 'pIC50_class' : pharmacological-activity dataset for classification
# 'pIGC50_class' : environmental-toxicity dataset for classification
rate_of_test_samples = 0.25 # fraction of samples used as test data; greater than 0 and less than 1
method_name = 'rf' # 'knn' or 'svm' or 'rf'
number_of_submodels = 50 # number of submodels
rate_of_selected_x_variables = 0.7 # fraction of explanatory variables selected for each sub-dataset; greater than 0 and less than 1
add_nonlinear_terms_flag = False # True (add squared and cross terms) or False (do not add them)
fold_number = 5 # N in N-fold CV
max_number_of_k = 20 # maximum value of k to consider
svm_cs = 2 ** np.arange(-5, 11, dtype=float)
svm_gammas = 2 ** np.arange(-20, 11, dtype=float)
rf_number_of_trees = 300 # number of decision trees in RF
rf_x_variables_rates = np.arange(1, 11, dtype=float) / 10 # candidate fractions of explanatory variables used in each decision tree
if method_name != 'knn' and method_name != 'svm' and method_name != 'rf':
sys.exit('There is no classification method called \'{0}\'. Please check method_name.'.format(method_name))
dataset = pd.read_csv('descriptors_with_{0}.csv'.format(y_name), index_col=0) # load the dataset of properties/activities and descriptors
y = dataset.iloc[:, 0].copy()
x = dataset.iloc[:, 1:]
x = x.replace(np.inf, np.nan).fillna(np.nan) # replace inf with NaN
nan_variable_flags = x.isnull().any() # variables that contain NaN
x = x.drop(x.columns[nan_variable_flags], axis=1) # drop variables that contain NaN
number_of_test_samples = round(dataset.shape[0] * rate_of_test_samples)
# randomly split the data into training and test sets
# by passing a number to random_state, you get the same (although random) split whenever you reuse that number
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=number_of_test_samples, shuffle=True,
random_state=0)
class_types = list(set(y_train)) # class labels
class_types.sort(reverse=True) # sort
# drop explanatory variables whose standard deviation is 0
std_0_variable_flags = x_train.std() == 0
x_train = x_train.drop(x_train.columns[std_0_variable_flags], axis=1)
x_test = x_test.drop(x_test.columns[std_0_variable_flags], axis=1)
if add_nonlinear_terms_flag:
x_train = pd.read_csv('x_train_{0}.csv'.format(y_name), index_col=0) # load the dataset of properties/activities and descriptors
x_test = pd.read_csv('x_test_{0}.csv'.format(y_name), index_col=0) # load the dataset of properties/activities and descriptors
# x_train = sample_functions.add_nonlinear_terms(x_train) # add squared and cross terms of the explanatory variables
# x_test = sample_functions.add_nonlinear_terms(x_test) # add squared and cross terms of the explanatory variables
# drop explanatory variables whose standard deviation is 0
std_0_nonlinear_variable_flags = x_train.std() == 0
x_train = x_train.drop(x_train.columns[std_0_nonlinear_variable_flags], axis=1)
x_test = x_test.drop(x_test.columns[std_0_nonlinear_variable_flags], axis=1)
# autoscaling
autoscaled_x_train = (x_train - x_train.mean()) / x_train.std()
autoscaled_x_test = (x_test - x_train.mean()) / x_train.std()
if method_name == 'svm':
# to save time, optimize gamma only once here by maximizing the variance of the Gram matrix
optimal_svm_gamma = sample_functions.gamma_optimization_with_variance(autoscaled_x_train, svm_gammas)
number_of_x_variables = int(np.ceil(x_train.shape[1] * rate_of_selected_x_variables))
print('number of explanatory variables in each sub-dataset :', number_of_x_variables)
estimated_y_train_all = pd.DataFrame() # create an empty DataFrame to collect the estimated y of the training data for each submodel
selected_x_variable_numbers = [] # create an empty list to collect the explanatory-variable indices of each sub-dataset
submodels = [] # create an empty list to collect each trained submodel
for submodel_number in range(number_of_submodels):
print(submodel_number + 1, '/', number_of_submodels) # show progress
# selection of explanatory variables
# generate one uniform random number in [0, 1] per explanatory variable and select the variables with the smallest values
random_x_variables = np.random.rand(x_train.shape[1])
selected_x_variable_numbers_tmp = random_x_variables.argsort()[:number_of_x_variables]
selected_autoscaled_x_train = autoscaled_x_train.iloc[:, selected_x_variable_numbers_tmp]
selected_x_variable_numbers.append(selected_x_variable_numbers_tmp)
if method_name == 'knn':
# optimize k with CV
accuracy_in_cv_all = [] # create an empty list and append the cross-validated accuracy for each k to it
ks = [] # likewise, append the value of k to this list
for k in range(1, max_number_of_k + 1):
model = KNeighborsClassifier(n_neighbors=k, metric='euclidean') # declare the k-NN model
# compute the cross-validation estimates and convert them to a DataFrame
estimated_y_in_cv = pd.DataFrame(
model_selection.cross_val_predict(model, selected_autoscaled_x_train, y_train,
cv=fold_number))
accuracy_in_cv = metrics.accuracy_score(y_train, estimated_y_in_cv) # compute the accuracy
accuracy_in_cv_all.append(accuracy_in_cv) # append the accuracy
ks.append(k) # append the value of k
optimal_k = ks[accuracy_in_cv_all.index(max(accuracy_in_cv_all))]
submodel = KNeighborsClassifier(n_neighbors=optimal_k, metric='euclidean') # declare the k-NN model
elif method_name == 'svm':
# optimize C with CV
model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', gamma=optimal_svm_gamma),
{'C': svm_cs}, cv=fold_number)
model_in_cv.fit(selected_autoscaled_x_train, y_train)
optimal_svm_c = model_in_cv.best_params_['C']
# optimize gamma with CV
model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', C=optimal_svm_c),
{'gamma': svm_gammas}, cv=fold_number)
model_in_cv.fit(selected_autoscaled_x_train, y_train)
optimal_svm_gamma = model_in_cv.best_params_['gamma']
submodel = svm.SVC(kernel='rbf', C=optimal_svm_c, gamma=optimal_svm_gamma) # declare the SVM model
elif method_name == 'rf':
# optimize the fraction of explanatory variables using the OOB (Out-Of-Bag) score
accuracy_oob = []
for index, x_variables_rate in enumerate(rf_x_variables_rates):
model_in_validation = RandomForestClassifier(n_estimators=rf_number_of_trees, max_features=int(
max(math.ceil(selected_autoscaled_x_train.shape[1] * x_variables_rate), 1)), oob_score=True)
model_in_validation.fit(selected_autoscaled_x_train, y_train)
accuracy_oob.append(model_in_validation.oob_score_)
optimal_x_variables_rate = rf_x_variables_rates[accuracy_oob.index(max(accuracy_oob))]
submodel = RandomForestClassifier(n_estimators=rf_number_of_trees,
max_features=int(max(math.ceil(
selected_autoscaled_x_train.shape[1] * optimal_x_variables_rate), 1)),
oob_score=True) # declare the RF model
submodel.fit(selected_autoscaled_x_train, y_train) # build the model
submodels.append(submodel)
# save the explanatory variables of each sub-dataset and the submodels; note that existing files with the same names will be overwritten
pd.to_pickle(selected_x_variable_numbers, 'selected_x_variable_numbers.bin')
pd.to_pickle(submodels, 'submodels.bin')
# load the explanatory variables of each sub-dataset and the submodels
# loading right after saving, as done here, is not very meaningful in itself, but saving the explanatory
# variables of each sub-dataset and the submodels lets you skip the model-building step later when you want to predict new samples
selected_x_variable_numbers = pd.read_pickle('selected_x_variable_numbers.bin')
submodels = | pd.read_pickle('submodels.bin') | pandas.read_pickle |
#based on the following kernel: https://www.kaggle.com/hyeonho/pca-nusvc-0-95985
import numpy as np, pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.svm import NuSVC
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from tqdm import tqdm
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
oof_2 = np.zeros(len(train))
preds_2 = np.zeros(len(test))
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), | pd.DataFrame(test2[cols]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
"""Script to calculate the Amp-hours that were supplied in each
bike battery charging session.
"""
days_to_include = float(input('Enter # of Days to Include: '))
default_delta = 300.0 # default reading spacing in seconds
max_delta = 1000.0 # if spacing greater than this between charging readings, must be a new cycle
ending_amps = 0.1 # if amps are below this level charging is complete
import pandas as pd
import numpy as np
from bmondata import Server
from datetime import datetime, timedelta
server = Server('https://bmon.analysisnorth.com/')
start_ts = str(datetime.now()-timedelta(days=days_to_include))
df = server.sensor_readings('260034000c47343432313031_amps', start_ts=start_ts)
df.columns = ['amps']
df['ts'] = | pd.to_datetime(df.index) | pandas.to_datetime |
import numpy as np
import pandas as pd
import numba
import seaborn as sns
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from hfhd import hd
@numba.njit
def garch_11(n, sigma_sq_0, mu, alpha, beta, omega):
r"""
Generate GARCH(1, 1) log-returns of size n.
This function is accelerated via JIT with Numba.
Parameters
----------
n : int
The length of the wished time series.
sigma_sq_0 : float > 0
The variance starting value.
mu : float:
The drift of log-returns.
alpha : float >= 0
The volatility shock parameter. A higher value will lead to
larger spikes in volatility. A.k.a short-term persistence.
beta : float >= 0
The volatility persistence parameter. A larger value will
result in stronger persistence. A.k.a long-term persistence.
omega : float > 0
The variance constant. A higher value results in a higher
mean variance.
Returns
-------
r : numpy.ndarray
The GARCH log-returns time series.
sigma_sq : numpy.ndarray
The resulting variance time series with which each log-return
was generated.
Notes
-----
In general, the conditional variance of a GARCH(p,q) model is given by
.. math:: \sigma_{t}^{2}=\omega+\sum_{i=1}^{q} \alpha_{i}
\varepsilon_{t-i}^{2}+\sum_{j=1}^{p} \beta_{j} \sigma_{t-j}^{2}.
The unconditional variance is given by
.. math:: \sigma^{2}=\frac{\omega}{1-\sum_{i=1}^{q}
\alpha_{i}-\sum_{j=1}^{p} \beta_{j}}.
Here, :math:`p=q=1`,
and :math:`\epsilon_{t} \sim \mathcal{N}\left(0, 1\right)`
"""
nu = np.random.normal(0, 1, n)
r = np.zeros(n)
epsilon = np.zeros(n)
sigma_sq = np.zeros(n)
sigma_sq[0] = sigma_sq_0
if min(alpha, beta) < 0:
raise ValueError('alpha, beta need to be non-negative')
if omega <= 0:
raise ValueError('omega needs to be positive')
if alpha+beta >= 1:
print('''alpha+beta>=1, variance not defined
--> time series will not be weakly stationary''')
for i in range(n):
if i > 0:
sigma_sq[i] = omega + alpha * epsilon[i-1]**2 + beta * sigma_sq[i-1]
epsilon[i] = (sigma_sq[i]**0.5) * nu[i]
r[i] = mu + epsilon[i]
return r, sigma_sq
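# Minimal usage sketch (not part of the original module): draw one GARCH(1, 1) return
# path with weakly stationary parameters (alpha + beta < 1) and compare the sample
# variance of the returns with the unconditional variance omega / (1 - alpha - beta).
# The parameter values below are illustrative only.
def _garch_11_demo(n=100_000):
    # args: n, sigma_sq_0, mu, alpha, beta, omega
    r, sigma_sq = garch_11(n, 2e-5, 0.0, 0.05, 0.9, 1e-6)
    uncond_var = 1e-6 / (1 - 0.05 - 0.9)  # = 2e-5
    return r.var(), uncond_var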
class Universe:
r"""
The universe is a specification from which simulated realizations
can be sampled. Stocks follow a factor model, they belong
to industries and have an idiosyncratic component. Stocks are predictable
by a single feature.
Attributes
----------
feature_beta : float
The true coefficient.
factor_garch_spec : list
The garch specification for factor returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
industry_garch_spec : list
The garch specification for industry returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
resid_garch_spec : list
The garch specification for residual returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
factor_loadings : numpy.ndarray
An array with factor loadings for each stock and factor.
dim = n_stocks x n_factors
industry_loadings : numpy.ndarray
An array with industry loadings for each stock and industry.
dim = n_stocks x n_industry
This is usually a sparse matrix. One stock loads typically on
one or two industries. A good number of industries is 10 to 20.
liquidity : float
A value between 0 and 1 that describes liquidity.
A value of 1 means that the probability of observation
is 100% each minute. 0.5 means that there is a 50%
probability of observing a price each minute.
gamma : float >=0
The microstructure noise will be zero-mean Gaussian with variance
$\gamma^2 var(r)$, where $var(r)$ is the variance of the
underlying true return process. This noise is be added to the price.
freq : str, ``'s'`` or ``'m'``.
The granularity of the discretized continous price process.
"""
def __init__(self, feature_beta, factor_garch_spec, industry_garch_spec,
resid_garch_spec, factor_loadings, industry_loadings,
liquidity=0.5, gamma=2, freq='m'):
self.feature_beta = feature_beta
self.factor_garch_spec = factor_garch_spec
self.industry_garch_spec = industry_garch_spec
self.resid_garch_spec = resid_garch_spec
self.factor_loadings = factor_loadings
self.industry_loadings = industry_loadings
self.liquidity = liquidity
self.gamma = gamma
self.freq = freq
self.n_stocks = self.factor_loadings.shape[0]
self.n_ind = self.industry_loadings.shape[1]
self.n_factors = self.factor_loadings.shape[1]
@staticmethod
def uncond_var(spec):
'''
Compute the unconditional variance from a
GARCH(1,1) specification.
Parameters
----------
spec : list
The garch specification.
``[sigma_sq_0, mu, alpha, beta, omega]``
Returns
-------
float
The unconditional variance.
'''
return spec[4]/(1-spec[2]-spec[3])
def uncond_cov(self):
'''
Compute the unconditional covariance of stock returns
in the universe from a universe specification.
Returns
-------
numpy.ndarray
The unconditional covariance matrix.
'''
sf = np.diag([self.uncond_var(self.factor_garch_spec)]*self.n_factors)
sr = np.diag([self.uncond_var(self.resid_garch_spec)]*self.n_stocks)
si = np.diag([self.uncond_var(self.industry_garch_spec)]*self.n_ind)
return (self.factor_loadings @ sf @ self.factor_loadings.T
+ sr
+ self.industry_loadings @ si @ self.industry_loadings.T)
def cond_cov(self):
'''
Compute the daily conditional integrated covariance matrix of stock
returns within regular market hours in the universe from a realized
universe simulation.
Returns
-------
list
A list containing the conditional integrated covariance matrices
of each day.
'''
sr = pd.DataFrame(self.sigma_sq_resid)
sr.index = pd.to_datetime(sr.index, unit=self.freq)
sr = sr.between_time('9:30', '16:00',
include_start=True,
include_end=True)
sr = sr.resample('1d').sum()
si = pd.DataFrame(self.sigma_sq_industry)
si.index = pd.to_datetime(si.index, unit=self.freq)
si = si.between_time('9:30', '16:00',
include_start=True,
include_end=True)
si = si.resample('1d').sum()
sf = | pd.DataFrame(self.sigma_sq_factor) | pandas.DataFrame |
#!/usr/bin/env python3
# coding: utf-8
"""
@author:qiuping1
@file:data_parse.py
@time:2020/12/30
change log: 2021/01/12 added data QC related code and debugged it
python ./data_parse.py --input_path ../data/01.LiverCancer/DP8400012941BR_E4/DP8400012941BR_E4.txt --out_dir ../data/E4/ --read_raw --bin_size 200
python ./data_parse.py --input_path ../data/E4/raw_andata.bin200.h5ad --out_dir ../data/E4/ --run_filter --normalize --bin_size 200 --max_gene_cnt 7000 --min_genes 200 --min_cells 3 --max_mt 15
"""
import pandas as pd
import scanpy as sc
import numpy as np
import argparse
from scipy import sparse
import sys
def read_raw_file_bak(inpath, step):
"""
Read the raw bin1 data and return an AnnData object.
:param inpath: input file path
:param step: size of the bins to merge
:param output: path for saving the AnnData
:return: AnnData
"""
df = pd.read_csv(inpath, sep='\t')
df.dropna(inplace=True)
df.columns = list(df.columns[0:-1]) + ['UMICount']
df['x1'] = (df['x'] / step).astype(np.int32)
df['y1'] = (df['y'] / step).astype(np.int32)
df['pos'] = df['x1'].astype(str) + "-" + df['y1'].astype(str)
g = df.groupby(['geneID', 'pos'])['UMICount'].sum()
g = g.to_frame().reset_index()
# each gene must be captured in at least 3 bins
# g = g[g.groupby('geneID')['geneID'].transform('size') > 2]
g = g.pivot(index='pos', columns='geneID', values='UMICount').fillna(0)
# each bin must capture at least 50 genes
# g = g.loc[:, g.sum() >= 50]
adata = sc.AnnData(g)
pos = np.array(list(adata.obs.index.str.split('-', expand=True)), dtype=np.int)
pos[:, 1] = pos[:, 1] * -1
adata.obsm['spatial'] = pos
return adata
def read_raw_file(inpath, step):
df = pd.read_csv(inpath, sep='\t')
df.dropna(inplace=True)
df.columns = list(df.columns[0:-1]) + ['UMICount']
df['x1'] = (df['x'] / step).astype(np.int32)
df['y1'] = (df['y'] / step).astype(np.int32)
df['pos'] = df['x1'].astype(str) + "-" + df['y1'].astype(str)
bindf = df.groupby(['pos', 'geneID'])['UMICount'].sum()
cells = set(x[0] for x in bindf.index)
genes = set(x[1] for x in bindf.index)
cellsdic = dict(zip(cells, range(0, len(cells))))
genesdic = dict(zip(genes, range(0, len(genes))))
rows = [cellsdic[x[0]] for x in bindf.index]
cols = [genesdic[x[1]] for x in bindf.index]
print(f'the matrix has {len(cells)} bins, and {len(genes)} genes.')
expMtx = sparse.csr_matrix((bindf.values, (rows, cols))).toarray()
print(f'the size of matrix is {sys.getsizeof(expMtx) / 1073741824} G.')
obs = | pd.DataFrame(index=cells) | pandas.DataFrame |
import pandas as pd
import numpy as np
from os.path import join as oj
def load_daily_data(usafacts_data_cases='usafacts/confirmed_cases.csv',
usafacts_data_deaths='usafacts/deaths.csv',
dir_mod = ""):
usafacts_data_cases = oj(dir_mod, usafacts_data_cases)
usafacts_data_deaths = oj(dir_mod, usafacts_data_deaths)
cases = | pd.read_csv(usafacts_data_cases, encoding="iso-8859-1", index_col=0) | pandas.read_csv |
import os
import random
import string
import sqlalchemy
import pandas as pd
from faker import Faker
def get_account_list():
# This prevents getting the same name twice as a set is distinct
names = ['<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'8Cp9RdNXaqJarLKLEhNyy1Zrg5VAyEN2iSFkxWan4eZyGtb7W1k',
'<KEY>vTH6XcFmhochP<KEY>sGP',
'<KEY>tKdYLUk59aZ5k29wRj6gHCYwYYVM',
'<KEY>zDeCtW4uLhrDnBzhMWBQh2o',
'8CCCuKBtVXaijVWbNrHnPSmespPd4okcizxaqfiipm1MRidmCUa',
'<KEY>XGs18pBi29t',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'8BJdKw7nDhX3pcZQsXBL7R8FCHjwbyi8tDiaetS15ioQ93VpUHW',
'8BqAjDB2V7ewAYv5RMJXdRtsrYyFmYXNTf41a9gVNExs8aBeafG',
'8CmFWSkrcCpNcXWXb7YsWeRAfhRew9zgYiqnoGQADvmNjV6UYF6',
'8CgXtDXoGEjK78RCJrYa1LV8sJZpC9bStGazzAqjtL64aGZXsFX',
'8C7rTGDKtugNdVyHi5fuYaA7uV26p7Uyn2v3ErB6XrJ59HDxTkk',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'8BZF397yZeAxd2VBuQax6VLH6QvcT1CMhX1k3qGoexW2GaQUGKq',
'8BBaWXo5k6cnwWwEvoWuy3m1nEDyTrZ2JbfFMSNJzA4rwC5onKC',
'<KEY>',
'8BjbGEzoYsVPBRRnr9bdsLN9ifsyiHKBLGeAiAYXqFvzfoBMLns',
'8CsFXhyPwzguAQABxf6GXaEgmTvqyX1X5s2vo5UM6Byt3mmw869',
'8CTsQwJmbjPunUruEbuk9AdbvWu8imKcUHcPZYAKfigsr3qWCgX',
'<KEY>tKHPN2t7CrF7RhbBMgkgofPED7bUuvNCE',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>mKWVACuhpR3RuxJ6d47M77saRvHso3nei9X5cRT4RN7',
'8Ciea9g8GLFWWCL5DMomeiyAngaw45yGeYcnYACVBTJtZCTWKan',
'<KEY>',
'<KEY>',
'8Ct8BWUY2g7XUhjhxQe6trLffjdoAXJNbuCsQaBxPv62iqyjh2x',
'8CsExqqMPcPGrRwftQzqd7rX5ct153sxYf6FVzCpFdsU7MMEB1K',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>QKs8q5GwGfyQSXh1cov9qQ6e9',
'8CRVqp1wSPR2P9RCFEfoUaozGpZALfcdU2xUaUCYNJcdydtp8JA',
'8BBxtCi8WNkN7sZgQcEHr3XTbWGpxE5zhKsPqut6yozvrUoeuth',
'8CTfKi3u2vHtBdue5dteu85UaYhdTgXwyhY1XFDv3LhmcszerZH',
'8BZekjR4AVGhfZHPWKX9ndhDWLiX3Ct9qbtMDuTBXscwE7e7Tos',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'8Cnhxitj3eGXEFpkYboHEqUs5AE1GQUjPNg38pNvim6ZzkUez5E',
'8CP1XUYQ9ykRf6WZ2EVSDKgAAaLi1dNXag1ucpFtvVQmyTKSFHV',
'<KEY>',
'<KEY>fRsXxdBHb',
'8Cs7GG1RkVfSdoHE7rxswsZbLaYRQ6vvD2iGHsD6jzwZergNm4E',
'8C2F5c49ui8ynbFhgV5djTR17wBkFiD1SgG5RSSUKyGf218cFt3',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'8CptBktPwLY38Ktuv6p81bmvxwPGMDWbfHqnJwFMRPJ8awioask',
'<KEY>AQtHLggr5JfbuGVJ5oG7',
'8CYDCUNh6XcNnFnYa1VTT5xPUBboXfH8psp3TGYXQPFZ87Bx4Kf',
'8CuBrDBj5ARApUjoxNzPAbQt88EvPtVG97qLSWQce1kq4vhFqTF',
'8Cqn4pknZ8XkAjYuesVu4WH1Ev6bbMofLiRqviwomoAJ445ED36',
'8BDEy6RrnaVZ1cKYV6N2ANZYvYP4vEXtVHjDhC2o4w7aMRXg1Jo',
'8CZtQm4zgvj6i2YhrRcpAQpdWkMpmdyY3aphPpscPUkokFgcYrM',
'8BhpkJmja3r5ECrwZEWhu8FuazwT7XXvtkh8eGUtxo3uvrmaLyY',
]
return names
def connect_to_db(_file):
if not os.path.exists(_file):
raise AssertionError("DB File not found")
engine = sqlalchemy.create_engine(f'sqlite:///{_file}')
sqlalchemy_connection = engine.connect()
print("DB Connection established")
return sqlalchemy_connection
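# Hypothetical driver (for illustration only; the DB file name is an assumption and the
# module-level Faker instance `fake` used by the create_* helpers must exist elsewhere
# in this script). It shows the intended order of the helpers defined below: connect to
# the SQLite file, wipe the existing rows, then insert fake rows table by table.
def _populate_demo_db(db_file='pool_test.db'):
    conn = connect_to_db(db_file)
    clean_db(conn)
    create_round_test_data(conn)
    create_account_test_data(conn)
    create_payment_test_data(conn)
    return conn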
def clean_db(connection):
tables = ['round', 'block', 'account', 'banned_connections_api', 'banned_users_connections']
for table in tables:
connection.execute(f'DELETE FROM {table}')
def create_round_test_data(connection):
# Todo get Config
rounds = 10
# Create an Empty Dataframe to hold the Data
dataframe_round = pd.DataFrame(columns=['round_number',
'total_shares',
'total_reward',
'blocks',
'start_date_time',
'end_date_time',
'is_active',
'is_paid'
]
)
for x in range(rounds):
total_share = round(random.uniform(1, 10), 2)
total_reward = round(random.uniform(1, 10), 2)
blocks = random.randint(1, 10)
start_date_time = fake.past_datetime()
end_date_time = fake.past_datetime()
is_active = random.randint(0, 1)
is_paid = random.randint(0, 1)
data_dict = {
'total_shares': total_share,
'total_reward': total_reward,
'blocks': blocks,
'start_date_time': start_date_time,
'end_date_time': end_date_time,
'is_active': is_active,
'is_paid': is_paid,
}
dataframe_round = dataframe_round.append(data_dict, ignore_index=True)
dataframe_round.to_sql('round', con=connection, index=False, if_exists='append')
print("Inserted Test Data for 'Round'")
def create_account_test_data(connection):
# Todo get Config
accounts = 100
# Create an Empty Dataframe to hold the Data
dataframe_account = pd.DataFrame(columns=[
'name',
'created_at',
'last_active',
'connection_count',
'shares',
'hashrate',
'display_name'
]
)
names = get_account_list()
for x in range(accounts):
name = names[x]
created_at = fake.past_datetime()
last_active = fake.past_datetime()
connection_count = random.randint(1, 100)
shares = round(random.uniform(1, 100), 2)
hashrate = round(random.uniform(1, 10), 2)
letters = string.ascii_lowercase
display_name = ''.join(random.choice(letters) for i in range(10))
data_dict = {
'name': name,
'created_at': created_at,
'last_active': last_active,
'connection_count': connection_count,
'shares': shares,
'hashrate': hashrate,
'display_name' : display_name,
}
dataframe_account = dataframe_account.append(data_dict, ignore_index=True)
dataframe_account.to_sql('account', con=connection, index=False, if_exists='append')
print("Inserted Test Data for 'Account'")
def create_payment_test_data(connection):
accounts = 100
# Create an Empty Dataframe to hold the Data
dataframe_payment = pd.DataFrame(columns=[
'name',
'amount',
'shares',
'payment_date_time',
'round',
'tx_id',
]
)
names = get_account_list()
df_tmp = pd.read_sql(sql='SELECT round_number from round', con=connection)
round_list = list(df_tmp['round_number'])
for x in range(accounts):
name = names[x]
amount = round(random.uniform(1, 10), 2)
shares = round(random.uniform(1, 100), 2)
payment_date_time = fake.past_datetime()
round_value = random.choice(round_list)
tx_id_value = fake.md5(raw_output=False)
data_dict = {
'name': name,
'amount': amount,
'shares': shares,
'payment_date_time': payment_date_time,
'round': round_value,
'tx_id': tx_id_value,
}
dataframe_payment = dataframe_payment.append(data_dict, ignore_index=True)
dataframe_payment.to_sql('payment', con=connection, index=False, if_exists='append')
print("Inserted Test Data for 'Payment'")
def create_block_test_data(connection):
# Todo get Config
blocks = 1000
# Create an Empty Dataframe to hold the Data
dataframe_block = pd.DataFrame(columns=[
'hash',
'height',
'type',
'difficulty',
'orphan',
'block_finder',
'round',
'block_found_time',
'mainnet_reward',
]
)
df_tmp = pd.read_sql(sql='SELECT round_number from round', con=connection)
round_list = list(df_tmp['round_number'])
df_tmp = pd.read_sql(sql='SELECT name from account', con=connection)
account_list = list(df_tmp['name'])
for x in range(blocks):
hash = fake.md5(raw_output=False)
height = random.randint(1, 100)
type = 'Test'
difficulty = round(random.uniform(1, 100), 2)
orphan = random.randint(1, 100)
block_finder = str(random.choice(account_list))
round_value = random.choice(round_list)
block_found_time = fake.past_datetime()
mainnet_reward = round(random.uniform(0, 1), 2)
data_dict = {
'hash': hash,
'height': height,
'type': type,
'difficulty': difficulty,
'orphan': orphan,
'block_finder': block_finder,
'round': round_value,
'block_found_time': block_found_time,
'mainnet_reward': mainnet_reward,
}
dataframe_block = dataframe_block.append(data_dict, ignore_index=True)
dataframe_block.to_sql('block', con=connection, index=False, if_exists='append')
print("Inserted Test Data for 'Block'")
def create_banned_connections_api_test_data(connection):
# Todo get Config
banned_connections_api = 100
# Create an Empty Dataframe to hold the Data
dataframe_banned = pd.DataFrame(columns=[
'ip',
]
)
for x in range(banned_connections_api):
ip = fake.ipv4()
data_dict = {
'ip': ip,
}
dataframe_banned = dataframe_banned.append(data_dict, ignore_index=True)
dataframe_banned.to_sql('banned_connections_api', con=connection, index=False, if_exists='append')
print("Inserted Test Data for 'banned_connections_api'")
def create_banned_users_connections_test_data(connection):
# Todo get Config
banned_users_connections = 10
# Create an Empty Dataframe to hold the Data
dataframe_banned_users = pd.DataFrame(columns=[
'user',
'ip',
]
)
df_tmp = | pd.read_sql(sql='SELECT name from account', con=connection) | pandas.read_sql |
#!/usr/bin/env python
"""
This script enables training and comparison of models on multiple GPUs.
Usage:
```
python scripts/automate_training.py -c path/to/config.json -ch path/to/config_hyper.json \
-n number_of_iterations --all-combin
```
"""
import argparse
import copy
import itertools
from functools import partial
import json
import random
import collections.abc
import shutil
import sys
import joblib
import pandas as pd
import numpy as np
import torch.multiprocessing as mp
from ivadomed.loader.bids_dataframe import BidsDataframe
import ivadomed.scripts.visualize_and_compare_testing_models as violin_plots
from pathlib import Path
from loguru import logger
from ivadomed import main as ivado
from ivadomed import config_manager as imed_config_manager
from ivadomed.loader import utils as imed_loader_utils
from ivadomed.scripts.compare_models import compute_statistics
from ivadomed import utils as imed_utils
from ivadomed.keywords import ConfigKW,SplitDatasetKW, LoaderParamsKW
LOG_FILENAME = 'log.txt'
logger.add(LOG_FILENAME)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", required=True, help="Base config file path.",
metavar=imed_utils.Metavar.file)
parser.add_argument("-ch", "--config-hyper", dest="config_hyper", required=True,
help="JSON file where hyperparameters to experiment are listed.",
metavar=imed_utils.Metavar.file)
parser.add_argument("-pd", "--path-data", required=False, help="Path to BIDS data.",
metavar=imed_utils.Metavar.int)
parser.add_argument("-n", "--n-iterations", dest="n_iterations", default=1,
type=int, help="Number of times to run each config.",
metavar=imed_utils.Metavar.int)
parser.add_argument("--all-combin", dest='all_combin', action='store_true',
help="To run all combinations of config"),
parser.add_argument("-m", "--multi-params", dest="multi_params", action='store_true',
help="To change multiple parameters at once.")
parser.add_argument("--run-test", dest='run_test', action='store_true',
help="Evaluate the trained model on the testing sub-set.")
parser.add_argument("--fixed-split", dest='fixed_split', action='store_true',
help="Keep a constant dataset split for all configs and iterations")
parser.add_argument("-l", "--all-logs", dest="all_logs", action='store_true',
help="Keep all log directories for each iteration.")
parser.add_argument('-t', '--thr-increment', dest="thr_increment", required=False, type=float,
help="""A threshold analysis is performed at the end of the training using
the trained model and the validation sub-dataset to find the optimal
binarization threshold. The specified value indicates the increment
between 0 and 1 used during the analysis (e.g. 0.1).""",
metavar=imed_utils.Metavar.float)
parser.add_argument("-o", "--output_dir", required=False,
help="Output Folder.")
return parser
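# Illustrative only (not part of ivadomed): how the parser above is typically consumed.
# The file names are placeholders, not real paths.
def _parse_demo_args():
    return get_parser().parse_args(
        ['-c', 'config.json', '-ch', 'config_hyper.json', '-n', '2', '--run-test'])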
def train_worker(config, thr_incr):
"""
Args:
config (dict): dictionary containing configuration details.
thr_incr (float): A threshold analysis is performed at the end of the training
using the trained model and the validation sub-dataset to find the optimal binarization
threshold. The specified value indicates the increment between 0 and 1 used during the
ROC analysis (e.g. 0.1). Flag: ``-t``, ``--thr-increment``
"""
current = mp.current_process()
# ID of process used to assign a GPU
ID = int(current.name[-1]) - 1
# Use GPU i from the array specified in the config file
config[ConfigKW.GPU_IDS] = [config[ConfigKW.GPU_IDS][ID]]
# Call ivado cmd_train
try:
# Save best validation score
config[ConfigKW.COMMAND] = "train"
best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = \
ivado.run_command(config, thr_increment=thr_incr)
except Exception:
logger.exception('Got exception on main handler')
logger.info("Unexpected error: {}", sys.exc_info()[0])
raise
# Save config file in output path
config_copy = open(config[ConfigKW.PATH_OUTPUT] + "/config_file.json", "w")
json.dump(config, config_copy, indent=4)
return config[ConfigKW.PATH_OUTPUT], best_training_dice, best_training_loss, best_validation_dice, \
best_validation_loss
def test_worker(config):
# Call ivado cmd_eval
current = mp.current_process()
# ID of process used to assign a GPU
ID = int(current.name[-1]) - 1
# Use GPU i from the array specified in the config file
config[ConfigKW.GPU_IDS] = [config[ConfigKW.GPU_IDS][ID]]
try:
# Save best test score
config[ConfigKW.COMMAND] = "test"
df_results, test_dice = ivado.run_command(config)
except Exception:
logger.exception('Got exception on main handler')
logger.info("Unexpected error: {}", sys.exc_info()[0])
raise
return config[ConfigKW.PATH_OUTPUT], test_dice, df_results
def split_dataset(initial_config):
"""
Args:
initial_config (dict): The original config file, which we use as a basis from which
to modify our hyperparameters.
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
"""
loader_parameters = initial_config[ConfigKW.LOADER_PARAMETERS]
path_output = Path(initial_config[ConfigKW.PATH_OUTPUT])
if not path_output.is_dir():
logger.info(f'Creating output path: {path_output}')
path_output.mkdir(parents=True)
else:
logger.info(f'Output path already exists: {path_output}')
bids_df = BidsDataframe(loader_parameters, str(path_output), derivatives=True)
train_lst, valid_lst, test_lst = imed_loader_utils.get_new_subject_file_split(
df=bids_df.df,
data_testing=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.DATA_TESTING],
split_method=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.SPLIT_METHOD],
random_seed=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.RANDOM_SEED],
train_frac=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.TRAIN_FRACTION],
test_frac=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.TEST_FRACTION],
path_output="./",
balance=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.BALANCE] \
if SplitDatasetKW.BALANCE in initial_config[ConfigKW.SPLIT_DATASET] else None
)
# save the subject distribution
split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst}
split_path = "./" + "common_split_datasets.joblib"
joblib.dump(split_dct, split_path)
initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.FNAME_SPLIT] = split_path
return initial_config
def make_config_list(param_list, initial_config, all_combin, multi_params):
"""Create a list of config dictionaries corresponding to different hyperparameters.
Args:
param_list (list)(HyperparameterOption): A list of the different hyperparameter options.
initial_config (dict): The original config file, which we use as a basis from which
to modify our hyperparameters.
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
all_combin (bool): If true, combine the hyperparameters combinatorically.
multi_params (bool): If true, combine the hyperparameters by index in the list, i.e.
all the first elements, then all the second elements, etc.
Returns:
list, dict: A list of configuration dictionaries, modified by the hyperparameters.
.. code-block:: python
config_list = [
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'DiceLoss'}"
},
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "FocalLoss", "gamma": 0.2, "alpha": 0.5}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'FocalLoss', 'gamma': 0.2, 'alpha': 0.5}"
},
# etc...
]
"""
config_list = []
if all_combin:
keys = set([hyper_option.base_key for hyper_option in param_list])
for combination in list(itertools.combinations(param_list, len(keys))):
if keys_are_unique(combination):
new_config = copy.deepcopy(initial_config)
path_output = new_config[ConfigKW.PATH_OUTPUT]
for hyper_option in combination:
new_config = update_dict(new_config, hyper_option.option, hyper_option.base_key)
folder_name_suffix = hyper_option.name
folder_name_suffix = folder_name_suffix.translate({ord(i): None for i in '[]}{ \''})
folder_name_suffix = folder_name_suffix.translate({ord(i): '-' for i in ':=,'})
path_output = path_output + folder_name_suffix
new_config[ConfigKW.PATH_OUTPUT] = path_output
config_list.append(new_config)
elif multi_params:
base_keys = get_base_keys(param_list)
base_key_dict = {key: [] for key in base_keys}
for hyper_option in param_list:
base_key_dict[hyper_option.base_key].append(hyper_option)
max_length = np.min([len(base_key_dict[base_key]) for base_key in base_key_dict.keys()])
for i in range(0, max_length):
new_config = copy.deepcopy(initial_config)
path_output = new_config[ConfigKW.PATH_OUTPUT]
for key in base_key_dict.keys():
hyper_option = base_key_dict[key][i]
new_config = update_dict(new_config, hyper_option.option, hyper_option.base_key)
folder_name_suffix = hyper_option.name
folder_name_suffix = folder_name_suffix.translate({ord(i): None for i in '[]}{ \''})
folder_name_suffix = folder_name_suffix.translate({ord(i): '-' for i in ':=,'})
path_output = path_output + folder_name_suffix
new_config[ConfigKW.PATH_OUTPUT] = path_output
config_list.append(new_config)
else:
for hyper_option in param_list:
new_config = copy.deepcopy(initial_config)
update_dict(new_config, hyper_option.option, hyper_option.base_key)
folder_name_suffix = hyper_option.name
folder_name_suffix = folder_name_suffix.translate({ord(i): None for i in '[]}{ \''})
folder_name_suffix = folder_name_suffix.translate({ord(i): '-' for i in ':=,'})
new_config[ConfigKW.PATH_OUTPUT] = initial_config[ConfigKW.PATH_OUTPUT] + folder_name_suffix
config_list.append(new_config)
return config_list
class HyperparameterOption:
"""Hyperparameter option to edit config dictionary.
This class is used to edit a standard config file. For example, say we want to edit the
following config file:
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
Say we want to change the ``loss``. We could have:
.. code-block::
base_key = "loss"
base_option = {"name": "FocalLoss", "gamma": 0.5}
option = {"training_parameters": {"loss": {"name": "FocalLoss", "gamma": 0.5}}}
Attributes:
base_key (str): the key whose value you want to edit.
option (dict): the full tree path to the value you want to insert.
base_option (dict): the value you want to insert.
name (str): the name to be used for the output folder.
"""
def __init__(self, base_key=None, option=None, base_option=None):
self.base_key = base_key
self.option = option
self.base_option = base_option
self.name = None
self.create_name_str()
def __eq__(self, other):
return self.base_key == other.base_key and self.option == other.option
def create_name_str(self):
self.name = "-" + str(self.base_key) + "=" + str(self.base_option).replace("/", "_")
def get_param_list(my_dict, param_list, superkeys):
"""Recursively create the list of hyperparameter options.
Args:
my_dict (dict): A dictionary of parameters.
param_list (list)(HyperparameterOption): A list of HyperparameterOption objects.
superkeys (list)(str): TODO
Returns:
list, HyperparameterOption: A list of HyperparameterOption objects.
"""
for key, value in my_dict.items():
if type(value) is list:
for element in value:
dict_prev = {key: element}
for superkey in reversed(superkeys):
dict_new = {}
dict_new[superkey] = dict_prev
if len(superkeys) == 0:
dict_new = dict_prev
hyper_option = HyperparameterOption(base_key=key, option=dict_new,
base_option=element)
param_list.append(hyper_option)
else:
param_list = get_param_list(value, param_list, superkeys + [key])
return param_list
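# Minimal sketch (not part of ivadomed): what get_param_list produces for a tiny
# hyperparameter config. The keys and values below are illustrative only.
def _param_list_demo():
    config_hyper = {"default_model": {"depth": [2, 3]}}
    options = get_param_list(config_hyper, [], [])
    # -> two HyperparameterOption objects with base_key 'depth' and options
    #    {'default_model': {'depth': 2}} and {'default_model': {'depth': 3}}
    return [(o.base_key, o.option) for o in options]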
def update_dict(d, u, base_key):
"""Update a given dictionary recursively with a new sub-dictionary.
Example 1:
.. code-block:: python
d = {
'foo': {
'bar': 'some example text',
'baz': {'zag': 5}
}
}
u = {'foo': {'baz': {'zag': 7}}}
base_key = 'zag'
>>> print(update_dict(d, u, base_key))
{
'foo': {
'bar': 'some example text',
'baz': {'zag': 7}
}
}
Example 2:
.. code-block:: python
d = {
'foo': {
'bar': 'some example text',
'baz': {'zag': 5}
}
}
u = {'foo': {'baz': {'zag': 7}}}
base_key = 'foo'
>>> print(update_dict(d, u, base_key))
{
'foo': {
'baz': {'zag': 7}
}
}
Args:
d (dict): A dictionary to update.
u (dict): A subdictionary to update the original one with.
base_key (str): the string indicating which level to update.
Returns:
dict: An updated dictionary.
"""
for k, v in u.items():
if k == base_key:
d[k] = v
elif isinstance(v, collections.abc.Mapping):
d[k] = update_dict(d.get(k, {}), v, base_key)
else:
d[k] = v
return d
def keys_are_unique(hyperparam_list):
"""Check if the ``base_keys`` in a list of ``HyperparameterOption`` objects are unique.
Args:
hyperparam_list (list)(HyperparameterOption): a list of hyperparameter options.
Returns:
bool: True if all the ``base_keys`` are unique, otherwise False.
"""
keys = [item.base_key for item in hyperparam_list]
keys = set(keys)
return len(keys) == len(hyperparam_list)
def get_base_keys(hyperparam_list):
"""Get a list of base_keys from a param_list.
Args:
hyperparam_list (list)(HyperparameterOption): a list of hyperparameter options.
Returns:
base_keys (list)(str): a list of base_keys.
"""
base_keys_all = [hyper_option.base_key for hyper_option in hyperparam_list]
base_keys = []
for base_key in base_keys_all:
if base_key not in base_keys:
base_keys.append(base_key)
return base_keys
def format_results(results_df, config_list, param_list):
"""Merge config and results in a df."""
config_df = pd.DataFrame.from_dict(config_list)
keep = list(set([list(hyper_option.option.keys())[0] for hyper_option in param_list]))
keep.append(ConfigKW.PATH_OUTPUT)
config_df = config_df[keep]
results_df = config_df.set_index(ConfigKW.PATH_OUTPUT).join(results_df.set_index(ConfigKW.PATH_OUTPUT))
results_df = results_df.reset_index()
results_df = results_df.sort_values(by=['best_validation_loss'])
return results_df
def automate_training(file_config, file_config_hyper, fixed_split, all_combin, path_data=None,
n_iterations=1, run_test=False, all_logs=False, thr_increment=None,
multi_params=False, output_dir=None, plot_comparison=False):
"""Automate multiple training processes on multiple GPUs.
Hyperparameter optimization of models is tedious and time-consuming. This function automatizes
this optimization across multiple GPUs. It runs trainings, on the same training and validation
datasets, by combining a given set of parameters and set of values for each of these parameters.
Results are collected for each combination and reported into a dataframe to allow their
comparison. The script efficiently allocates each training to one of the available GPUs.
Usage Example::
ivadomed_automate_training -c config.json -ch config_hyper.json -n n_iterations
.. csv-table:: Example of dataframe
:file: ../../images/detailed_results.csv
Config File:
The config file is the standard config file used in ``ivadomed`` functions. We use this
as the basis. We call a key of this config file a ``category``. In the example below,
we would say that ``training_parameters``, ``default_model``, and ``path_output`` are
``categories``.
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
Hyperparameter Config File:
The hyperparameter config file should have the same layout as the config file. To select
a hyperparameter you would like to vary, just list the different options under the
appropriate key, which we call the ``base_key``. In the example below, we want to vary the
``loss``, ``depth``, and ``model_name``; these are our 3 ``base_keys``. As you can see,
we have listed our different options for these keys. For ``depth``, we have listed
``2``, ``3``, and ``4`` as our different options.
How we implement this depends on 3 settings: ``all_combin``, ``multi_param``,
or the default.
.. code-block:: JSON
{
"training_parameters": {
"loss": [
{"name": "DiceLoss"},
{"name": "FocalLoss", "gamma": 0.2, "alpha" : 0.5}
],
},
"default_model": {"depth": [2, 3, 4]},
"model_name": ["seg_sc_t2star", "find_disc_t1"]
}
Default:
The default option is to change only one parameter at a time relative to the base
config file. We then create a list of config options, called ``config_list``.
Using the examples above, we would have ``2 + 2 + 3 = 7`` different config options:
.. code-block:: python
config_list = [
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'DiceLoss'}"
},
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "FocalLoss", "gamma": 0.2, "alpha": 0.5}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'FocalLoss', 'gamma': 0.2, 'alpha': 0.5}"
},
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 2
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-depth=2"
},
# etc ...
]
All Combinations:
If we select the ``all_combin`` option, we will create a list of configuration options
combinatorically. Using the config examples above, we would have ``2 * 3 * 2 = 12``
different config options. I'm not going to write out the whole ``config_list`` because it's
quite long, but here are the combinations:
.. code-block::
loss = DiceLoss, depth = 2, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 2, model_name = "seg_sc_t2star"
loss = DiceLoss, depth = 3, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 3, model_name = "seg_sc_t2star"
loss = DiceLoss, depth = 4, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 4, model_name = "seg_sc_t2star"
loss = DiceLoss, depth = 2, model_name = "find_disc_t1"
loss = FocalLoss, depth = 2, model_name = "find_disc_t1"
loss = DiceLoss, depth = 3, model_name = "find_disc_t1"
loss = FocalLoss, depth = 3, model_name = "find_disc_t1"
loss = DiceLoss, depth = 4, model_name = "find_disc_t1"
loss = FocalLoss, depth = 4, model_name = "find_disc_t1"
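For illustration only (this is not the ``ivadomed`` implementation; the dictionary below is a
simplified stand-in for the hyperparameter file), such a combinatorial expansion can be
sketched with ``itertools.product``:
.. code-block:: python

    import itertools

    hyper_options = {
        "loss": [{"name": "DiceLoss"}, {"name": "FocalLoss", "gamma": 0.2, "alpha": 0.5}],
        "depth": [2, 3, 4],
        "model_name": ["seg_sc_t2star", "find_disc_t1"],
    }
    keys = list(hyper_options)
    # one dict per combination of hyperparameter values
    combinations = [dict(zip(keys, values))
                    for values in itertools.product(*hyper_options.values())]
    assert len(combinations) == 12  # 2 * 3 * 2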
Multiple Parameters:
The ``multi_params`` option changes all the first elements from the lists at once,
then all the second elements, etc. If the lists have different lengths, only the first
``n`` elements are used, where ``n`` is the length of the shortest list. In our example
above, the lists are of length 2 or 3, so only the first 2 elements are used (a sketch
follows the example below):
.. code-block::
loss = DiceLoss, depth = 2, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 3, model_name = "find_disc_t1"
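For illustration only (hypothetical stand-in, not the ``ivadomed`` code), this pairing can be
sketched with ``zip``, which naturally stops at the shortest list:
.. code-block:: python

    hyper_options = {
        "loss": ["DiceLoss", "FocalLoss"],
        "depth": [2, 3, 4],
        "model_name": ["seg_sc_t2star", "find_disc_t1"],
    }
    keys = list(hyper_options)
    combinations = [dict(zip(keys, values))
                    for values in zip(*hyper_options.values())]
    # -> [{'loss': 'DiceLoss', 'depth': 2, 'model_name': 'seg_sc_t2star'},
    #     {'loss': 'FocalLoss', 'depth': 3, 'model_name': 'find_disc_t1'}]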
Args:
file_config (string): Configuration filename, which is used as a skeleton to configure the
training. This is the standard config file used in ``ivadomed`` functions. In the
code, we call the keys from this config file ``categories``.
Flag: ``--config``, ``-c``
file_config_hyper (string): JSON file containing the hyperparameter configurations to compare.
The parameter "keys" of this file need to match the parameter "keys" of the `config` file.
Parameter "values" are given as a list. Flag: ``--config-hyper``, ``-ch``
Example::
{"default_model": {"depth": [2, 3, 4]}}
fixed_split (bool): If True, all the experiments are run on the same
training/validation/testing subdatasets. Flag: ``--fixed-split``
all_combin (bool): If True, all parameters combinations are run. Flag: ``--all-combin``
n_iterations (int): Controls the number of times each experiment (i.e. set of parameters)
is run. Flag: ``--n-iteration``, ``-n``
run_test (bool): If True, the trained model is also run on the testing subdataset, and violin plots
of the Dice scores are displayed for each new output folder created.
Flag: ``--run-test``
all_logs (bool): If True, all the log directories are kept for every iteration.
Flag: ``--all-logs``, ``-l``
thr_increment (float): A threshold analysis is performed at the end of the training
using the trained model and the validation sub-dataset to find the optimal binarization
threshold. The specified value indicates the increment between 0 and 1 used during the
ROC analysis (e.g. 0.1). Flag: ``-t``, ``--thr-increment``
multi_params (bool): If True, more than one parameter will be changed at a time from
the hyperparameters. All the first elements from the hyperparameter lists will be
applied, then all the second elements, etc.
output_dir (str): Path to where the results will be saved.
"""
if output_dir and not Path(output_dir).exists():
Path(output_dir).mkdir(parents=True)
if not output_dir:
output_dir = ""
# Load initial config
initial_config = imed_config_manager.ConfigurationManager(file_config).get_config()
if path_data is not None:
initial_config[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.PATH_DATA] = path_data
# Split dataset if not already done
if fixed_split and (initial_config.get(ConfigKW.SPLIT_PATH) is None):
initial_config = split_dataset(initial_config)
# Hyperparameters values to experiment
with Path(file_config_hyper).open(mode="r") as fhandle:
config_hyper = json.load(fhandle)
param_list = get_param_list(config_hyper, [], [])
config_list = make_config_list(param_list, initial_config, all_combin, multi_params)
# CUDA problem when forking process
# https://github.com/pytorch/pytorch/issues/2517
ctx = mp.get_context("spawn")
# Run all configs on a separate process, with a maximum of n_gpus processes at a given time
logger.info(initial_config[ConfigKW.GPU_IDS])
results_df = pd.DataFrame()
eval_df = pd.DataFrame()
all_mean = pd.DataFrame()
with ctx.Pool(processes=len(initial_config[ConfigKW.GPU_IDS])) as pool:
for i in range(n_iterations):
if not fixed_split:
# Set seed for iteration
seed = random.randint(1, 10001)
for config in config_list:
config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.RANDOM_SEED] = seed
if all_logs:
if i:
config[ConfigKW.PATH_OUTPUT] = config[ConfigKW.PATH_OUTPUT].replace("_n=" + str(i - 1).zfill(2),
"_n=" + str(i).zfill(2))
else:
config[ConfigKW.PATH_OUTPUT] += "_n=" + str(i).zfill(2)
validation_scores = pool.map(partial(train_worker, thr_incr=thr_increment), config_list)
val_df = pd.DataFrame(validation_scores, columns=[
'path_output', 'best_training_dice', 'best_training_loss', 'best_validation_dice',
'best_validation_loss'])
if run_test:
new_config_list = []
for config in config_list:
# Delete path_pred
path_pred = Path(config['path_output'], 'pred_masks')
if path_pred.is_dir() and n_iterations > 1:
try:
shutil.rmtree(str(path_pred))
except OSError as e:
logger.info(f"Error: {e.filename} - {e.strerror}.")
# Take the config file within the path_output because binarize_prediction may have been updated
json_path = Path(config[ConfigKW.PATH_OUTPUT], 'config_file.json')
new_config = imed_config_manager.ConfigurationManager(str(json_path)).get_config()
new_config[ConfigKW.GPU_IDS] = config[ConfigKW.GPU_IDS]
new_config_list.append(new_config)
test_results = pool.map(test_worker, new_config_list)
df_lst = []
# Merge all eval df together to have a single excel file
for j, result in enumerate(test_results):
df = result[-1]
if i == 0:
all_mean = df.mean(axis=0)
std_metrics = df.std(axis=0)
metrics = pd.concat([all_mean, std_metrics], sort=False, axis=1)
else:
all_mean = pd.concat([all_mean, df.mean(axis=0)], sort=False, axis=1)
mean_metrics = all_mean.mean(axis=1)
std_metrics = all_mean.std(axis=1)
metrics = | pd.concat([mean_metrics, std_metrics], sort=False, axis=1) | pandas.concat |
#........................................................................................................
# Title: Wikidata claims (statements) to natural language (a part of Triple2Text/Ontology2Text task)
# Author: <NAME>
# Email: <EMAIL>
# Lab: https://www.cic.ipn.mx
# Date: 12/2019
#........................................................................................................
from base import *
#from cluster_methods import *
from wiki_core import *
from read_write_file import *
from word2vec import *
from wiki2vec import *
from itertools import cycle
from collections import Counter
from sklearn.cluster import DBSCAN, OPTICS, MeanShift, AffinityPropagation, AgglomerativeClustering, SpectralClustering, Birch
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import TruncatedSVD, PCA, NMF, SparsePCA, FastICA
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import LocalOutlierFactor
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors
import numpy as np
import math
import re
import time
import matplotlib.pyplot as plt
import pandas as pd
from nltk import ngrams
import string
import spacy
from spacy.tokenizer import Tokenizer  # used by treat_labeled_items2() to rebuild the tokenizer
from spacy.lang.en.stop_words import STOP_WORDS
nlp = spacy.load("en_core_web_md")
definition_properties = {
'P26': ['spouse', 'wife', 'married', 'marry', 'marriage', 'partner', 'wedded', 'wed', 'wives', 'husbands', 'spouses', 'husband'],
'P39': ['position', 'political', 'object', 'seat', 'public', 'office', 'subject', 'formerly', 'holds', 'currently', 'held', 'occupied'],
'P54': ['sports', 'teams', 'clubs', 'member', 'team', 'played', 'plays', 'club', 'player'],
'P69': ['educated', 'educational', 'institution', 'attended', 'subject', 'alma', 'mater', 'education',
'alumni', 'alumnus', 'alumna', 'college', 'university', 'school', 'studied', 'graduate', 'graduated', 'faculty'],
'P108': ['employer', 'person', 'organization', 'subject', 'works', 'worked', 'workplace', 'employed', 'working', 'place'],
'P166': ['work', 'awarded', 'won', 'medals', 'creative', 'person', 'awards', 'win', 'winner', 'honors', 'received', 'award',
'prize', 'title', 'recognition', 'honorary', 'honours', 'organisation'],
'P6': ['first', 'chancellor', 'prime', 'minister', 'government', 'mayor', 'state', 'executive', 'town', 'national', 'other', 'power', 'head', 'municipality', 'country', 'premier', 'body', 'governor', 'heading', 'city', 'headed', 'governmental', 'president'],
'P17': ['human', 'host', 'used', 'beings', 'item', 'sovereign', 'country', 'nation', 'land', 'state'],
'P22': ['daughter', 'daddy', 'subject', 'dad', 'male', 'stepfather', 'stepparent', 'son', 'father', 'child', 'parent'],
'P27': ['national', 'subject', 'nationality', 'country', 'citizenship', 'object', 'citizen', 'recognizes'],
'P31': ['example', 'main', 'member', 'class', 'individual', 'unsubclassable', 'subject', 'instance', 'occurrence', 'unsubtypable', 'distinct', 'uninstantiable', 'non', 'specific', 'unique', 'rdf', 'unsubclassifiable', 'element', 'unitary', 'type', 'particular'],
'P35': ['highest', 'king', 'authority', 'governor', 'queen', 'chief', 'monarch', 'head', 'official', 'country', 'headed', 'emperor', 'leader', 'formal', 'state', 'president'],
'P101': ['specialism', 'specialization', 'speciality', 'studies', 'FOW', 'organization', 'field', 'researcher', 'area', 'work', 'fields', 'person', 'academic', 'research', 'occupation', 'activity', 'subject', 'domain', 'scientific', 'discipline', 'responsible', 'conduct', 'see', 'study'],
'P103': ['languages', 'mother', 'person', 'language', 'native', 'learned', 'first', 'L1', 'speaker', 'tongue', 'birth'],
'P106': ['work', 'position', 'held', 'person', 'employment', 'craft', 'occupation', 'profession', 'career', 'field', 'job'],
'P108': ['person', 'employed', 'workplace', 'employer', 'working', 'works', 'subject', 'worked', 'place', 'organization'],
'P131': ['district', 'administrative', 'arrondissement', 'rural', 'territorial', 'entity', 'happens', 'village', 'region', 'following', 'territory', 'item', 'Indian', 'local', 'shire', 'government', 'area', 'based', 'borough', 'department', 'state', 'reservation', 'town', 'commune', 'unit', 'places', 'province', 'reserve', 'municipality', 'settlement', 'ward', 'county', 'prefecture', 'non', 'locations', 'parish', 'items', 'principal', 'location', 'voivodeship', 'locality', 'specifying', 'city', 'events', 'located'],
'P155': ['comes', 'offices', 'prequel', 'preceding', 'prior', 'replaces', 'split', 'sequel', 'item', 'successor', 'immediately', 'follows', 'before', 'series', 'subject', 'replaced', 'political', 'use', 'preceded', 'part', 'succeeds', 'previous', 'predecessor'],
'P156': ['comes', 'offices', 'prequel', 'part', 'sequel', 'following', 'item', 'successor', 'succeeded', 'immediately', 'followed', 'before', 'preceeds', 'series', 'subject', 'precedes', 'replaced', 'political', 'next', 'use', 'succeded'],
'P184': ['PhD', 'supervisor', 'doctorate', 'thesis', 'promotor', 'advisor', 'subject', 'doctoral', 'supervised'],
'P276': ['administrative', 'case', 'entity', 'region', 'physical', 'venue', 'event', 'item', 'place', 'area', 'based', 'object', 'held', 'feature', 'neighborhood', 'distinct', 'origin', 'terrain', 'location', 'use', 'located', 'moveable'],
'P407': ['broadcasting', 'audio', 'signed', 'URL', 'written', 'languages', 'associated', 'used', 'such', 'name', 'language', 'native', 'available', 'text', 'website', 'work', 'creative', 'named', 'reference', 'spoken', 'websites', 'songs', 'persons', 'use', 'shows', 'books'],
'P413': ['specialism', 'position', 'played', 'player', 'speciality', 'team', 'fielding'],
'P453': ['filled', 'specific', 'played', 'cast', 'subject', 'role', 'use', 'plays', 'acting', 'qualifier', 'character', 'member', 'actor', 'only', 'voice'],
'P512': ['person', 'academic', 'degree', 'holds', 'diploma'],
'P570': ['date', 'died', 'dead', 'subject', 'deathdate', 'year', 'death', 'end', 'DOD'],
'P571': ['introduced', 'commenced', 'defined', 'commencement', 'existence', 'point', 'came', 'time', 'creation', 'formation', 'first', 'inception', 'founded', 'written', 'founding', 'built', 'created', 'constructed', 'foundation', 'when', 'inititated', 'date', 'dedication', 'subject', 'establishment', 'issue', 'start', 'inaugurated', 'launch', 'introduction', 'launched', 'formed', 'construction', 'year', 'incorporated', 'incorporation', 'completed', 'established'],
'P577': ['work', 'date', 'airdate', 'dop', 'released', 'point', 'air', 'initial', 'pubdate', 'time', 'publication', 'first', 'year', 'published', 'release', 'when'],
'P580': ['start', 'starting', 'began', 'statement', 'date', 'introduced', 'introduction', 'begins', 'item', 'started', 'beginning', 'exist', 'time', 'valid', 'starttime', 'starts', 'building'],
'P582': ['ending', 'ceases', 'indicates', 'divorced', 'cease', 'left', 'time', 'closed', 'end', 'endtime', 'operation', 'item', 'date', 'stop', 'statement', 'office', 'dissolved', 'ends', 'stops', 'valid', 'being', 'exist', 'fall', 'completed'],
'P585': ['date', 'statement', 'event', 'existed', 'point', 'something', 'place', 'true', 'time', 'year', 'took', 'when'],
'P642': ['stating', 'statement', 'item', 'scope', 'qualifier', 'applies', 'particular'],
'P669': ['road', 'add', 'street', 'square', 'item', 'number', 'where', 'use', 'address', 'qualifier', 'there', 'property', 'located'],
'P708': ['church', 'types', 'archdiocese', 'division', 'administrative', 'other', 'diocese', 'ecclesiastical', 'use', 'entities', 'bishopric', 'belongs', 'element', 'territorial', 'archbishopric'],
'P735': ['forename', 'family', 'Christian', 'person', 'used', 'names', 'middle', 'values', 'name', 'should', 'link', 'first', 'disambiguations', 'property', 'given', 'personal'],
'P748': ['person', 'appointed', 'used', 'can', 'office', 'qualifier'],
'P768': ['district', 'seat', 'electoral', 'area', 'candidacy', 'representing', 'held', 'Use', 'election', 'riding', 'person', 'office', 'ward', 'position', 'being', 'contested', 'qualifier', 'constituency', 'electorate'],
'P805': ['dedicated', 'identified', 'statement', 'qualifying', 'item', 'describes', 'subject', 'artfor', 'article', 'relation', 'claim'],
'P811': ['college', 'someone', 'studied', 'academic', 'minor', 'university'],
'P812': ['college', 'someone', 'studied', 'academic', 'major', 'subject', 'university', 'field', 'study'],
'P828': ['due', 'causes', 'has', 'result', 'ultimate', 'had', 'why', 'ultimately', 'implied', 'thing', 'reason', 'effect', 'underlying', 'outcome', 'resulted', 'originated', 'caused', 'cause', 'initial'],
'P937': ['work', 'workplace', 'persons', 'working', 'activity', 'where', 'location', 'place', 'active'],
'P1001': ['value', 'institution', 'has', 'territorial', 'jurisdiction', 'item', 'linked', 'law', 'applied', 'state', 'statement', 'office', 'power', 'country', 'municipality', 'valid', 'belongs', 'applies', 'public'],
'P1013': ['respect', 'basis', 'used', 'according', 'made', 'criterion', 'reference', 'criteria', 'respectively', 'property', 'distinction', 'based', 'classification', 'by'],
'P1066': ['pupil', 'master', 'person', 'academic', 'disciple', 'supervisor', 'teacher', 'professor', 'studied', 'has', 'mentor', 'advisor', 'taught', 'student', 'tutor'],
'P1264': ['applicability', 'statement', 'validity', 'period', 'time', 'valid', 'applies', 'when'],
'P1268': ['represents', 'entity', 'organization', 'organisation', 'individual'],
'P1350': ['pitched', 'number', 'played', 'races', 'games', 'matches', 'team', 'appearances', 'caps', 'starts', 'gp', 'sports', 'mp'],
'P1351': ['scored', 'used', 'event', 'number', 'league', 'participant', 'points', 'goals', 'qualifier', 'GF', 'score', 'set', 'match', 'use'],
'P1365': ['replaces', 'structures', 'identical', 'item', 'successor', 'continues', 'forefather', 'follows', 'holder', 'person', 'job', 'replaced', 'structure', 'preceded', 'supersedes', 'succeeds', 'previous', 'predecessor'],
'P1366': ['adds', 'role', 'identical', 'item', 'heir', 'successor', 'succeeded', 'continues', 'superseded', 'followed', 'dropping', 'holder', 'person', 'other', 'series', 'job', 'replaced', 'next', 'replacing', 'continued', 'mediatised', 'books'],
'P1534': ['date', 'ending', 'specify', 'together', 'use', 'qualifier', 'cause', 'ended', 'end', 'reason'],
'P1642': ['status', 'transaction', 'player', 'acquisition', 'acquired', 'team', 'how', 'qualifier', 'member', 'loan', 'contract', 'sports'],
'P1686': ['work', 'awarded', 'nominated', 'received', 'award', 'qualifier', 'citation', 'creator', 'given'],
'P1706': ['item', 'together', 'award', 'tied', 'feat', 'qualifier', 'featuring', 'property', 'shared', 'accompanied', 'specify'],
'P2389': ['leads', 'person', 'directed', 'office', 'head', 'heads', 'directs', 'leader', 'organization', 'runs', 'organisation', 'led'],
'P2578': ['learning', 'research', 'academic', 'item', 'working', 'study', 'subject', 'studies', 'researches', 'property', 'object', 'field', 'studying', 'scholarly'],
'P2715': ['election', 'position', 'reelection', 'confirmed', 'person', 'statements', 'gained', 'qualifier', 'link', 'elected', 'held'],
'P2842': ['wedding', 'location', 'where', 'place', 'spouse', 'celebrated', 'marriage', 'property', 'married'],
'P2868': ['value', 'duty', 'function', 'context', 'has', 'role', 'title', 'purpose', 'generic', 'item', 'acting', 'identity', 'character', 'object', 'statement', 'subject', 'roles', 'job', 'use'],
'P3831': ['value', 'generic', 'statement', 'context', 'specifically', 'circumstances', 'item', 'employment', 'subject', 'role', 'identity', 'use', 'qualifier', 'object'],
'P4100': ['parliament', 'group', 'faction', 'belongs', 'parliamentary', 'member', 'party'],
'P1319': ['date', 'earliest']
}
# load corpus from property name
def load_corpus(file_name, word2vec_file_name, property_name, delimiter='#', dtype=dtypes, trained=False, idf_dict_status=False):
df = pd.read_csv(file_name, delimiter='#', dtype=dtype, usecols=list(dtype))
best_sentences, best_rows = get_best_sentences(df, show=False)
labeled_sen_list = df['labeled_sentence_2']
counter = create_ngram(labeled_sen_list, 1) # unigram
idf_dict = {}
if (idf_dict_status == True):
idf_dict = create_idf_dict(labeled_sen_list)
word_corpus = create_true_distribution_corpus2(labeled_sen_list, 0)
if (trained == True):
word2vec_train(word2vec_file_name, property_name, word_corpus)
# load models
local_model = load_word2vec(word2vec_file_name)
global_model = load_wiki2vec(r'D:\wiki-news-300d-1M.vec', 200000)
result_dict = {}
result_dict['file_name'] = file_name
result_dict['sen_list'] = df
result_dict['best_sentences'] = best_sentences
result_dict['labeled_sen_list'] = labeled_sen_list
result_dict['counter'] = counter
result_dict['idf_dict'] = idf_dict
result_dict['word_corpus'] = word_corpus
result_dict['local_model'] = local_model
result_dict['global_model'] = global_model
print('Loading corpus was done!!!')
return result_dict
# some basic statistics
def basic_statistics(file_name, delimiter='#', dtype=dtypes, best_sentence = False):
print('file_name: ', file_name)
#sen_list = read_from_csv_file(file_name, '#', 'all')[1:] # remove headers
df = pd.read_csv(file_name, delimiter=delimiter, dtype=dtype, usecols=list(dtype))
average_sentence_length(df)
average_word(df)
average_token(df)
average_token_labeled_sentence(df)
ratio_token_per_quad(df)
ratio_token_per_quad_item(df)
if (best_sentence == True):
print('++ Best sentences statistics')
labeled_list, df2 = get_best_sentences(df)
#print(len(labeled_list))
average_sentence_length(df2)
average_word(df2)
average_token(df2)
average_token_labeled_sentence(df2)
ratio_token_per_quad(df2)
ratio_token_per_quad_item(df2)
print('.............................')
print('.............................')
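# Illustrative sketch (not part of the original pipeline; the file names below are placeholders):
# loading the corpus of one Wikidata property and printing its basic statistics.
def _demo_load_corpus():
    corpus = load_corpus('p26_corpus.csv', 'p26_word2vec.model', 'p26', idf_dict_status=True)
    print(len(corpus['best_sentences']), 'best sentences')
    basic_statistics('p26_corpus.csv', best_sentence=True)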
# cumulative rate by property
def cumulative_rate_by_property(property_name, df):
length_list = []
for index, row in df.iterrows():
#print(property_name, row['length'])
if (row['predicate'].lower() == property_name.lower()):
length_list.append(int(row['length']))
elif (property_name == 'common'): # count all properties
length_list.append(int(row['length']))
#file_name, sen_list, best_sentences, labeled_sen_list, counter, idf_dict, word_corpus, local_model, global_model = load_corpus(property_name)
#sentences, number_redundant_word_list, redundant_word_list = get_corpus_redundant_words(sen_list)
#print('length_list: ', length_list)
rank_list = rank_sentence_by_redundant_words(length_list)
cumulative_list = cumulative_rate(rank_list)
#print('rank_list: ', rank_list)
return cumulative_list
def treat_labeled_items2():
prefixes = list(nlp.Defaults.prefixes)
prefixes.remove('\\[')
prefix_regex = spacy.util.compile_prefix_regex(prefixes)
nlp.tokenizer.prefix_search = prefix_regex.search
suffixes = list(nlp.Defaults.suffixes)
suffixes.remove('\\]')
suffix_regex = spacy.util.compile_suffix_regex(suffixes)
nlp.tokenizer.suffix_search = suffix_regex.search
infixes = list(nlp.Defaults.prefixes)
infixes.remove('\\[')
infixes.remove('\\]')
try:
infixes.remove('\\-')
except Exception as e:
pass
try:
infixes.remove(':')
except Exception as e:
pass
try:
infixes.remove('_')
except Exception as e:
pass
infix_regex = spacy.util.compile_infix_regex(infixes)
nlp.tokenizer = Tokenizer(nlp.vocab, infix_finditer=infix_regex.finditer)
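# Illustrative check of the intended behaviour (not part of the original pipeline): with the
# customised tokenizer, bracketed placeholders such as "[s]" or "[o0]" should stay single tokens.
def _demo_treat_labeled_items():
    treat_labeled_items2()
    doc = nlp("[s] was married to [o0] in [o0-qualifier:P580] .")
    print([token.text for token in doc])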
# create n-gram from text
def ngram(text, n):
# make n-gram and also count the frequency of each item by Counter
treat_labeled_items2()
doc = nlp(text)
temp = [token.text for token in doc if token.text != '']
return list(ngrams(temp, n))
# create n-gram from list
def create_ngram(sentence_list, n):
temp = []
for sentence in sentence_list:
sentence = "[start] " + sentence + " [end]"
temp += (ngram(sentence, n))
return Counter(temp)
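# Illustrative sketch (made-up labeled sentences): how create_ngram() is meant to be used.
def _demo_create_ngram():
    sentences = ['[s] married [o0] in [o0-qualifier:P580]', '[s] works for [o0]']
    unigram_counter = create_ngram(sentences, 1)   # Counter of 1-gram tuples, incl. [start]/[end]
    bigram_counter = create_ngram(sentences, 2)    # Counter of 2-gram tuples
    print(unigram_counter.most_common(5))
    print(bigram_counter.most_common(5))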
# filter by property name
def filter_by_property(property_name, sen_list):
#property_list = ['P26','P39','P54','P69','P108','P166']
result_list = []
for p in sen_list[1:]: # start with data in line 1 (not headers)
if (p[2] == property_name):
result_list.append(p)
result_list = sorted(result_list, key = lambda x: (int(x[2][1:]), x[4])) # sort by qualifier
return result_list
# write file from list
def write_file_from_list(file_name, sen_list):
with open(file_name,'w', newline='', encoding='utf-8') as f:
wr = csv.writer(f, delimiter='#', quoting=csv.QUOTE_MINIMAL)
for p in sen_list:
print(p)
wr.writerow(p)
# average length per raw sentence
def average_sentence_length(df):
al = 0
for index, row in df.iterrows():
#print('row: ', row)
al += len(row['raw_sentence'])
print('average_sentence_length: ', al/len(df))
return al/len(df)
# average word per raw sentence
def average_word(df):
al = 0
for index, row in df.iterrows():
doc = nlp(row['raw_sentence'])
# words = [token.text for token in doc if token.is_punct != True]
al += len(row['raw_sentence'].split())
print('average_word: ', al/len(df))
return al/len(df)
# average token per raw sentence
def average_token(df):
al = 0
for index, row in df.iterrows():
doc = nlp(row['raw_sentence'])
al += doc.__len__()
print('average_token: ', al/len(df))
return al/len(df)
# average token per labeled sentence
def average_token_labeled_sentence(df):
al = 0
treat_labeled_items() # treat a labeled item as a token
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
al += doc.__len__()
print('average_token_labeled_sentence: ', al/len(df))
return al/len(df)
# ratio of token per quad (labeled sentence)
def ratio_token_per_quad(df):
treat_labeled_items() # treat a labeled item as a token
tokens = 0
quads = len(df) # 1 quad in 1 sentence
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
tokens += doc.__len__()
print('ratio_token_per_quad: ', tokens/quads)
return tokens/quads
# ratio of token per quad item (labeled sentence)
def ratio_token_per_quad_item(df):
treat_labeled_items() # treat a labeled item as a token
tokens = 0
quad_items = 0
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
temp_quads = len(row['order_2'].split(','))
tokens += doc.__len__() - temp_quads
quad_items += temp_quads
print('ratio_token_per_quad_item: ', tokens/quad_items)
return tokens/quad_items
# get the best sentences: no redundant words (except stop words & a verb as ROOT)
def get_best_sentences(df, show=False):
treat_labeled_items2() # treat a labeled item as a token
best_sentence_list = []
best_row_list = []
columns = []
if (len(df) != 0):
columns = [index for index, val in df.iloc[0].iteritems()]
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
redudant_list = []
temp_quads = [x.strip() for x in row['order_2'].split(',')]
for token in doc:
if (token.pos_ == "X"):
continue
if (token.pos_ == "PUNCT"):
continue
if (token.pos_ == "CCONJ"):
continue
if (token.pos_ == "ADP"):
continue
if (token.pos_ == "PRON"):
continue
if (token.pos_ == "PART"):
continue
if (token.pos_ == "DET"):
continue
if (token.dep_ == "punct"):
continue
if (token.text not in temp_quads):
redudant_list.append([token.text, token.pos_, token.dep_])
#print(token.text, token.pos_, token.dep_)
if (len(redudant_list) == 1):
if (redudant_list[0][2] == "ROOT"): # token.dep_ of the only redundant token
if (row['labeled_sentence_2'] not in best_sentence_list):
best_sentence_list.append(row['labeled_sentence_2']) # add the labeled sentence only
best_row_list.append([val for index, val in row.iteritems()]) # add a whole row
if (show != False):
print('..............................')
print('..............................')
print('Best sentences:')
for s in best_sentence_list:
print(s)
print('-----------')
print('..............................')
print('..............................')
# convert to dataframe
df = pd.DataFrame(best_row_list, columns=columns)
#print('df: ', df)
return best_sentence_list, df
# get redundant words in labeled sentences
def get_redundant_words(sen_row):
redudant_list = []
treat_labeled_items2()
doc = nlp(sen_row['labeled_sentence_2'])
quad_items = get_quad_items(sen_row)
for token in doc:
if (token.pos_ == "X"):
continue
if (token.pos_ == "PUNCT"):
continue
if (token.pos_ == "CCONJ"):
continue
if (token.pos_ == "ADP"):
continue
if (token.pos_ == "PRON"):
continue
if (token.pos_ == "PART"):
continue
if (token.pos_ == "DET"):
continue
if (token.dep_ == "punct"):
continue
if (token.text not in quad_items and token.text.strip() != ''):
#redudant_list.append([token.text, token.pos_, token.dep_])
redudant_list.append(token.text)
return redudant_list
# train corpus using CBOW
def word2vec_train(word2vec_file, property_name, corpus):
# save_word2vec(corpus, min_count, size, window, sorted_vocab, sg, workers, iters, file_name)
if (property_name == 'p26'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 10, word2vec_file)
if (property_name == 'p108'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 30, word2vec_file)
if (property_name == 'p69'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 20, word2vec_file)
if (property_name == 'p166'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 20, word2vec_file)
if (property_name == 'p54'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 5, word2vec_file)
if (property_name == 'p39'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 25, word2vec_file)
if (property_name == 'common'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 3, word2vec_file)
if (property_name == 'common2'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 3, word2vec_file)
# get quad items in a sentence
def get_quad_items(sen_row):
quad_items = []
quad_items = [x.strip() for x in sen_row['order_2'].split(',')]
return quad_items
# get important quad items in a sentence
def get_important_quad_items(sen_row):
quad_items = []
quad_items = [x.strip() for x in sen_row['order_2'].split(',')]
quad_items = list(set(quad_items) - set(['[det:the]','[det:a-an]','[s:poss]'])) # remove unimportant terms
return quad_items
# get qualifier quad items in a sentence
def get_qualifier_items(sen_row):
quad_items = []
quad_items = [x.strip() for x in sen_row['order_2'].split(',')]
qualifier_list = []
for q in quad_items:
if ('qualifier' in q and 'o0' in q): # get qualifiers of o0 (main object or first object)
qualifier_list.append(q)
#print('qualifier_list: ', qualifier_list)
return qualifier_list
# convert sentence to measures (tf, idf, local_distance, global_distance, vector, etc) & write to a result file
def convert_sentence_to_measures(output_file_name, sen_row, best_sentences, local_model, global_model, counter, idf_dict):
#print('sen_row: ', sen_row)
# redundant words
redundant_words = get_redundant_words(sen_row)
length = len(redundant_words)
sentence = redundant_words # check redundant words only
# best sentence
label = ''
if (sen_row['labeled_sentence_2'] in best_sentences): label = 'x'
# sum & product
tf1, tf2 = convert_sentence_to_tf(sentence, local_model, counter)
idf1, idf2 = convert_sentence_to_idf(sentence, idf_dict)
local1, local2 = convert_sentence_to_local_distance(sen_row, sentence, local_model, counter)
global1, global2 = convert_sentence_to_global_distance(sen_row, sentence, global_model)
# combination
tf_idf1, tf_idf2 = convert_sentence_to_tf_idf(sentence, local_model, counter, idf_dict)
local_tf1, local_tf2 = convert_sentence_to_local_tf_distance(sen_row, sentence, local_model, counter)
local_idf1, local_idf2 = convert_sentence_to_local_idf_distance(sen_row, sentence, local_model, counter, idf_dict)
local_tf_idf1, local_tf_idf2 = convert_sentence_to_local_tf_idf_distance(sen_row, sentence, local_model, counter, idf_dict)
global_tf1, global_tf2 = convert_sentence_to_global_tf_distance(sen_row, sentence, global_model, counter, qualifier=False)
global_idf1, global_idf2 = convert_sentence_to_global_idf_distance(sen_row, sentence, global_model, idf_dict, qualifier=False)
global_tf_idf1, global_tf_idf2 = convert_sentence_to_global_tf_idf_distance(sen_row, sentence, global_model, counter, idf_dict,
qualifier=False)
# global with qualifier
global_qualifier1, global_qualifier2 = convert_sentence_to_global_distance(sen_row, sentence, global_model, qualifier=True)
global_qualifier_tf1, global_qualifier_tf2 = convert_sentence_to_global_tf_distance(sen_row, sentence, global_model, counter, qualifier=True)
global_qualifier_idf1, global_qualifier_idf2 = convert_sentence_to_global_idf_distance(sen_row, sentence, global_model, idf_dict, qualifier=True)
global_qualifier_tf_idf1, global_qualifier_tf_idf2 = convert_sentence_to_global_tf_idf_distance(sen_row, sentence, global_model, counter, idf_dict,
qualifier=True)
# vector
vector_sum, vector_product = convert_sentence_to_vector(sentence, local_model) # base on local_model
# add results to sen_row
temp_list = [label, redundant_words, length, tf1, tf2, idf1, idf2, local1, local2, global1, global2, tf_idf1, tf_idf2, local_tf1,
local_tf2, local_idf1, local_idf2, local_tf_idf1, local_tf_idf2, global_tf1, global_tf2, global_idf1, global_idf2,
global_tf_idf1, global_tf_idf2, global_qualifier1, global_qualifier2, global_qualifier_tf1, global_qualifier_tf2,
global_qualifier_idf1, global_qualifier_idf2, global_qualifier_tf_idf1, global_qualifier_tf_idf2]
sen_row = sen_row.values.tolist()
sen_row.extend(temp_list)
write_to_csv_file(output_file_name, '#', sen_row)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_distance(sen_row, sentence, global_model, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
sum_dist += temp_sum/def_length
product_dist *= -math.log(temp_sum/def_length)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, -math.log(product_dist)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_tf_idf_distance(sen_row, sentence, global_model, counter, idf_dict, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
idf = get_idf(idf_dict, w) # inverse topic frequency
tf = get_tf(counter, w) # term frequency
sum_dist += (temp_sum*idf*tf)/def_length
product_dist *= math.log(1 + (temp_sum*idf*tf)/def_length)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_idf_distance(sen_row, sentence, global_model, idf_dict, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
idf = get_idf(idf_dict, w) # inverse topic frequency
sum_dist += (temp_sum*idf)/def_length
product_dist *= math.log(1 + (temp_sum*idf)/def_length)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_tf_distance(sen_row, sentence, global_model, counter, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
tf = get_tf(counter, w) # term frequency
sum_dist += (temp_sum*tf)/def_length
product_dist *= math.log(1 + (temp_sum*tf)/def_length)
#print('---', (temp_sum*tf)/def_length, math.log((temp_sum*tf)/def_length))
#print('product_dist: ', product_dist)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# convert sentence to a vector distance (similarity)
def convert_sentence_to_local_distance(sen_row, sentence, local_model, counter):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
sum_dist += temp_sum/quad_length
product_dist *= -math.log(temp_sum/quad_length)
#return sum_dist, math.log(product_dist + 1)
return sum_dist, -math.log(product_dist)
# convert sentence to local-tf
def convert_sentence_to_local_tf_distance(sen_row, sentence, local_model, counter):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
tf = get_tf(counter, w) # term frequency
sum_dist += (temp_sum*tf)/quad_length
product_dist *= math.log(1 + (temp_sum*tf)/quad_length)
#print('---', (temp_sum*tf)/quad_length, math.log((temp_sum*tf)/quad_length))
#return sum_dist, math.log(product_dist + 1)
#print('product_dist: ', product_dist)
return sum_dist, math.log(product_dist + 1)
# convert sentence to local-idf
def convert_sentence_to_local_idf_distance(sen_row, sentence, local_model, counter, idf_dict):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
idf = get_idf(idf_dict, w) # inverse topic frequency
sum_dist += (temp_sum*idf)/quad_length
product_dist *= math.log(1 + (temp_sum*idf)/quad_length)
#return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# convert sentence to local-tf-idf
def convert_sentence_to_local_tf_idf_distance(sen_row, sentence, local_model, counter, idf_dict):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
tf = get_tf(counter, w) # term frequency
idf = get_idf(idf_dict, w) # inverse topic frequency
sum_dist += (temp_sum*tf*idf)/quad_length
product_dist *= math.log(1 + (temp_sum*tf*idf)/quad_length)
#return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# convert sentence to tf-idf
def convert_sentence_to_tf_idf(sentence, model, counter, idf_dict):
length = len(sentence)
sum_score = 0
product_score = 1
for w in sentence:
try:
tf = get_tf(counter, w)
idf = get_idf(idf_dict, w)
sum_score += tf*idf
product_score *= tf*idf
except:
pass
return sum_score, math.log(product_score + 1)
# convert sentence to term frequency
def convert_sentence_to_tf(sentence, model, counter):
#length = len(sentence)
sum_score = 0
product_score = 1
for w in sentence:
try:
score = get_tf(counter, w)
#print('---', score)
sum_score += score
product_score *= score
except:
pass
#print('product_score: ', product_score)
return sum_score, math.log(product_score)
# convert sentence to term frequency
def convert_sentence_to_idf(sentence, idf_dict):
length = len(sentence)
sum_score = 0
product_score = 1
for w in sentence:
try:
score = get_idf(idf_dict, w)
sum_score += score
product_score *= score
except:
pass
return sum_score, math.log(product_score + 1)
# convert sentence to vector
def convert_sentence_to_vector(sentence, model):
length = len(sentence)
sum_vec = 1
product_vec = 1
for w in sentence:
try:
w_vec = model.get_vector(w)
sum_vec += w_vec
product_vec *= w_vec
except:
pass
return sum_vec, product_vec
# convert corpus to vector
def convert_corpus_to_vector(corpus, best_sentences, model, counter):
label_list = []
vector_list = []
i = 0
# note that a sentence is a list of words
for sentence in corpus:
# convert back to a string sentence
temp = ' '.join(e for e in sentence[1:-1]) # remove [start], [end]
if (temp in best_sentences):
label_list.append('x')
else:
label_list.append('')
sum_vector, product_vector = convert_sentence_to_vector(sentence, model)
vector_list.append([sum_vector, product_vector])
i = i + 1
return label_list, vector_list
# get redundant words and their length for all sentences
def get_corpus_redundant_words(sen_list):
sentence_list, number_redundant_word_list, redundant_word_list = [], [], []
for p in sen_list:
redundant_words = get_redundant_words(p)
length = len(redundant_words)
number_redundant_word_list.append(length)
redundant_word_list.append(redundant_words)
sentence_list.append(p['labeled_sentence_2'])
return sentence_list, number_redundant_word_list, redundant_word_list
# convert corpus to measures and write to file
def convert_corpus_to_measures(output_file_name, sen_list, best_sentences, local_model, global_model, counter, idf_dict):
#label_list, metric_list, number_redundant_word_list, redundant_word_list, sentence_list = [], [], [], [], []
#i = 0
# note that sentence is a list of words
for index, sen_row in sen_list.iterrows():
convert_sentence_to_measures(output_file_name, sen_row, best_sentences, local_model, global_model, counter, idf_dict)
#metric_list.append(temp_dict)
#i = i + 1
#return label_list, metric_list, number_redundant_word_list, redundant_word_list, sentence_list
#...........................
#...........................
# rank a predicate frequency by property (P26, P39, P54, etc)
def rank_predicate_by_property(count_list, property_name):
# group and calculate average values
temp_list = []
for i in count_list:
if (i['term'].split('-')[0] == property_name):
temp_list.append([i['term'], i['local_average'], i['local_max_dist'], i['global_average'], i['global_max_dist'],
i['subject_dist'], i['object_dist'], i['redundant_words']])
df = pd.DataFrame(temp_list)
df = df.groupby([0]).agg('mean')
df = {x[0]: x[1:] for x in df.itertuples(index=True)}
# calculate term frequency and add it & average values to freq_dict
freq_list = [t[0] for t in temp_list]
#print('freq_list: ', freq_list)
length = len(freq_list) # size of corpus
freq_dict = Counter(freq_list)
#print('freq_dict: ', freq_dict)
for k, v in freq_dict.items():
freq_dict[k] = {'tf':v/length, 'local_average':df[k][0], 'local_max_dist':df[k][1],
'global_average': df[k][2], 'global_max_dist':df[k][3], 'subject_dist': df[k][4],
'object_dist':df[k][5], 'redundant_words':df[k][6]}
#print('freq_dict: ', freq_dict)
return freq_dict
# count the average distance of a word to other words (important words/terms only) in the same sentence
def word_distance_to_sentence(quad_items, word, local_model, global_model):
local_length = len(quad_items) # the numbers of quad items
global_length = 0
local_sum = 0
global_sum = 0
local_max_dist = 0
global_max_dist = 0
subject_dist = object_dist = 0
try:
subject_dist = local_model.similarity(word, '[s]') # subject distance
object_dist = local_model.similarity(word, '[o0]') # object distance
except:
pass
# local model
for term in quad_items: # can be qualifiers or all items in quad (subject, object, qualifiers)
try:
dist = local_model.similarity(word, term)
#print('dist, word, term: ', dist, word, term)
if (dist > local_max_dist):
local_max_dist = dist
local_sum += dist
#print('local_sum: ', local_sum)
except:
local_length = local_length - 1 # word is not in model
pass
# global model
#print('quad_items: +++', quad_items)
for term in quad_items:
value = term[term.index(':')+1:term.index('-')]
temp_list = []
try:
temp_list = definition_properties[value]
except:
pass
temp_length = len(temp_list)
#print('term, value, temp_list, temp_length: ', term, value, temp_list, temp_length)
for t in temp_list:
try:
dist = global_model.similarity(word, t)
#print('dist: ', dist, word, t)
if (dist > global_max_dist):
global_max_dist = dist
global_sum += dist
except:
temp_length = temp_length - 1 # word is not in model
pass
global_length += temp_length
local_average = global_average = 0
if (local_length == 0): local_average = 0
else: local_average = local_sum/local_length
if (global_length == 0): global_average = 0
else: global_average = global_sum/global_length
result_dict = {'local_average':local_average, 'local_max_dist': local_max_dist,
'global_average': global_average, 'global_max_dist': global_max_dist,
'subject_dist': subject_dist, 'object_dist': object_dist}
#print('result_dict: ', result_dict)
return result_dict
# count average distance of a word to other words in a sentence (use important terms)
def word_distance_to_property_definition(prop_items, word, global_model):
length = len(prop_items)
temp_sum = 0
max_dist = 0
for term in prop_items:
try:
dist = global_model.similarity(word, term)
if (dist > max_dist):
max_dist = dist
temp_sum += dist
except:
length = length - 1 # word is not in model
pass
if (length == 0):
return temp_sum, max_dist
return temp_sum/length, max_dist
# rank predicate (Wikidata properties) by term frequency
def rank_predicate(sen_df, best_sentences, counter, local_model, global_model, by_qualifier=False):
result_dict = Counter()
predicate_criteria_list = [] # list of criteria of each predicate
property_name_list = []
redundant_list = []
for index, sen_row in sen_df.iterrows():
predicate = sen_row['predicate'].strip() # Wikidata property
qualifiers = sen_row['qualifiers'].strip().split('-')
#prepositional_verb = sen_row['prepositional_verb'].split(',')[0].strip("'")
root = sen_row['root'].split(',')
root_value = root[0].strip("'") # value of root (verb)
root_pos = root[1] # position of root
quad_items = get_qualifier_items(sen_row)
distance_dict = word_distance_to_sentence(quad_items, root_value, local_model, global_model)
if (by_qualifier == True):
term = predicate + '-' + root_value + '-' + '-'.join(qualifiers)
else:
term = predicate + '-' + root_value
property_name_list.append(predicate)
distance_dict['term'] = term
redundant_words = get_redundant_words(sen_row)
distance_dict['redundant_words'] = len(redundant_words)
predicate_criteria_list.append(distance_dict)
property_names = list(set(property_name_list))
# join dictionaries by property
for pn in property_names:
result_dict = {**result_dict, **rank_predicate_by_property(predicate_criteria_list, pn)} # join two dictionaries
normalized_values = []
normalized_labels = []
for k, v in result_dict.items():
temp = k.split('-')
property_name = temp[0]
predicate = temp[1]
tf = get_tf(counter, predicate)
'''average_def_dist, max_def_dist = word_distance_to_property_definition(definition_properties[property_name], predicate,
global_model)'''
#print('---', average_def_dist, v['local_average'], v['global_average'], v['tf'])
temp_list = [v['local_average'], v['global_average'], v['tf']]
temp_score = (np.prod(temp_list)*len(temp_list))/sum(temp_list)
try: temp_score = 1/-math.log(temp_score)
except: temp_score = 0
result_dict[k] = (temp_score, temp_score, v['tf'])
normalized_values.append((temp_score, temp_score, v['tf']))
normalized_labels.append(k)
#{'local_average':local_average, 'local_max_dist': local_max_dist, 'global_average': global_average,
# 'global_max_dist': global_max_dist, 'subject_dist': subject_dist, 'object_dist': object_dist}
# normalize values
normalized_values = MinMaxScaler().fit(normalized_values).transform(normalized_values)
for k, v in zip(normalized_labels, normalized_values):
result_dict[k] = v.tolist()
#print('result_dict: ', result_dict)
result_dict = dict(sorted(result_dict.items(), key = lambda v: v[1], reverse = True))
return result_dict
def group_predicate(predicate_dict, top=10, show=False):
group_dict = {}
for k, v in predicate_dict.items():
temp_list = k.split('-')
key = temp_list[0] + '-' + '-'.join(temp_list[2:])
key = key.strip('-')
predicate = temp_list[1]
temp_list = [*v]
temp_list.insert(0, predicate)
if (key not in group_dict):
group_dict[key] = [temp_list]
else:
group_dict[key].append(temp_list)
#group_dict = sorted(group_dict.items(), key = lambda v: (v[0]), reverse = True))
if (show==False): return group_dict
i = 1
for k, v in group_dict.items():
print('+', k)
for x in v:
if (i > top): break
print('---', x)
i = i + 1
i = 1
return group_dict
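# Illustrative sketch of the intended pipeline (file names are placeholders): load the corpus of
# one property, rank the predicates (root verbs) used to express it, then group them for display.
def _demo_rank_predicate():
    corpus = load_corpus('p26_corpus.csv', 'p26_word2vec.model', 'p26', idf_dict_status=True)
    ranking = rank_predicate(corpus['sen_list'], corpus['best_sentences'], corpus['counter'],
                             corpus['local_model'], corpus['global_model'])
    group_predicate(ranking, top=10, show=True)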
#...........................
#...........................
# count the raw frequency of a word over all sentences (used to build the idf dictionary)
def create_idf_word(sentences, word):
n = len(sentences)
freq = 0 # raw frequency
for s in sentences:
freq += sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(word), s))
return freq
# create idf dictionary
def create_idf_dict(sentences):
n = len(sentences)
result_dict = {}
result_dict['%%SIZE%%'] = n # number of documents (sentences)
for s in sentences:
doc = nlp(s)
for token in doc:
if (str(token.text) not in result_dict):
result_dict[str(token.text)] = create_idf_word(sentences, str(token.text))
return result_dict
# get inverse document frequency, a sentence as a document
def get_idf(idf_dict, word):
n = idf_dict['%%SIZE%%']
freq = 0
if (word in idf_dict):
freq = idf_dict[word]
# return -math.log((freq + 1)/n)
return -math.log((freq+1)/n) + 1
# get frequency of a term in corpus, corpus as a document
def get_tf(counter, word):
temp = (word,) # create key
freq = 0
freq = counter[temp] # raw frequency
#n = len(counter)
return math.log(freq+1) + 1
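# Illustrative sketch (made-up sentences): term weights produced by get_tf()/get_idf(); in the real
# pipeline the counter comes from create_ngram() and the idf dictionary from create_idf_dict().
def _demo_term_weights():
    sentences = ['[s] married [o0]', '[s] married [o0] in [o0-qualifier:P580]']
    counter = create_ngram(sentences, 1)
    idf_dict = create_idf_dict(sentences)
    for word in ['married', 'in']:
        print(word, get_tf(counter, word), get_idf(idf_dict, word))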
# count and rank the number of sentences by their number of redundant words
def rank_sentence_by_redundant_words(redundants):
count_dict = {}
for r in redundants:
if (r not in count_dict):
count_dict[r] = 1
else:
count_dict[r] += 1
count_dict = sorted(count_dict.items(), key=lambda x: x[0])
return count_dict
# show sentences by distance
def show_sentence_distance(labels, scores, redundants, sentences, redundant_word_list):
i = 0
for value in scores:
print('#' + str(i) + ': ', value, labels[i], redundants[i], redundant_word_list[i], '---', sentences[i])
i = i + 1
# show plot of sentences
def show_sentence_plot(labels, scores, redundants, sentences, redundant_word_list):
#labels = []
#scores = []
#redundants = []
#sentences = []
#redundant_word_list = []
#labels, scores, redundants, sentences, redundant_word_list = convert_corpus_to_distance(sen_list, best_sentences, model, counter)
#labels, tokens = convert_corpus_to_vector1(word_corpus, best_sentences, model, counter)
#tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
#new_values = tsne_model.fit_transform(tokens)
plt.figure(figsize=(20, 20))
for i in range(len(scores)):
# s: size, color: color
if (labels[i] == 'x'):
plt.scatter(scores[i], redundants[i], s=20, color='blue') # marker = 's'
plt.annotate('', xy=(scores[i], redundants[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
else:
plt.scatter(scores[i], redundants[i], s=2)
plt.annotate('', xy=(scores[i], redundants[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.xlabel("Number of redundant words")
plt.ylabel("Score")
plt.title('Sentences in corpus')
plt.savefig('show_sentence_plot.pdf')
plt.show()
# show plot of predicates (Wikidata properties)
def show_predicate_plot(predicate_list, axis_labels):
plt.figure(figsize=(12, 4))
for index, predicate_dict in enumerate(predicate_list):
labels = []
values = []
for k, v in predicate_dict.items():
labels.append(k)
values.append(v)
#tsne_model = TSNE(perplexity=100, n_components=1, init='pca', n_iter=5000)
#values = tsne_model.fit_transform(values)
#values = decomposition(values, 'pca', dimension = 2)
#values = MinMaxScaler().fit(values).transform(values)
x = []
y = []
sizes = []
i = 0
for v in values:
a = v[0]
b = v[1]
#print('+++', labels[i], a, b, int(v[2]*300)+1)
x.append(a)
y.append(b)
sizes.append(int(v[2]*300)+1)
i = i + 1
plt.rcParams.update({'font.size':10})
for i in range(len(x)):
# s: size, color: color
plt.scatter(1, 1, s=1, alpha=0.0)
plt.scatter(index + 2, y[i], s=sizes[i], alpha=0.6) # marker = 's'
if (i < 5):
temp_label = labels[i][labels[i].index('-')+1:]
#print('temp_label: ', temp_label)
plt.annotate(temp_label, xy=(index + 2, y[i]), xytext=(2, 2), textcoords='offset points', ha='right', va='bottom',
alpha=0.9, fontsize=8)
#fontsize=int(sizes[i]/10)+1
plt.grid(color = 'grey', linestyle = 'dotted', linewidth = 1)
#plt.gca().axes.get_xaxis().set_visible(False)
# axis labels
plt.xticks(range(2, len(axis_labels)+2), axis_labels)
plt.show()
# get all qualifiers by Wikidata properties
def get_all_qualifiers(sen_df):
result_list = []
result_dict = {}
for index, sen_row in sen_df.iterrows():
temp_list = get_qualifier_items(sen_row)
for t in temp_list:
value = t[t.index(':') + 1:t.index('-')]
result_list.append(value)
result_list = list(set(result_list))
result_list = sorted(result_list, key = lambda x: int(x[1:]))
for r in result_list:
root = get_wikidata_root(r)
label = get_label(root)
description = get_description(root)
aliases = ' '.join(e for e in get_alias(root))
def_string = label + ' ' + description + ' ' + aliases
def_list = []
doc = nlp(def_string)
for token in doc:
if (token.pos_ == "X"):
continue
if (token.pos_ == "PUNCT"):
continue
if (token.pos_ == "CCONJ"):
continue
if (token.pos_ == "ADP"):
continue
if (token.pos_ == "PRON"):
continue
if (token.pos_ == "PART"):
continue
if (token.pos_ == "DET"):
continue
if (token.dep_ == "punct"):
continue
def_list.append(token.text)
def_list = list(set(def_list))
#print('def_list:', r, def_list)
result_dict[r] = def_list
print('result_dict qualifiers: ', result_dict)
return result_dict
# sentence plot by redundant words
def sentence_plot_by_redundant_words(total_cumulative_list, labels, plot_title, x_axis_label, y_axis_label):
#cmap = plt.get_cmap('plasma')
#colors = cmap(np.linspace(0, 1, len(labels)))
colors = ['green', 'blue', 'red', 'coral', 'orchid', 'gray', 'gold']
colorcyler = cycle(colors)
lines = ['+', '*', '>', 'x', 'o', ':', '--']
linecycler = cycle(lines)
plt.rcParams.update({'font.size':10})
plt.ylabel(y_axis_label)
plt.xlabel(x_axis_label)
#plt.title(plot_title)
#plt.figure(figsize=(1,30))
#plt.figure(figsize=(1, 1), dpi=1000)
scale_factor = 30
xmin, xmax = plt.xlim()
plt.xlim(xmin * scale_factor, xmax * scale_factor)
for cumulative_list, name, color in zip(total_cumulative_list, labels, colors):
x, y = [], []
i = 0
for r in cumulative_list:
x.append(r[0])
y.append(r[1])
i = i + 1
plt.plot(x, y, next(linecycler), label=name, c=next(colorcyler))
plt.legend()
'''for i in range(len(y)):
plt.scatter(x[i], y[i], s=2, color=color)'''
#ymin, ymax = plt.ylim()
#plt.ylim(ymin * scale_factor, ymax * scale_factor)
plt.grid(color = 'grey', linestyle = 'dotted', linewidth = 0.5)
plt.savefig('sentence_plot_by_redundant_words.pdf')
plt.savefig('sentence_plot_by_redundant_words.svg')
plt.show()
plt.style.use('default') # reset style to default
# cumulative rate [0-1]
def cumulative_rate(rank_list):
result_list = []
total = sum([r[1] for r in rank_list])
temp = 0
for r in rank_list:
temp += r[1]/total
#print(temp)
result_list.append([r[0], temp])
#print(result_list)
return result_list
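# Illustrative sketch: ranking sentences by their redundant-word counts and turning the ranking
# into a cumulative rate, as done in cumulative_rate_by_property(); the counts are made-up.
def _demo_cumulative_rate():
    redundants = [0, 1, 1, 2, 2, 2, 5]                          # one count per sentence
    rank_list = rank_sentence_by_redundant_words(redundants)    # [(0, 1), (1, 2), (2, 3), (5, 1)]
    print(cumulative_rate(rank_list))                           # cumulative share per count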
# minimums by redundant words
def minimums_by_redundant_words(scores, redundants):
result_dict = {}
for s, r in zip(scores, redundants):
if (r not in result_dict):
result_dict[r] = s
else: # get min
if s < result_dict[r]: result_dict[r] = s
result_dict = sorted(result_dict.items(), key=lambda x: x[0])
#print(result_dict)
return result_dict
# linear regression
def linear_regression(x, y):
print('x: ', x)
print('y: ', y)
x = np.array(x).reshape((-1, 1))
y = np.array(y)
model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
y_pred = model.predict(x)
print('predicted response:', y_pred, sep='\n')
mae = metrics.mean_absolute_error(y, y_pred)
print('Mean Absolute Error:', mae)
mse = metrics.mean_squared_error(y, y_pred)
print('Mean Squared Error:', mse)
rmse = np.sqrt(metrics.mean_squared_error(y, y_pred))
print('Root Mean Squared Error:', rmse)
result_dict = {}
result_dict['y_pred'] = y_pred
result_dict['intercept'] = model.intercept_
result_dict['coef'] = model.coef_
result_dict['r_sq'] = r_sq
result_dict['mae'] = mae
result_dict['mse'] = mse
result_dict['rmse'] = rmse
return result_dict
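# Illustrative sketch (made-up numbers): fitting redundant-word counts against sentence scores,
# the same kind of input later passed to linear_regression_plot().
def _demo_linear_regression():
    redundants = [0, 1, 2, 3, 4, 5]
    scores = [0.9, 0.8, 0.7, 0.5, 0.4, 0.2]
    fit = linear_regression(redundants, scores)
    print(fit['coef'], fit['intercept'], fit['r_sq'])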
# linear regression plot
def linear_regression_plot(x, y, dict1, dict2, plot_title, x_axis_label, y_axis_label):
plt.figure(figsize=(20, 20))
for i, j in zip(x, y):
plt.scatter(i, j, s=10, alpha=0.5)
plt.annotate('', xy=(i, j), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
axes1 = plt.gca()
x_vals1 = np.array(axes1.get_xlim())
y_vals1 = dict1['intercept'] + dict1['coef']*x_vals1
print('x_vals1, y_vals1: ', x_vals1, y_vals1)
plt.plot(x_vals1, y_vals1, '--')
axes2 = plt.gca()
x_vals2 = np.array(axes2.get_xlim())
y_vals2 = dict2['intercept'] + dict2['coef']*x_vals2
print('x_vals2, y_vals2: ', x_vals2, y_vals2)
plt.plot(x_vals2, y_vals2)
plt.grid(color = 'grey', linestyle = 'dotted', linewidth = 0.5)
plt.savefig('linear_regression_plot.pdf')
plt.show()
#plt.style.use('default') # reset style to default
# filter noise by cumulative rate
def filter_noise_by_cumulative_rate(sentences, redundant_word_list, number_redundant_word_list, cumulative_list,
rate = 0, top_words = 0):
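# 'bound' is a redundant-word count: with rate != 0 it is the first count at which the cumulative share
# exceeds rate (or the last count if none does); with top_words != 0 it is used directly.
# Sentences whose redundant-word count exceeds bound are dropped as noise.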
sentences_, redundant_word_list_, number_redundant_word_list_ = [], [], []
if (rate == 0 and top_words == 0):
return sentences, redundant_word_list, number_redundant_word_list
bound = 0 # number of words used to filter
# filter by rate only
if (rate != 0 and top_words == 0):
for c in cumulative_list:
bound = c[0]
if (c[1] > rate):
break
elif(rate == 0 and top_words != 0):
bound = top_words
if (bound == 0):
return sentences, redundant_word_list, number_redundant_word_list
for a, b, c in zip(sentences, redundant_word_list, number_redundant_word_list):
if (c <= bound):
sentences_.append(a)
redundant_word_list_.append(b)
number_redundant_word_list_.append(c)
return sentences_, redundant_word_list_, number_redundant_word_list_
# filter noise by metrics
def filter_noise_by_metrics(df, field, frac=1, ascending=True):
# convert to numeric
df['local_tf_idf2'] = | pd.to_numeric(df['local_tf_idf2'], errors='coerce') | pandas.to_numeric |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
cp[inds] = 0
exp.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
cp[arr_inds] = 0
exp.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import datetime as dt
import os
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
debug = False
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css','./assets/custom.css' ]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
label_dict = {"p":"real power",
"q":"imaginary power",
"i":"current",
"v":"voltage"}
# dash components
dropdown_options = [{"label":v, "value":k} for k,v in label_dict.items()]
dropdown = dcc.Dropdown(id="type_selector",
options=dropdown_options,
multi=True,
value=list(label_dict.keys()),
style={}
)
outlier_radio_label = html.P('filter outliers:',style={'margin-bottom': 1,
'margin-top': 10})
outlier_radio=dcc.RadioItems(
id="outlier_radio",
options=[
{'label': 'include', 'value': 'include'},
{'label': 'remove', 'value': 'remove'},
],
value='include',
labelStyle={'display': 'inline-block'})
selectors = html.Div([dropdown,
outlier_radio_label,
outlier_radio],
className="three columns")
graph = dcc.Graph(id='value_plots', figure={})
selectors_graph = html.Div(className="row plot-layout",
children=[selectors,
html.Div(className="eight columns",
children=[
graph
]),
])
# dash layout
app.layout = html.Div(
children=[html.A(href="https://clemap.com", children=[
html.Div(className="row",
style={
"background-color": "#005499"
},
children=[
html.Div(className="one columns",
children=[
html.Img(src="https://images.squarespace-cdn.com/content/5c2bc35bcef3729843c7e70c/1570518147439-332SOX2GQ5N1EQYPVV8I/clemap_logo_stnd_clr%2Bwhite.png?format=1500w&content-type=image%2Fpng",
style={'max-height':'50px'}
),
]),
])
]),
html.H3("Three Phase Sensor",
style={'text-align': 'center'}
),
selectors_graph,
])
def prep_datetime(df):
"""
convert day and time columns to a single date time column
drop the original day and time columns
"""
df['time'] = df['time'].apply(pd.Timedelta)
df['datetime'] = | pd.to_datetime(df['day']) | pandas.to_datetime |
import os
import json
import folium
import numpy as np
import pandas as pd
from folium.plugins import TimeSliderChoropleth
from pprint import pprint
from branca.colormap import linear
def graph(data_frame,n_periods,freq,name,start_day):
"""
input : data_frame :: pandas DataFrame, n_periods :: int, freq :: string, name :: string, start_day :: string
data_frame must hold one row per period, with the aggregated values for boroughs 0-4 as its columns;
consecutive rows are separated by freq.
n_periods : number of rows
freq : time gap between periods, in the format {'%nH','%nM','%nD'} where %n is an integer scalar multiple of an hour/month/day
name : the resulting map is saved as %name.html
start_day : "YYYY-MM-DD" string marking the start date of the period
saves the map
out : None
"""
m = folium.Map(location=[40.718, -73.98], zoom_start=10)
folium.TileLayer('cartodbpositron').add_to(m)
datetime_index = pd.date_range(start_day, periods= n_periods , freq=freq)
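# .astype(int) gives the epoch time in nanoseconds; keeping only the first 10 characters ('U10')
# truncates it to whole epoch seconds (for present-day dates), the string key format TimeSliderChoropleth expects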
dt_index = datetime_index.astype(int).astype('U10')
styledata = {}
"""
borough title :
0 is Staten Island
1 is Queens
2 is Brooklyn
3 is Manhattan
4 is Bronx
"""
for borough in range(0, 5):  # one entry per borough listed above (0-4)
df = pd.DataFrame({'color': data_frame.iloc[:,borough].values,'opacity': data_frame.iloc[:,borough].values},index=dt_index)
df.sort_index()
styledata[borough] = df
max_color, min_color, max_opacity, min_opacity = 0, 0, 0, 0
for borough, data in styledata.items():
max_color = max(max_color, data['color'].max())
min_color = min(min_color, data['color'].min())
max_opacity = max(max_opacity, data['opacity'].max())
min_opacity = min(min_opacity, data['opacity'].min())
cmap = linear.PuRd_09.scale(min_color, max_color)
#convert color value to hexcolor
#and normalize the opacity
for borough, data in styledata.items():
data['color'] = data['color'].apply(cmap)
data['opacity'] = norm(data['opacity'])
styledict = {
str(borough): data.to_dict(orient='index') for
borough, data in styledata.items()
}
#load the geojson file that we have
nyc_zone = os.path.join('./','newyorkborough.json')
nyc_json = json.load(open(nyc_zone))
#create timesliderchoropleth with geojson file & style as arguments and add it to the map
g = TimeSliderChoropleth( nyc_json, styledict=styledict).add_to(m)
#save the map
title_statement = "<h1>"+ name+"</h1>"
m.get_root().html.add_child(folium.Element(title_statement))
colormap = linear.OrRd_08.scale(min_color, max_color)
colormap.caption = 'Ride count in New York boroughs'
colormap.add_to(m)
m.save(name+".html")
#function for normalizing the opacity
def norm(x):
return (x - x.min()) / (x.max() - x.min())
def main():
data = np.array([[0.01,0.1,0.09,0.75,0.05] ,[0.1,0.09,0.75,0.05,0.01]])
#dataframe that will be used for plotting
data_df = | pd.DataFrame(data) | pandas.DataFrame |
from datetime import datetime
from datetime import timedelta
import json
import pandas as pd
import logging
logging.info('Started')
# Do I have the right to enter or not? If I do, for how long?
# Given a reference date, how long can I stay in the Schengen countries?
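# Expected input (hypothetical example -- the exact JSON keys are not part of this snippet):
#   ref_date='2020-06-01', entries={'date': ['2020-01-10', '2020-03-05']}, exits={'date': ['2020-01-20', '2020-03-15']}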
def transform_data(ref_date, entries, exits):
logging.info(
'Check whether Reference Date is empty and default to today if so')
today = datetime.now()
if ref_date == '' or ref_date is None:
reference_date = today
else:
reference_date = datetime.strptime(ref_date, '%Y-%m-%d')
logging.info('Check whether Reference Date is a past date, which is not of interest')
if reference_date < today:
exit() # need a function generating json response
logging.info('Create reference date/entries/exists dataframe')
df_entries = pd.DataFrame.from_dict(entries)
df_entries['Coef'] = 1
df_exits = pd.DataFrame.from_dict(exits)
df_exits['Coef'] = 0
df_raw_input = df_entries.append(df_exits, ignore_index=True, sort=True)
df_raw_input.columns = ['Date', 'Coef']
df_raw_input['Date'] = pd.to_datetime(df_raw_input['Date'])
data_ref = {'Date': [reference_date], 'Coef': [0]}
df_reference_date = pd.DataFrame(data=data_ref)
df_reference_date['Date'] = | pd.to_datetime(df_reference_date['Date']) | pandas.to_datetime |
"""Classes and utility functions for processing game data.
EbbFlowGameData: Container for data from a single game.
EbbFlowDataset: Subclass of PyTorch Dataset,
container for data from multiple games.
EbbFlowStats: Subclass of EbbFlowDataset,
provides extra functionality for analysis.
"""
import os
import random
import copy
import pickle
from itertools import product
from collections import defaultdict
import pdb
import torch
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import dill
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import gaussian_kde, bernoulli
from . import transforms as T
from .utils import z_pca
class EbbFlowDataset(Dataset):
"""Container for an Ebb and Flow dataset; also provides
functionality for processing the data and interfacing with PyTorch.
Data are stored in two separate formats with a one-to-one correspondence:
self.xu contains the model inputs: this is the "continuous" format.
self.discrete contains the same data in an alternative format
that facilitates analysis.
Args
----
experiment_dir (str): Root directory containing info for current
model training run.
params (dict): Processing parameters; see e.g. train_transform_kwargs
in default config (config/model_config.yaml).
preprocessed (dict): Preprocessed game data.
split (str): Can be either 'train', 'val', or 'test';
split is incorporated into the file name of the processed data
upon saving.
processed_dir (str): Directory in which to save the processed data.
pre_transform (list of callables, optional): Transformations which
are applied before the processed data is saved.
transform (list of callables, optional): Transformations which are applied
'online' on each iteration of the training loop.
pre_transform_params (list of dicts, optional): List of parameters to
pass to the pre_transforms. This should be a list of dictionaries
with the same length as pre_transform.
transform_params (list of dicts, optional): List of parameters to
pass to the transforms. This should be a list of dictionaries
with the same length as transform.
"""
needs_data_augmentation = ('kde', 'kde_no_switch_cost', 'adaptive_gaussian')
def __init__(self, experiment_dir, params, preprocessed, split,
processed_dir, pre_transform=None, transform=None,
pre_transform_params=None, transform_params=None):
self.experiment_dir = experiment_dir
self.processed_dir = processed_dir
self.params = params
# rename a couple keys
preprocessed['urt_ms'] = preprocessed.pop('resp_time')
preprocessed['urespdir'] = preprocessed.pop('resp_dir')
self.preprocessed = preprocessed
self.split = split
self.resampling_type = self.params.get('data_augmentation_type', None)
self._build_transforms(transform, transform_params,
pre_transform, pre_transform_params)
self.process() # also saves processed data
self.xu = torch.load(self.processed_paths[0]) # xu = model inputs
with open(self.processed_paths[1], 'rb') as path:
other_data = pickle.load(path)
self.discrete = other_data['discrete']
self.game_ids = other_data['game_ids']
self.resampling_info = other_data['resampling']
def _build_transforms(self, transform, transform_params,
pre_transform, pre_transform_params):
# build pre transform - usually just outlier filtering
if pre_transform is not None:
pre = [t(p) for t, p in zip(pre_transform, pre_transform_params)]
self.pre_transform = T.Compose(pre)
# build transform
default_transform = [T.SmoothResponses(self.params)]
if transform is not None:
supplied_t = [t(p) for t, p in zip(transform, transform_params)]
tr = default_transform + supplied_t
else:
tr = default_transform
self.transform_list = tr
def update_smoothing(self, epoch):
# Update the kernel used to smoothing the response template
self.transform_list[0]._update_sm_kernel(epoch)
self.transform = T.Compose(self.transform_list)
def get_processed_sample(self, idx):
"""Return an EbbFlowGameData instance with data from a single game.
Args
----
idx (int): Index of the game to return.
Returns
-------
An EbbFlowGameData instance containing the data for this game.
"""
discrete = {key: vals[idx] for key, vals in self.discrete.items()}
self.update_smoothing(9999999)
cnp = self[idx].numpy()
continuous = {'urespdir': cnp[:, :4], 'point_dir': cnp[:, 4:8],
'mv_dir': cnp[:, 8:12], 'task_cue': cnp[:, 12:]}
game_id = self.game_ids[idx]
return EbbFlowGameData.processed_format(discrete, continuous,
self.params, game_id)
def __getitem__(self, idx):
# Return a single sample (game) to train the model (continuous format).
# Called by PyTorch during model training.
xu_idx = self.xu[:, idx, :]
xu_idx = xu_idx if self.transform is None else self.transform(xu_idx)
return xu_idx
def __len__(self):
# Return the number of samples (games) in the dataset.
return self.xu.shape[1]
@property
def processed_paths(self):
"""Return the full paths to the processed data."""
return [os.path.join(self.processed_dir, f)
for f in self.processed_file_names]
@property
def processed_file_names(self):
"""Return the names of the processed data files."""
return [f'{self.split}_model_inputs.pt',
f'{self.split}_other_data.pkl']
def process(self):
"""Prepare an Ebb and Flow dataset for model training.
Apply pretransforms and filtering criteria;
determine the continuous and discrete formats of the dataset.
"""
if _files_exist(self.processed_paths):
return
os.makedirs(self.processed_dir, exist_ok=True)
# Do an initial processing run to get info for resampling
# and/or response smoothing.
if ((self.resampling_type in self.needs_data_augmentation)
or (self.params['smoothing_type'] in
self.needs_data_augmentation)):
throwaway_data = self._get_preprocessed_games(for_resampling=True)
[td.standard_prep() for td in throwaway_data]
# Remove outliers if specified in pre-transform
# before resampler estimation
out_method = self.params['outlier_params'].get('method', None)
if out_method is not None:
out_filter = T.FilterOutliers(self.params)
rs_pre_transform = T.Compose([T._Trim(), out_filter])
else:
rs_pre_transform = T.Compose([T._Trim()])
throwaway_data = [rs_pre_transform(td) for td in throwaway_data]
throwaway_data = [td for td in throwaway_data if td.is_valid]
resampling_info, sm_params = self._get_resampling_sm_info(
throwaway_data)
self.resampling_info = resampling_info
else:
sm_params = copy.deepcopy(self.params)
# Process each game
data_list = self._get_preprocessed_games()
[d.standard_prep() for d in data_list]
data_list = [self.pre_transform(d) for d in data_list]
self.excluded_list = [d for d in data_list if not d.is_valid]
data_list = [d for d in data_list if d.is_valid]
self._collate(data_list)
self._save_processed_data()
def _get_preprocessed_games(self, for_resampling=False):
data_list = []
resampling_info = getattr(self, 'resampling_info', None)
start_times = self.params['start_times']
if for_resampling:
upscale_mult = 1
else:
upscale_mult = self.params['upscale_mult']
for start_time, game_ind, _ in product(
start_times, range(len(self.preprocessed['urt_ms'])),
range(upscale_mult)):
preprocessed_game = {key: self.preprocessed[key][game_ind]
for key in self.preprocessed.keys()}
data_list.append(
EbbFlowGameData.preprocessed_format(
preprocessed_game, self.params, start_time,
resampling_info=resampling_info))
return data_list
def _remove_switch_cost(self, rts_typed):
# Translate RTs to eliminate switch cost
con_rt_diff = np.mean(rts_typed[2]) - np.mean(rts_typed[0])
incon_rt_diff = np.mean(rts_typed[3]) - np.mean(rts_typed[1])
orig_mean_typed_rt = np.mean([np.mean(rts_typed[i]) for i in range(4)])
rts_typed[2] = np.array(rts_typed[2]) - con_rt_diff
rts_typed[3] = np.array(rts_typed[3]) - incon_rt_diff
# Translate RTs again so that mean RT is the same
new_mean_typed_rt = np.mean([np.mean(rts_typed[i]) for i in range(4)])
mean_rt_diff = orig_mean_typed_rt - new_mean_typed_rt
for ttype in range(4):
rts_typed[ttype] += mean_rt_diff
return rts_typed
def _get_resampling_sm_info(self, data):
# Trial types:
# 0 = congruent + stay
# 1 = incongruent + stay
# 2 = congruent + switch
# 3 = incongruent + switch
resampling_dists = {}
acc = {}
rts_typed = {}
sm_params = {'step_size': self.params['step_size'],
'smoothing_type': self.params.get('smoothing_type',
'gaussian'),
'kernel_sd': self.params.get('kernel_sd', 50),
'params': {}}
for ttype in range(4):
this_rts = []
this_correct = []
for d in data:
d.get_extra_stats()
this_rts.extend(d._get_field_by_trial_type(ttype, 'urt_ms'))
this_correct.extend(d._get_field_by_trial_type(
ttype, 'ucorrect'))
rts_typed[ttype] = this_rts
# Resampling info
bw = self.params.get('data_aug_kernel_bandwidth', 0.25)
if self.resampling_type == 'kde_no_switch_cost':
rts_typed = self._remove_switch_cost(rts_typed)
for ttype in range(4):
this_rts = rts_typed[ttype]
if self.resampling_type in ['kde', 'kde_no_switch_cost']:
this_resampling = gaussian_kde(this_rts, bw_method=bw)
else:
this_resampling = None
# Smoothing info
if self.params['smoothing_type'] == 'adaptive_gaussian':
this_sm = np.std(this_rts)
elif self.params['smoothing_type'] == 'kde':
bw = self.params.get('data_aug_kernel_bandwidth', 0.25)
this_sm = gaussian_kde(this_rts, bw_method=bw)
else:
this_sm = None
resampling_dists[ttype] = this_resampling
sm_params['params'][ttype] = this_sm
acc[ttype] = np.mean(this_correct)
if resampling_dists[0] is not None:
resampling_info = {'rts': resampling_dists, 'acc': acc}
else:
resampling_info = None
return resampling_info, sm_params
def _save_processed_data(self):
other_data = {'discrete': self.discrete,
'excluded': self.excluded_list,
'resampling': getattr(self, 'resampling_info', None),
'game_ids': self.game_ids}
torch.save(self.xu, self.processed_paths[0])
with open(self.processed_paths[1], 'xb') as path:
dill.dump(other_data, path, protocol=4)
def _collate(self, data_list):
# Continuous format (model inputs)
con_keys = ['urespdir', 'point_dir', 'mv_dir', 'task_cue']
xu_split = [torch.cat([torch.tensor(d.continuous[key]).unsqueeze(1)
for d in data_list], 1)
for key in con_keys]
self.xu = torch.cat([d for d in xu_split], 2).to(dtype=torch.float32)
# Discrete format
disc_keys = data_list[0].discrete_fields
self.discrete = {key: [d.discrete[key] for d in data_list]
for key in disc_keys}
self.game_ids = [d.game_id for d in data_list]
class EbbFlowStats(EbbFlowDataset):
"""Extends EbbFlowDataset with extra functionality for
analyzing user and model behavior.
Args
----
output_rates (PyTorch tensor): The model's outputs (responses for each of
the four directions). Has dimensions n_timesteps x n_samples x 4.
dataset (EbbFlowDataset instance): The dataset to be analyzed.
latents (PyTorch tensor, optional): The model latent state variables.
Has dimensions n_timesteps x n_samples x latent_dim.
n_pcs (int, optional): The number of principal components to keep
in the PCA transformed latent state variables (self.pca_latents).
**kwargs (optional): Extra options to be supplied for calls to
EbbFlowGameData.get_extra_stats().
"""
def __init__(self, output_rates, dataset, latents=None,
n_pcs=3, **kwargs):
self.rates = output_rates.cpu().detach().numpy()
self.xu = dataset.xu
self.discrete = dataset.discrete
self.transform = dataset.transform
self.params = dataset.params
self.step = self.params['step_size']
self.game_ids = dataset.game_ids
td_kwargs = {'t_pre': 100, 't_post': 1600}
td_kwargs.update(kwargs)
self.trial_data_kwargs = td_kwargs
self.n_pre = np.round(td_kwargs['t_pre'] / self.step).astype('int')
self.n_post = np.round(td_kwargs['t_post'] / self.step).astype('int')
self.t_axis = self.step * np.arange(-self.n_pre, self.n_post, 1)
if latents is not None:
self.latents = latents.cpu().detach().numpy()
pca_latents, explained_var, pca_obj = z_pca(self.latents, n_pcs)
self.pca_latents = pca_latents
self.pca_explained_var = explained_var
self.pca_obj = pca_obj
else:
self.latents = None
self.pca_latents = None
self.pca_explained_var = None
self.pca_obj = None
if kwargs.get('alphas') is not None:
self.alphas = kwargs['alphas'].cpu().detach().numpy()
self.As = kwargs['As'].cpu().detach().numpy()
self.Bs = kwargs['Bs'].cpu().detach().numpy()
self.Cs = kwargs['Cs'].cpu().detach().numpy()
self.windowed = None
self._get_trial_data()
def _get_trial_data(self):
# Transform the discrete dataset to a pandas data frame;
# get extra stats in the process. Also window and concatenate
# the output rates and optionally the model latents from each trial.
dfs = []
for d in range(self.rates.shape[1]):
this_game = self.get_processed_sample(d)
this_rates = np.squeeze(self.rates[:, d, :])
if self.latents is not None:
this_latents = np.squeeze(self.latents[:, d, :])
this_pca_latents = np.squeeze(self.pca_latents[:, d, :])
win_vars = this_game.get_extra_stats(
output_rates=this_rates, latents=this_latents,
pca_latents=this_pca_latents, **self.trial_data_kwargs)
else:
win_vars = this_game.get_extra_stats(
output_rates=this_rates, **self.trial_data_kwargs)
self._concat_windowed(win_vars)
dfs.append(this_game._to_pandas())
self.df = | pd.concat(dfs, ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 10:26:08 2020
@author: weiweijin
"""
# %% Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
import sys
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from itertools import compress
from sklearn import metrics
from bland_altman_plot import bland_altman_plot
# %% Import data
Twin_data = pd.read_pickle("Twin_data_no_outliner.pkl")
# %% Preparing data for main analysis
# feature name
target_name = ['PWV']
feature_name = list(Twin_data.columns)
feature_name.remove('PWV')
# %% Lasso
# Separating out the features
x = Twin_data.loc[:, feature_name].values
# Separating out the target
y = Twin_data.loc[:,target_name].values
y = y.reshape(-1)
# Standardizing the features
XL = StandardScaler().fit_transform(x)
#perform Lasso regreession
lasso_tuned = Lasso(alpha=0.01, max_iter=10e5)
lasso_tuned.fit(XL,y)
# %% prepare data for SVM
# keep only the features selected by Lasso
LassoKInd = abs(lasso_tuned.coef_) > 0.001
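# 0.001 is a near-zero cutoff: Lasso drives dropped coefficients to (numerically) zero,
# so this mask keeps only the predictors that retain a non-negligible weight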
feature_name_SVR = list(compress(feature_name,LassoKInd))
SVR_data = Twin_data[feature_name_SVR]
# Separating out the features
SVR_features = SVR_data.loc[:, feature_name_SVR].values
# Separating out the target
target = Twin_data.loc[:, target_name].values
target = target.reshape(-1)
# Standardizing the features
S_feature = StandardScaler().fit_transform(SVR_features)
# %% Predict using the tuned parameters
# split training and testing datasets
Feature_train,Feature_test,Target_train,Target_test = train_test_split(S_feature,target, test_size=0.3, random_state=31)
# SVM model
SVM_tuned = SVR(kernel='rbf', gamma = 'auto', C=10.113535298901722)
# Fit the tuned model
SVM_tuned.fit(Feature_train, Target_train)
# Predict the target
Target_pred = SVM_tuned.predict(Feature_test)
# Plot fig
# Bland-Altman plot
fig1 = plt.figure(figsize = (8,8))
ax1 = fig1.add_subplot(1,1,1)
ax1 = bland_altman_plot(Target_test, Target_pred)
plt.ylim(-5,6)
ax1.tick_params(axis='x',labelsize = 20)
ax1.tick_params(axis='y',labelsize = 20)
plt.xticks(np.arange(6, 19, 6))
plt.yticks(np.arange(-4, 5, 4))
filename1 = 'SVR_Bland_Altman.png'
fig1.savefig(filename1)
# Estimated vs measured
m, b = np.polyfit(Target_pred, Target_test,1)
X = sm.add_constant(Target_pred)
est = sm.OLS(Target_test, X)
est2 = est.fit()
p_value = est2.pvalues[1]
r_squared = est2.rsquared
fig2 = plt.figure(figsize = (8,8))
ax2 = fig2.add_subplot(1,1,1)
plt.plot(Target_pred, Target_test, 'k.', markersize = 10)
ax2.plot(Target_pred, m*Target_pred +b, 'r', label = 'y = {:.2f}x+{:.2f}'.format(m, b))
plt.xlim(3,18)
plt.ylim(3,25)
plt.rc('xtick',labelsize=20)
plt.rc('ytick',labelsize=20)
plt.xticks(np.arange(3, 16, 6))
plt.yticks(np.arange(3, 22, 6))
plt.legend(fontsize=18,loc=2)
ax2.text(4, 21, 'r$^2$ = {:.2f}'.format(r_squared), fontsize=18)
ax2.text(4, 19, 'p < 0.0001', fontsize=18)
filename2 = 'SVR_est_vs_med.png'
fig2.savefig(filename2)
# save target_test and target_pred
savedata = [Target_test, Target_pred]
df_savedata = | pd.DataFrame(savedata) | pandas.DataFrame |
from pickle import load
from typing import Dict, List
import altair as alt
import streamlit as st
from matplotlib.pyplot import subplots
from pandas import DataFrame, read_csv
from seaborn import heatmap
from sklearn.metrics import confusion_matrix
from streamlit.delta_generator import DeltaGenerator
from .classes import Page
from .config import STREAMLIT_STATIC_PATH
class ModelsEvaluation(Page):
labels = ["bulerias", "alegrias", "sevillanas"]
def write(self):
title = "Models evaluation"
st.title(title)
st.header("Neural networks learning")
model_full_history: Dict = load(
open(STREAMLIT_STATIC_PATH / "data/history_conv_model_70_epochs.p", "rb")
)
cols: List[DeltaGenerator] = st.columns(2)
cols[0].subheader("Model with full data")
full_loss = (
alt.Chart(DataFrame(model_full_history).reset_index())
.transform_fold(["loss", "val_loss"])
.mark_line()
.encode(x="index:Q", y="value:Q", color="key:N")
).properties(width=600)
cols[0].altair_chart(full_loss)
model_partial_history: Dict = load(
open(
STREAMLIT_STATIC_PATH / "data/history_conv_model_only_mel_100_epochs.p",
"rb",
)
)
cols[1].subheader("Model with spectrogram data only")
full_loss = (
alt.Chart(DataFrame(model_partial_history).reset_index())
.transform_fold(["loss", "val_loss"])
.mark_line()
.encode(x="index:Q", y="value:Q", color="key:N")
).properties(width=600)
cols[1].altair_chart(full_loss)
st.header("Prediction execution time")
execution_times = read_csv(STREAMLIT_STATIC_PATH / "data/time_results.csv")
execution_times: DataFrame
boxplot = (
(
alt.Chart(execution_times)
.mark_boxplot(size=50, extent=0.5)
.encode(
x=alt.X("model:N", scale=alt.Scale(type="log")),
y=alt.Y("time:Q", scale=alt.Scale(zero=False)),
color=alt.Color("model", legend=None),
)
)
.properties(width=900, height=600)
.configure_axis(labelFontSize=16, titleFontSize=16)
)
st.altair_chart(boxplot)
st.header("Prediction metrics")
prediction_metrics = | read_csv(STREAMLIT_STATIC_PATH / "data/metrics.csv") | pandas.read_csv |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from pandas import DataFrame, Timestamp, to_datetime
from superset.exceptions import InvalidPostProcessingError
from superset.utils.pandas_postprocessing import _flatten_column_after_pivot, pivot
from tests.unit_tests.fixtures.dataframes import categories_df, single_metric_df
from tests.unit_tests.pandas_postprocessing.utils import (
AGGREGATES_MULTIPLE,
AGGREGATES_SINGLE,
)
def test_flatten_column_after_pivot():
"""
Test pivot column flattening function
"""
# single aggregate cases
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE,
column="idx_nulls",
)
== "idx_nulls"
)
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE,
column=1234,
)
== "1234"
)
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE,
column=Timestamp("2020-09-29T00:00:00"),
)
== "2020-09-29 00:00:00"
)
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE,
column="idx_nulls",
)
== "idx_nulls"
)
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE,
column=("idx_nulls", "col1"),
)
== "col1"
)
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_SINGLE,
column=("idx_nulls", "col1", 1234),
)
== "col1, 1234"
)
# Multiple aggregate cases
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_MULTIPLE,
column=("idx_nulls", "asc_idx", "col1"),
)
== "idx_nulls, asc_idx, col1"
)
assert (
_flatten_column_after_pivot(
aggregates=AGGREGATES_MULTIPLE,
column=("idx_nulls", "asc_idx", "col1", 1234),
)
== "idx_nulls, asc_idx, col1, 1234"
)
def test_pivot_without_columns():
"""
Make sure pivot without columns returns correct DataFrame
"""
df = pivot(
df=categories_df,
index=["name"],
aggregates=AGGREGATES_SINGLE,
)
assert df.columns.tolist() == ["name", "idx_nulls"]
assert len(df) == 101
assert df.sum()[1] == 1050
def test_pivot_with_single_column():
"""
Make sure pivot with single column returns correct DataFrame
"""
df = pivot(
df=categories_df,
index=["name"],
columns=["category"],
aggregates=AGGREGATES_SINGLE,
)
assert df.columns.tolist() == ["name", "cat0", "cat1", "cat2"]
assert len(df) == 101
assert df.sum()[1] == 315
df = pivot(
df=categories_df,
index=["dept"],
columns=["category"],
aggregates=AGGREGATES_SINGLE,
)
assert df.columns.tolist() == ["dept", "cat0", "cat1", "cat2"]
assert len(df) == 5
def test_pivot_with_multiple_columns():
"""
Make sure pivot with multiple columns returns correct DataFrame
"""
df = pivot(
df=categories_df,
index=["name"],
columns=["category", "dept"],
aggregates=AGGREGATES_SINGLE,
)
assert len(df.columns) == 1 + 3 * 5 # index + possible permutations
def test_pivot_fill_values():
"""
Make sure pivot with fill values returns correct DataFrame
"""
df = pivot(
df=categories_df,
index=["name"],
columns=["category"],
metric_fill_value=1,
aggregates={"idx_nulls": {"operator": "sum"}},
)
assert df.sum()[1] == 382
def test_pivot_fill_column_values():
"""
Make sure pivot with null column names returns correct DataFrame
"""
df_copy = categories_df.copy()
df_copy["category"] = None
df = pivot(
df=df_copy,
index=["name"],
columns=["category"],
aggregates={"idx_nulls": {"operator": "sum"}},
)
assert len(df) == 101
assert df.columns.tolist() == ["name", "<NULL>"]
def test_pivot_exceptions():
"""
Make sure pivot raises correct Exceptions
"""
# Missing index
with pytest.raises(TypeError):
pivot(df=categories_df, columns=["dept"], aggregates=AGGREGATES_SINGLE)
# invalid index reference
with pytest.raises(InvalidPostProcessingError):
pivot(
df=categories_df,
index=["abc"],
columns=["dept"],
aggregates=AGGREGATES_SINGLE,
)
# invalid column reference
with pytest.raises(InvalidPostProcessingError):
pivot(
df=categories_df,
index=["dept"],
columns=["abc"],
aggregates=AGGREGATES_SINGLE,
)
# invalid aggregate options
with pytest.raises(InvalidPostProcessingError):
pivot(
df=categories_df,
index=["name"],
columns=["category"],
aggregates={"idx_nulls": {}},
)
def test_pivot_eliminate_cartesian_product_columns():
# single metric
mock_df = DataFrame(
{
"dttm": | to_datetime(["2019-01-01", "2019-01-01"]) | pandas.to_datetime |
import subprocess
from pandas.io.json import json_normalize
import pandas as pd
import os
import PIL
import glob
import argparse
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from models.vgg import vgg16_bn
from models.inception import inception_v3
from models.resnet import resnet50,resnet152
from models.googleNet import googlenet
from densenet import densenet121, densenet161
from models.incept_resnet_v2 import InceptionResNetV2
from models.inception_v4 import InceptionV4
from models.unet import UNet
from data_util import *
from scipy.misc import imread
from scipy.misc import imresize
import random
import imp
from collections import defaultdict, OrderedDict
import time
import io
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', default=1,
help='gpu ids to use, e.g. 0 1 2 3', type=int)
parser.add_argument('--batch_size', default=4,
help='batch size, e.g. 16, 32, 64...', type=int)
parser.add_argument('--input_dir', default = '/home/shh/Passport/jyz/data/IJCAI/dev_data',
help="data input dir", type=str)
parser.add_argument('--output_dir', default="Out",
help='output dir', type=str)
parser.add_argument('--log_dir',default="./logs/test_search", type=str)
parser.add_argument('--results_file', default='results.csv',type=str)
parser.add_argument('--mode', default="nontarget", type=str)
parser.add_argument('--attack_file', default='attack_tijiao.py', type=str)
parser.add_argument('--if_attack',default=1,type=int)
parser.add_argument('--jpeg_quality',default=70,type=float)
return parser.parse_args()
args = parse_args()
print(args)
def check_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
check_mkdir(args.log_dir)
log_file = "%s/eval.log"%args.log_dir
err_file = "%s/eval.err"%args.log_dir
log_all_file = "%s/all.log"%args.log_dir
err_all_file = "%s/all.err"%args.log_dir
def load_model(model,pth_file,device):
model = model.to(device)
model.load_state_dict(torch.load(pth_file))
return model
def get_model_dics(device, model_list= None):
if model_list is None:
model_list = ['densenet121', 'densenet161', 'resnet50', 'resnet152',
'incept_v1', 'incept_v3', 'inception_v4', 'incept_resnet_v2',
'incept_v4_adv2', 'incept_resnet_v2_adv2',
'black_densenet161','black_resnet50','black_incept_v3',
'old_vgg','old_res','old_incept']
models = {}
for model in model_list:
if model=='densenet121':
models['densenet121'] = densenet121(num_classes=110)
load_model(models['densenet121'],"../pre_weights/ep_38_densenet121_val_acc_0.6527.pth",device)
if model=='densenet161':
models['densenet161'] = densenet161(num_classes=110)
load_model(models['densenet161'],"../pre_weights/ep_30_densenet161_val_acc_0.6990.pth",device)
if model=='resnet50':
models['resnet50'] = resnet50(num_classes=110)
load_model(models['resnet50'],"../pre_weights/ep_41_resnet50_val_acc_0.6900.pth",device)
if model=='incept_v3':
models['incept_v3'] = inception_v3(num_classes=110)
load_model(models['incept_v3'],"../pre_weights/ep_36_inception_v3_val_acc_0.6668.pth",device)
if model=='incept_v1':
models['incept_v1'] = googlenet(num_classes=110)
load_model(models['incept_v1'],"../pre_weights/ep_33_googlenet_val_acc_0.7091.pth",device)
#vgg16 = vgg16_bn(num_classes=110)
#load_model(vgg16, "./pre_weights/ep_30_vgg16_bn_val_acc_0.7282.pth",device)
if model=='incept_resnet_v2':
models['incept_resnet_v2'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2'], "../pre_weights/ep_17_InceptionResNetV2_ori_0.8320.pth",device)
if model=='incept_v4':
models['incept_v4'] = InceptionV4(num_classes=110)
load_model(models['incept_v4'],"../pre_weights/ep_17_InceptionV4_ori_0.8171.pth",device)
if model=='incept_resnet_v2_adv':
models['incept_resnet_v2_adv'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2_adv'], "../pre_weights/ep_22_InceptionResNetV2_val_acc_0.8214.pth",device)
if model=='incept_v4_adv':
models['incept_v4_adv'] = InceptionV4(num_classes=110)
load_model(models['incept_v4_adv'],"../pre_weights/ep_24_InceptionV4_val_acc_0.6765.pth",device)
if model=='incept_resnet_v2_adv2':
models['incept_resnet_v2_adv2'] = InceptionResNetV2(num_classes=110)
#load_model(models['incept_resnet_v2_adv2'], "../test_weights/ep_29_InceptionResNetV2_adv2_0.8115.pth",device)
load_model(models['incept_resnet_v2_adv2'], "../test_weights/ep_13_InceptionResNetV2_val_acc_0.8889.pth",device)
if model=='incept_v4_adv2':
models['incept_v4_adv2'] = InceptionV4(num_classes=110)
# load_model(models['incept_v4_adv2'],"../test_weights/ep_32_InceptionV4_adv2_0.7579.pth",device)
load_model(models['incept_v4_adv2'],"../test_weights/ep_50_InceptionV4_val_acc_0.8295.pth",device)
if model=='resnet152':
models['resnet152'] = resnet152(num_classes=110)
load_model(models['resnet152'],"../pre_weights/ep_14_resnet152_ori_0.6956.pth",device)
if model=='resnet152_adv':
models['resnet152_adv'] = resnet152(num_classes=110)
load_model(models['resnet152_adv'],"../pre_weights/ep_29_resnet152_adv_0.6939.pth",device)
if model=='resnet152_adv2':
models['resnet152_adv2'] = resnet152(num_classes=110)
load_model(models['resnet152_adv2'],"../pre_weights/ep_31_resnet152_adv2_0.6931.pth",device)
if model=='black_resnet50':
models['black_resnet50'] = resnet50(num_classes=110)
load_model(models['black_resnet50'],"../test_weights/ep_0_resnet50_val_acc_0.7063.pth",device)
if model=='black_densenet161':
models['black_densenet161'] = densenet161(num_classes=110)
load_model(models['black_densenet161'],"../test_weights/ep_4_densenet161_val_acc_0.6892.pth",device)
if model=='black_incept_v3':
models['black_incept_v3']=inception_v3(num_classes=110)
load_model(models['black_incept_v3'],"../test_weights/ep_28_inception_v3_val_acc_0.6680.pth",device)
if model=='old_res':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_resnet_v1_50.py")
models['old_res'] = torch.load('./models_old/tf_to_pytorch_resnet_v1_50.pth').to(device)
if model=='old_vgg':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_vgg16.py")
models[model] = torch.load('./models_old/tf_to_pytorch_vgg16.pth').to(device)
if model=='old_incept':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_inception_v1.py")
models[model] = torch.load('./models_old/tf_to_pytorch_inception_v1.pth').to(device)
return models
def load_data_for_defense(input_dir, batch_size=16): #Only forward
all_img_paths = glob.glob(os.path.join(input_dir, '*.png'))
all_labels = [-1 for i in range(len(all_img_paths))]
dev_data = pd.DataFrame({'image_path':all_img_paths, 'label_idx':all_labels})
transformer = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
datasets = {
'dev_data': ImageSet(dev_data, transformer)
}
dataloaders = {
ds: DataLoader(datasets[ds],
batch_size=batch_size,
num_workers=0,
shuffle=False) for ds in datasets.keys()
}
return dataloaders
def input_diversity(image, prob, low, high):
if random.random()<prob:
return image
rnd = random.randint(low, high)
rescaled = F.upsample(image, size=[rnd, rnd], mode='bilinear')
h_rem = high - rnd
w_rem = high - rnd
pad_top = random.randint( 0, h_rem)
pad_bottom = h_rem - pad_top
pad_left = random.randint(0, w_rem)
pad_right = w_rem - pad_left
padded = F.pad(rescaled, [pad_top, pad_bottom, pad_left, pad_right], 'constant', 0)
return padded
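# Illustrative sketch of the diversity transform above: with probability (1 - prob) the
# batch is randomly rescaled into [low, high] and then zero-padded back to a square of
# side `high`. The tensor below is random data, not part of the attack pipeline.
def _example_input_diversity():
    dummy = torch.rand(2, 3, 299, 299)
    diversified = input_diversity(dummy, prob=0.5, low=270, high=299)
    return diversified.shape  # always (2, 3, 299, 299): rescale + pad restores the size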
def preprocess(image,model_name="vgg16",prob=1.0):
if "incept_v3" in model_name or model_name[:16]=='incept_resnet_v2' or model_name[:9]=='incept_v4' or model_name=='resnet_152' or model_name=="black_incept_v3":
return input_diversity(image,prob,270,299)
else:
image = F.upsample(image, size=(224, 224), mode='bilinear')
if model_name=="old_res" or model_name=="old_vgg":
image = ((image/2.0)+0.5)*255.0
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
image[:, 0,:, :] = image[:, 0,:, :] - _R_MEAN
image[:, 1,:, :] = image[:, 1,:, :] - _G_MEAN
image[:, 2,:, :] = image[:, 2,:, :] - _B_MEAN
return input_diversity(image,prob,220,224)
else:
return input_diversity(image,prob,220,224)
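# Sketch of the per-model preprocessing above: inception-family models keep the 299-pixel
# scale, while the old VGG/ResNet branch rescales to 224x224 and subtracts the ImageNet
# channel means. The batch below is random data, purely illustrative.
def _example_preprocess():
    dummy = torch.rand(1, 3, 299, 299)
    out_incept = preprocess(dummy, model_name='incept_v3', prob=1.0)
    out_vgg = preprocess(dummy, model_name='old_vgg', prob=1.0)
    return out_incept.shape, out_vgg.shape  # (1, 3, 299, 299) and (1, 3, 224, 224)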
def check_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def MSE(old_dir, new_dir,filename):
img1 = imread(os.path.join(old_dir,filename)).astype(np.float)
img2 = imread(os.path.join(new_dir,filename)).astype(np.float)
#print(np.sum((img1-img2)**2,axis=2).shape)
mse = np.sqrt(np.sum((img1-img2)**2,axis=2)).mean()
return mse
def images_tojpeg(images, images_new):
    #print('1',images.mean())
    for i in range(images.shape[0]):
        # use a fresh buffer per image so successive JPEGs do not accumulate in one stream
        buffer = io.BytesIO()
        pil_img = transforms.ToPILImage()(images[i].detach().cpu())
        # PIL expects an integer quality setting
        pil_img.save(buffer, format='jpeg', quality=int(args.jpeg_quality))
        buffer.seek(0)
        images_new[i] = transforms.ToTensor()(Image.open(buffer).convert('RGB'))
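# Illustrative sketch of the JPEG re-encoding defense above: a batch in [0, 1] is
# round-tripped through JPEG at args.jpeg_quality. The batch here is random data.
def _example_images_tojpeg():
    batch = torch.rand(2, 3, 299, 299)
    compressed = torch.zeros_like(batch)
    images_tojpeg(batch, compressed)
    return compressed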
def test_target_attack(device, models_dic, old_dir, new_dir, labels_dic, mode):
loader2 = load_data_for_defense(new_dir, args.batch_size)
scores , accuracys= {}, {}
per_score =0
err = 0
old_score,adv_score,black_score = [],[],[]
with torch.no_grad():
for key in models_dic.keys():
model = models_dic[key]
j=0
score = 0
correct = 0
for data in loader2['dev_data']:
image = data["image"].to(device)
filenames = data["filename"]
#images_tojpeg(image,image)
img = preprocess(image,key)
out = model(img)
if 'incept_v3' in key or "incept_v1" in key:
pred = out[0].max(1)[1]
else:
try:
pred = out.max(1)[1]
except:
print("Error!!!!, key",key,img.shape,out.max(1))
for i in range(len(pred)):
mse = MSE(old_dir, new_dir,filenames[i])
err+=mse
if mode=="target" and pred[i].item()!=labels_dic[filenames[i]]:
score+=64
elif mode=="nontarget" and pred[i].item()==labels_dic[filenames[i]]:
score+=64
correct+=1
else:
score+=mse
if mode!='nontarget':
correct+=1
j+=image.shape[0]
print(key)
if 'old' in key:
old_score.append(score/j)
if 'adv' in key:
adv_score.append(score/j)
else:
black_score.append(score/j)
scores[key] = score/j
accuracys[key] = correct/float(j)
per_score+=score/j
err = err/j
print("Test Model %s, acc is %.3f, score is %.3f."%(key, correct/float(j), score/j))
per_score/=len(models_dic)
old_score = np.mean(np.array(old_score))
adv_score = np.mean(np.array(adv_score))
black_score = np.mean(np.array(black_score))
print("Per_score:", per_score)
print("Per score for white model: %.3f"%old_score)
print("score for adv:%.2f"%adv_score)
print('score for black:%.2f'%black_score)
print('err %.3f'%err)
modified_score = old_score*0.4+adv_score*0.4+black_score*0.2
print('Modified score is %.3f'%modified_score)
return scores, accuracys, err, [old_score, adv_score, black_score, modified_score]
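# Illustrative wiring of the evaluation above. Assumptions (not defined in this script):
# `labels_dic` maps each image filename to its true label index, and args.output_dir
# already holds the adversarial images produced from args.input_dir.
def _example_evaluation(device, labels_dic):
    models = get_model_dics(device, model_list=['old_vgg', 'old_res', 'old_incept'])
    return test_target_attack(device, models, args.input_dir, args.output_dir,
                              labels_dic, mode=args.mode)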
def try_str_to_num(str_):
try:
return int(str_)
except:
try:
return float(str_)
except:
return str_
def get_labels(input_dir):
table = | pd.read_csv(input_dir+'/dev.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 16 15:50:03 2017
@author: konodera
Proportion of each time-of-day band in which a given user buys a given item
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
from collections import defaultdict
import utils
utils.start(__file__)
#==============================================================================
# load
#==============================================================================
col = ['order_id', 'user_id', 'product_id', 'order_dow', 'order_hour_of_day', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', col).sort_values('user_id')
log = pd.merge(log, | pd.read_pickle('../input/mk/timezone.p') | pandas.read_pickle |
# -*- coding: utf-8 -*-
# Operational Libs
import collections
import functools
import logging
import os
# Dash Libs
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output, ALL
# Analytic Libs
import pandas as pd
import numpy as np
import math
LOG = logging.getLogger(__name__)
logging.basicConfig(level="INFO")
# In production, the CSV files are at this project on Civis Platform:
# https://platform.civisanalytics.com/spa/#/projects/135876
CIVIS_PROJECT_ID = 135876
LOCAL_DATA_DIR = "data"
DEV_CSV_FILENAMES = [
"dash_trajectoriesDat_baseline.csv", # file ID: 103222548
"dash_trajectoriesDat_june1partial10.csv", # file ID: 103222598
"dash_trajectoriesDat_june1partial30.csv", # file ID: 103222650
]
EMS_MAPPING = {"North-Central": (1, 2), "Central": (3, 6), "Southern": (4, 5), "Northeast": (7, 8, 9, 10, 11)}
N_SLIDER_MARKS = 5
N_CHART_COLUMNS = 2
app = dash.Dash(__name__, prevent_initial_callbacks=True)
# Mark the correct server for Heroku or Civis deployment
server = app.server
if os.getenv("CIVIS_SERVICE_VERSION"):
# This environment variable will be set when deployed in Civis
import civis
client = civis.APIClient()
CSV_FILES = civis.find(
client.projects.get(CIVIS_PROJECT_ID).files,
file_name=lambda filename: filename.endswith(".csv"),
)
logging.info("%d CSV files found", len(CSV_FILES))
else:
CSVFile = collections.namedtuple("CSVFile", ("id", "file_name"))
CSV_FILES = [
CSVFile(id=None, file_name=file_name)
for file_name in DEV_CSV_FILENAMES
]
# Setup
#############################################################################
# Color Options
COLORS = {
'sf': '#1798c1',
'green': '#416165', # Color for plots & text
    'beige': '#F7F7FF',  # Color for gridlines
}
# RangeSlider values need to be ints - convert to unix timestamp
def dtToUnix (dt):
''' Convert datetime to Unix Milliseconds
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#from-timestamps-to-epoch
'''
unixTime = (dt - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
return unixTime
# Convert unix Time back to datetime
def unixToDt (unixTime):
''' Convert Unix milliseconds to datetime '''
return pd.to_datetime(unixTime, unit='s')
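# Quick round-trip sketch for the two helpers above (the date is an illustrative value):
def _example_unix_roundtrip():
    ts = pd.Timestamp('2020-06-01')
    assert unixToDt(dtToUnix(ts)) == ts
    return dtToUnix(ts)  # 1590969600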
@functools.lru_cache(maxsize=4)
def get_df(csv_file_path):
"""Get pandas DataFrame from CSV and apply pre-processing.
Parameters
----------
csv_file_path : str
Returns
-------
pd.DataFrame
"""
df = | pd.read_csv(csv_file_path) | pandas.read_csv |
import os
import traceback
import sys
import pkg_resources
import pandas as pd
import openpyxl
import matplotlib
import socket
import setuptools
import base64
import numpy as np
import pickle as pkl
import xml.etree.ElementTree as et
import matplotlib.pyplot as plt
import seaborn as sns
from aniachi.systemUtils import Welcome as W
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from io import StringIO
from io import BytesIO
from multiprocessing import Manager
from sklearn.preprocessing import scale
mgr = Manager()
ns = mgr.Namespace()
port = 8080
"""
"""
def __doc__():
return 'Web application server'
"""
"""
def __str__():
    return 'Web application server. \n To do EMBI analysis and mean and variance reduction'
#
#
@view_config(route_name='hello', renderer='home.jinja2')
def hello_world(request):
return {'name': 'Running Server','port':port,'pyramid':pkg_resources.get_distribution('pyramid').version
,'numpy':np.__version__,'pandas':pd.__version__ ,'favicon':'aniachi_logo.png','matplotlib':matplotlib.__version__,
'fbprophet':pkg_resources.get_distribution('fbprophet').version,'openpyxl ':openpyxl.__version__,'setuptools':setuptools.__version__,
'py_common_fetch':pkg_resources.get_distribution('py-common-fetch').version,'host':socket.gethostbyname(socket.gethostname()),
'pyqrcode':pkg_resources.get_distribution('pyqrcode').version,'argparse':'','pypng':pkg_resources.get_distribution('pypng').version
}
"""
"""
@view_config(route_name='entry')
def entry_point(request):
return HTTPFound(location='app/welcome')
"""
A friendly error request
"""
@view_config(context=Exception, renderer='error.jinja2')
def error(context, request):
fp = StringIO()
traceback.print_exc(file=fp)
return {'error':fp.getvalue(),'favicon':'logo.png'}
"""
"""
def getParamterOrdefault(d,k,v,valid):
aux = v
try:
if (d[k] in valid): aux = d[k]
except Exception as e:
pass
return aux
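# Small sketch of the whitelist helper above: the second call falls back to the default
# because 'json' is not in that call's list of valid values (values are illustrative).
def _example_get_parameter_or_default():
    params = {'format': 'json'}
    assert getParamterOrdefault(params, 'format', 'default', ['html', 'json']) == 'json'
    assert getParamterOrdefault(params, 'format', 'default', ['html', 'csv']) == 'default'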
#
#
"""
Auxiliary method if you want to test something
"""
def testSomething():
pass
"""
A friendly 404 request
"""
@view_config(context=HTTPNotFound, renderer='404.jinja2')
def not_found(context, request):
request.response.status = 404
return {}
"""
"""
def readAndFilterDataframe(file):
try:
        print('Reading...', os.path.join(os.getcwd(), file))
df = pd.read_csv(os.path.join(os.getcwd(),file))
#remove unamed columns.
columnsToDelete = []
for i in range(15, 29):
columnsToDelete.append('Unnamed: ' + str(i))
df = df.drop(columns=columnsToDelete)
print('Fixing columns')
#convert to data time objects
df.Fecha = pd.to_datetime(df['Fecha'])
print('Fixing dateTime Objects')
print('Dataframe rows:',df.shape[0])
ns.df = df #multitreading shared object
except:
print(os.path.join(os.getcwd(),file),'Not found')
sys.exit(-1)
@view_config(route_name='dataset')
def datasetServer(request):
format = getParamterOrdefault(request.params,'format','default',['html','json','xml','serialized','csv','excel'])
if (format=='csv'):
s = StringIO()
ns.df.to_csv(s)
r = Response(s.getvalue(), content_type='application/CSV', charset='UTF-8')
elif (format=='json'):
df = ns.df
s = StringIO()
df.to_json(s)
r = Response(s.getvalue(), content_type='application/json', charset='UTF-8')
elif (format=='xml'):
df =ns.df
root = et.Element('root')
for i, row in df.iterrows():
data = et.SubElement(root, 'row')
data.set('iter', str(i))
for head in df.columns:
aux = et.SubElement(data, head)
if head =='Fecha':
aux.text = str(row[head].strftime('%Y-%d-%m'))
else:
aux.text = str(row[head])
r = Response(et.tostring(root), content_type='application/xml', charset='UTF-8')
elif (format=='html'):
df = ns.df
s = StringIO()
df.to_html(s,index=True)
r = Response(s.getvalue(), content_type='text/html', charset='UTF-8')
elif (format=='serialized'):
r = Response(base64.encodebytes(pkl.dumps(ns.df)).decode('utf-8'), content_type='text/html', charset='UTF-8')
elif (format == 'excel'):
        b = BytesIO()
        # DataFrame.to_excel writes straight into the in-memory buffer; no separate ExcelWriter needed
        ns.df.to_excel(b)
r = Response(b.getvalue(), content_type='application/force-download', content_disposition='attachment; filename=data.xls')
else:
        r = Response('Bad parameters ' + str(request.params), content_type='text/html', charset='UTF-8')
return r
"""
return an empty dataframe to avoid errors
"""
def getNormalizedDataAsDF(country):
try:
mx_scale = scale(ns.df[country])
d = {'Fecha': ns.df.Fecha, country: ns.df[country], 'NORMALIZED': mx_scale}
return pd.DataFrame(d)
except:
return pd.DataFrame(dict())
"""
"""
def getIntParameter(d,k,v,r):
aux=int(v)
try:
if isinstance(int(d[k]), int):
if int(d[k]) in r:
aux= int(d[k])
except Exception as e:
pass
return aux
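# Sketch for getIntParameter: the request value is used only if it parses as an int and
# falls inside the allowed range, otherwise the default is returned (illustrative values).
def _example_get_int_parameter():
    assert getIntParameter({'n': '7'}, 'n', 3, range(0, 10)) == 7
    assert getIntParameter({'n': 'abc'}, 'n', 3, range(0, 10)) == 3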
"""
"""
@view_config(route_name='normalized')
def normalizedServer(request):
format = getParamterOrdefault(request.params, 'format', 'default',
['html', 'json', 'xml', 'serialized', 'csv', 'excel'])
country = getParamterOrdefault(request.params, 'country', 'default',
['LATINO','REP_DOM','BRAZIL','COLOMBIA','ECUADOR','ARGENTINA','MEXICO','PERU','PANAMA','VENEZUELA','URUGUAY','CHILE','EL_SALVADOR'])
if country == 'default' or format == 'default':
        r = Response('Bad parameters ' + str(request.params), content_type='text/html', charset='UTF-8')
return r
else:
df= getNormalizedDataAsDF(country)
if (format == 'csv'):
s = StringIO()
df.to_csv(s)
r = Response(s.getvalue(), content_type='application/CSV', charset='UTF-8')
elif (format == 'json'):
s = StringIO()
df.to_json(s)
r = Response(s.getvalue(), content_type='application/json', charset='UTF-8')
elif (format == 'xml'):
root = et.Element('root')
for i, row in df.iterrows():
data = et.SubElement(root, 'row')
data.set('iter', str(i))
for head in df.columns:
aux = et.SubElement(data, head)
if head == 'Fecha':
aux.text = str(row[head].strftime('%Y-%d-%m'))
else:
aux.text = str(row[head])
r = Response(et.tostring(root), content_type='application/xml', charset='UTF-8')
elif (format == 'html'):
s = StringIO()
df.to_html(s, index=True)
r = Response(s.getvalue(), content_type='text/html', charset='UTF-8')
elif (format == 'serialized'):
r = Response(base64.encodebytes(pkl.dumps(df)).decode('utf-8'), content_type='text/html',charset='UTF-8')
elif (format == 'excel'):
b = BytesIO()
| pd.ExcelWriter(b) | pandas.ExcelWriter |
""" plotting functions for Dataset objects
To Do:
Edit hyp_stats plots to take transitions.HypStats object instead of ioeeg.Dataset object
Remove redundant plotting fns added into EKG classs
Add subsetEEG function to break up concatenated NREM segments for plotting. Will require adjustments
to specified detections added to plot.
"""
import itertools
import igraph as ig
import math
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import shapely.geometry as SG
from matplotlib.widgets import Slider
from pandas.plotting import register_matplotlib_converters
from scipy.signal import find_peaks, butter, sosfiltfilt
from scipy import interpolate
register_matplotlib_converters()
def plotEEG(d, raw=True, filtered=False, spindles=False, spindle_rejects=False):
""" plot multichannel EEG w/ option for double panel raw & filtered. For short, pub-ready
figures. Use vizeeg for data inspection
red = spindle rejects by time domain criteria; dark red = spindle rejects by frequency domain criteria
Parameters
----------
d: instance of ioeeg Dataset class
raw: bool, optional, default: True
Option to plot raw EEG
filtered: bool, optional, default: False
Option to plot filtered EEG
spindles: bool, optional, default: False
Option to plot spindle detections
spindle_rejects: bool, optional, default: False
Option to plot rejected spindle detections
Returns
-------
matplotlib.pyplot figure instance
"""
data = []
title = []
# import data
if raw == True:
raw = d.data
data.append(raw)
title.append('Raw')
if filtered == True:
filtd = d.spindle_calcs.loc(axis=1)[:, 'Filtered']
data.append(filtd)
title.append('Filtered')
# flatten events list by channel for plotting
if spindles == True:
sp_eventsflat = [list(itertools.chain.from_iterable(d.spindle_events[i])) for i in d.spindle_events.keys()]
if spindle_rejects == True:
sp_rej_t_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_t[i])) for i in d.spindle_rejects_t.keys()]
sp_rej_f_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_f[i])) for i in d.spindle_rejects_f.keys()]
# set channels for plotting
channels = [x[0] for x in d.data.columns]
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=(10,10), squeeze=False)
fig.subplots_adjust(hspace=.1, top=.9, bottom=.1, left=.05, right=.95)
for dat, ax, t in zip(data, axs.flatten(), title):
for i, c in enumerate(channels):
# normalize each channel to [0, 1]
dat_ser = pd.Series(dat[(c, t)], index=dat.index)
norm_dat = (dat_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i # subtract i for plotting offset
ax.plot(norm_dat, linewidth=.5, color='C0')
# plot spindles
if spindles == True:
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat[i]]
spins = pd.Series(index=norm_dat.index)
spins[sp_events_TS] = norm_dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5)
if spindle_rejects == True:
# plot time-domain rejects
sp_rejs_t_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat[i]]
spin_rejects_t = pd.Series(index=norm_dat.index)
spin_rejects_t[sp_rejs_t_TS] = norm_dat[sp_rejs_t_TS]
ax.plot(spin_rejects_t, color='red', alpha=0.5)
# plot frequency-domain rejects
sp_rejs_f_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat[i]]
spin_rejects_f = pd.Series(index=norm_dat.index)
spin_rejects_f[sp_rejs_f_TS] = norm_dat[sp_rejs_f_TS]
ax.plot(spin_rejects_f, color='darkred', alpha=0.5)
ax.set_title(t)
ax.set_yticks(list(np.arange(0.5, -(len(channels)-1), -1)))
ax.set_yticklabels(channels)
ax.margins(x=0) # remove white space margins between data and y axis
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# set overall parameters
fig.suptitle(d.metadata['file_info']['in_num'])
plt.xlabel('Time')
return fig, axs
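# Illustrative call for plotEEG (assumption: `d` is an ioeeg.Dataset instance with spindle
# detection already run; the dataset and the output filename are not defined here).
def _example_plot_eeg(d):
    fig, axs = plotEEG(d, raw=True, filtered=True, spindles=True, spindle_rejects=False)
    fig.savefig('eeg_overview.png', dpi=300, bbox_inches='tight')
    return fig, axs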
def plotEEG_singlechan(d, chan, raw=True, filtered=False, rms=False, thresholds=False, spindles=False, spindle_rejects=False):
""" plot single channel EEG. Options for multipaneled calculations. Not for concatenated datasets
Parameters
----------
d: instance of ioeeg Dataset class
chan: str
channel to plot
raw: bool, optional, default: True
Option to plot raw EEG panel
filtered: bool, optional, default: False
Option to plot filtered EEG panel
rms: bool, optional, default: False
Option to plot filtered EEG panel with RMS and RMS moving average
thresholds: bool, optional, default: False
Option to plot spindle threshold lines on rms panel
spindles: bool, optional, default: False
Option to plot filtered EEG with spindle detection panel
spindle_rejects: bool, optional, default: False
Option to plot filtered EEG with spindle rejection panel.
Note: Spindles and spindle_rejects plot on same panel if
both True
Returns
-------
matplotlib.pyplot figure instance
"""
data = []
dtype = []
labels = []
c = chan
# import data
if raw == True:
raw_data = d.data[c, 'Raw']
if filtered == True or rms == True or spindles == True or spindle_rejects == True:
filtd_data = d.spindle_calcs.loc(axis=1)[c, 'Filtered']
# set data to plot
if raw == True:
#raw = d.data[c, 'Raw']
data.append(raw_data)
dtype.append('raw')
labels.append('Raw Signal')
if filtered == True:
#filtd = d.spindle_calcs.loc(axis=1)[c, 'Filtered']
data.append(filtd_data)
dtype.append('filtd')
labels.append('Filtered Signal')
if rms == True:
data.append(filtd_data)
dtype.append('filtd+rms')
labels.append('Filtered Signal')
if spindles == True or spindle_rejects == True:
data.append(filtd_data)
labels.append('Filtered Signal')
if spindles == True and spindle_rejects == False:
dtype.append('filtd+spin')
elif spindles == False and spindle_rejects == True:
dtype.append('filtd+rej')
elif spindles == True and spindle_rejects == True:
dtype.append('filtd+spin+rej')
# pull out thresholds for labels
loSD = d.metadata['spindle_analysis']['sp_loSD']
hiSD = d.metadata['spindle_analysis']['sp_hiSD']
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=(18,6), squeeze=False)
fig.subplots_adjust(hspace=.1, top=.9, bottom=.1, left=.05, right=.95)
for dat, ax, dt, label in zip(data, axs.flatten(), dtype, labels):
# plot EEG
ax.plot(dat, linewidth=.5, color='C0', label=label)
# plot filtered EEG w/ rms & thresholds
if dt == 'filtd+rms':
ax.plot(d.spRMS[c], label='RMS', color='green')
ax.plot(d.spRMSmavg[c], label='RMS moving average', color='orange')
if dt == 'filtd+rms' and thresholds == True:
ax.axhline(d.spThresholds[c].loc['Low Threshold'], linestyle='solid', color='grey', label = f'Mean RMS + {loSD} SD')
ax.axhline(d.spThresholds[c].loc['High Threshold'], linestyle='dashed', color='grey', label = f'Mean RMS + {hiSD} SD')
# plot spindles
if dt =='filtd+spin' or dt =='filtd+spin+rej':
sp_valuesflat = []
sp_eventsflat = []
for n in range(len(d.spindle_events[c])):
for m in range(len(d.spindle_events[c][n])):
sp_valuesflat.append(dat[d.spindle_events[c][n][m]])
sp_eventsflat.append(d.spindle_events[c][n][m])
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat]
spins = pd.Series(index=dat.index)
spins[sp_events_TS] = dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5, label='Spindle Detection')
# plot spindle rejections
if dt == 'filtd+rej' or dt == 'filtd+spin+rej':
# plot time-domain rejects
sp_rej_t_valuesflat = []
sp_rej_t_eventsflat = []
for n in range(len(d.spindle_rejects_t[c])):
for m in range(len(d.spindle_rejects_t[c][n])):
sp_rej_t_valuesflat.append(dat[d.spindle_rejects_t[c][n][m]])
sp_rej_t_eventsflat.append(d.spindle_rejects_t[c][n][m])
sp_rej_t_events_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat]
spin_rejects_t = pd.Series(index=dat.index)
spin_rejects_t[sp_rej_t_events_TS] = dat[sp_rej_t_events_TS]
ax.plot(spin_rejects_t, color='red', alpha=0.5, label='Rejected Detection (T)')
# plot frequency-domain rejects
sp_rej_f_valuesflat = []
sp_rej_f_eventsflat = []
for n in range(len(d.spindle_rejects_f[c])):
for m in range(len(d.spindle_rejects_f[c][n])):
sp_rej_f_valuesflat.append(dat[d.spindle_rejects_f[c][n][m]])
sp_rej_f_eventsflat.append(d.spindle_rejects_f[c][n][m])
sp_rej_f_events_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat]
spin_rejects_f = pd.Series(index=dat.index)
spin_rejects_f[sp_rej_f_events_TS] = dat[sp_rej_f_events_TS]
ax.plot(spin_rejects_f, color='darkred', alpha=0.5, label='Rejected Detection (F)')
ax.legend(loc='lower left')
#ax.set_title(t)
#ax.set_yticks(list(np.arange(0.5, -(len(chan)-1), -1)))
#ax.set_yticklabels(chan)
ax.margins(x=0) # remove white space margins between data and y axis
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# plot minor axes
seconds = mdates.SecondLocator()
ax.xaxis.set_minor_locator(seconds)
ax.grid(axis='x', which='minor', linestyle=':')
ax.grid(axis='x', which='major')
# set overall parameters
fig.suptitle(d.metadata['file_info']['in_num'])
plt.xlabel('Time')
return fig
def vizeeg(d, raw=True, filtered=False, spindles=False, spindle_rejects=False, slider=True, win_width=15, raw_lowpass=True,
lowpass_freq=25, lowpass_order=4):
""" vizualize multichannel EEG w/ option for double panel raw and/or filtered. Optimized for
inspecting spindle detections (title/axis labels removed for space)
Spindles rejected based on time-domain criteria are plotted in red; rejections based on
frequency-domain criteria are plotted in darkred.
Parameters
----------
d: instance of ioeeg Dataset class
raw: bool, optional, default: True
Option to plot raw EEG
filtered: bool, optional, default: False
Option to plot spindle filtered EEG
spindles: bool, optional, default: False
Option to plot spindle detections
spindle_rejects: bool, optional, default: False
Option to plot rejected spindle detections
slider: bool (default: False)
Option to implement an X-axis slider instead of built-in matplotlib zoom. Useful
for inspecting long segments of EEG with a set window
win_width: int (default: 15)
If using slider option, number of seconds to set window width
raw_lowpass: bool (default: True)
Whether to plot the lowpass filtered raw data [in place of the unchanged raw data]
lowpass_freq: int (default: 25)
Frequency to lowpass the raw data for visualization (if not already applied)
lowpass_order: int (default: 4)
Butterworth lowpass filter order to be used if lowpass_raw is not None (doubles for filtfilt)
Returns
-------
matplotlib.pyplot figure instance
"""
# Set figure size (double height if plotting both raw & filtered)
    if raw and filtered:
figsize = (14, 14)
else:
figsize = (14, 7)
data = []
title = []
# import data
if raw == True:
if not raw_lowpass:
# use the unchanged raw data
raw_data = d.data
elif raw_lowpass:
# use the lowpass filtered raw data
try:
# check if filtered data exists
raw_lowpass_data = d.data_lowpass
except AttributeError:
# apply lowpass filter
d.lowpass_raw(lowpass_freq, lowpass_order)
raw_lowpass_data = d.data_lowpass
if filtered == True:
filtd = d.spindle_calcs.loc(axis=1)[:, 'Filtered']
# set data to plot (title corresponds to multiindex level 2 in data df)
if raw == True:
if not raw_lowpass:
# plot the unchanged data
data.append(raw_data)
title.append('Raw')
elif raw_lowpass:
# plot the lowpass data
data.append(raw_lowpass_data)
title.append('raw_lowpass')
if filtered == True:
data.append(filtd)
title.append('Filtered')
# flatten events list by channel for plotting
if spindles == True:
sp_eventsflat = [list(itertools.chain.from_iterable(d.spindle_events[i])) for i in d.spindle_events.keys()]
if spindle_rejects == True:
# time-domain rejects
sp_rej_t_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_t[i])) for i in d.spindle_rejects_t.keys()]
# frequency domain rejects
sp_rej_f_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_f[i])) for i in d.spindle_rejects_f.keys()]
# set channels for plotting
channels = [x[0] for x in d.data.columns if x[0] not in ['EKG', 'EOG_L', 'EOG_R']]
# set offset multiplier (distance between channels in plot)
mx = 0.1
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=figsize, squeeze=False)
fig.subplots_adjust(hspace=.1, top=.9, bottom=.1, left=.05, right=.95)
yticks = []
for dat, ax, t in zip(data, axs.flatten(), title):
for i, c in enumerate(channels):
# normalize each channel to [0, 1] -> can also simply subtract the mean (cleaner looking), but
# normalization preserves relative differences between channels while putting them on a common scale
dat_ser = pd.Series(dat[(c, t)], index=dat.index)
norm_dat = (dat_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i*mx # subtract i for plotting offset
yticks.append(np.nanmedian(norm_dat))
ax.plot(norm_dat, linewidth=.5, color='C0')
# plot spindles
if spindles == True:
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat[i]]
spins = pd.Series(index=norm_dat.index)
spins[sp_events_TS] = norm_dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5)
if spindle_rejects == True:
# plot time-domain rejects
sp_rejs_t_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat[i]]
spin_t_rejects = pd.Series(index=norm_dat.index)
spin_t_rejects[sp_rejs_t_TS] = norm_dat[sp_rejs_t_TS]
ax.plot(spin_t_rejects, color='red', alpha=0.5)
# plot frequency-domain rejects
sp_rejs_f_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat[i]]
spin_f_rejects = pd.Series(index=norm_dat.index)
spin_f_rejects[sp_rejs_f_TS] = norm_dat[sp_rejs_f_TS]
ax.plot(spin_f_rejects, color='darkred', alpha=0.5)
# remove title to maximize on-screen plot area
#ax.set_title(t)
# set y axis params
ax.set_yticks(yticks)
ax.set_yticklabels(channels)
ax.set_ylim(bottom = yticks[-1]-3*mx, top=yticks[0]+3*mx)
ax.margins(x=0) # remove white space margins between data and y axis
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# if data roughly 5 mins or less, set minor x-axes
if (d.data.index[-1] - d.data.index[0]).total_seconds() < 400:
seconds = mdates.SecondLocator()
ax.xaxis.set_minor_locator(seconds)
ax.grid(axis='x', which='minor', linestyle=':')
ax.grid(axis='x', which='major')
# set overall parameters
#fig.tight_layout(pad=0) # remove figure padding --> this pushes slider onto fig
# remove labels to maximize on-screen plot area
#plt.xlabel('Time')
#fig.suptitle(d.metadata['file_info']['in_num'])
# option to use x-axis slider insted of matplotlib zoom
if slider:
# plot minor axes --> requires slider for segments longer than 5mins
seconds = mdates.SecondLocator()
ax.xaxis.set_minor_locator(seconds)
ax.grid(axis='x', which='minor', linestyle=':')
ax.grid(axis='x', which='major')
# set initial window
x_min_index = 0
x_max_index = win_width*int(d.s_freq)
x_min = d.data.index[x_min_index]
x_max = d.data.index[x_max_index]
x_dt = x_max - x_min
y_min, y_max = plt.axis()[2], plt.axis()[3]
plt.axis([x_min, x_max, y_min, y_max])
axcolor = 'lightgoldenrodyellow'
axpos = plt.axes([0.2, 0.1, 0.65, 0.03], facecolor=axcolor)
slider_max = len(d.data) - x_max_index - 1
# set slider position object
spos = Slider(axpos, 'Pos', matplotlib.dates.date2num(x_min), matplotlib.dates.date2num(d.data.index[slider_max]))
# format date names
#plt.gcf().autofmt_xdate()
# create slider update function
def update(val):
pos = spos.val
xmin_time = matplotlib.dates.num2date(pos)
xmax_time = matplotlib.dates.num2date(pos) + x_dt
ax.axis([xmin_time, xmax_time, y_min, y_max])
fig.canvas.draw_idle()
# update slider position on click
spos.on_changed(update)
#return fig, axs
return fig
def plotLFP(d, raw=True, filtered=True, thresholds=True, spindles=True, spindle_rejects=True, raw_lowpass=True, lowpass_freq=25,
lowpass_order=4, win_frac=None, xlim=None):
""" plot dual-channel LFP w/ option for double panel raw & filtered.
red = spindle rejects by time domain criteria; dark red = spindle rejects by frequency domain criteria
Parameters
----------
d: instance of ioeeg Dataset class
raw: bool, optional, default: True
Option to plot raw EEG
filtered: bool, optional, default: True
Option to plot filtered EEG
thresholds: bool, optional, default: True
Option to plot spindle detection thresholds
spindles: bool, optional, default: True
Option to plot spindle detections
spindle_rejects: bool, optional, default: True
Option to plot rejected spindle detections
raw_lowpass: bool (default: True)
Whether to plot the lowpass filtered raw data [in place of the unchanged raw data]
lowpass_freq: int (default: 25)
Frequency to lowpass the raw data for visualization (if not already applied)
lowpass_order: int (default: 4)
Butterworth lowpass filter order to be used if lowpass_raw is not None (doubles for filtfilt)
win_frac: str or None (default: None)
window count, if plotting x-axis in windows (ex. '3/4' for window 3 of 4)
xlim: tuple of DateTimeIndex
x-axis values to be used for x-limits
Returns
-------
matplotlib.pyplot figure instance
"""
data = []
title = []
# import data
if raw == True:
if not raw_lowpass:
# use the unchanged raw data
raw_data = d.data
elif raw_lowpass:
# use the lowpass filtered raw data
try:
# check if filtered data exists
raw_lowpass_data = d.data_lowpass
except AttributeError:
# apply lowpass filter
d.lowpass_raw(lowpass_freq, lowpass_order)
raw_lowpass_data = d.data_lowpass
if filtered == True or thresholds == True:
filtd = d.spindle_calcs.loc(axis=1)[:, 'Filtered']
# set data to plot (title corresponds to multiindex level 2 in data df)
if raw == True:
if not raw_lowpass:
# plot the unchanged data
data.append(raw_data)
title.append('Raw')
elif raw_lowpass:
# plot the lowpass data
data.append(raw_lowpass_data)
title.append('raw_lowpass')
if filtered == True:
data.append(filtd)
title.append('Filtered')
if thresholds == True:
data.append(filtd)
title.append('Filtered')
# flatten events list by channel for plotting
if spindles == True:
sp_eventsflat = [list(itertools.chain.from_iterable(d.spindle_events[i])) for i in d.spindle_events.keys()]
if spindle_rejects == True:
sp_rej_t_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_t[i])) for i in d.spindle_rejects_t.keys()]
sp_rej_f_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_f[i])) for i in d.spindle_rejects_f.keys()]
# set channels for plotting
channels = [x[0] for x in d.data.columns]
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=(18,6), squeeze=False)
fig.subplots_adjust(hspace=.2, top=.9, bottom=.1, left=.05, right=.95)
for (e, dat), ax, t in zip(enumerate(data), axs.flatten(), title):
for i, c in enumerate(channels):
# set labels for only the first filtered channel (prevent duplicate legends)
if i == 0:
loSD = d.metadata['spindle_analysis']['sp_loSD']
hiSD = d.metadata['spindle_analysis']['sp_hiSD']
labels = {'RMS': 'RMS', 'RMS mavg': 'RMS mavg', 'lo_thres':f'RMS + {loSD} SD','hi_thres':f'RMS + {hiSD} SD', 'spindles':'Spindle Detection',
'spindle_rejects_t': 'Rejected Detection (time-domain)', 'spindle_rejects_f':'Rejected Detection (frequency-domain)'}
else:
label_keys = ['RMS', 'RMS mavg', 'lo_thres', 'hi_thres', 'spindles', 'spindle_rejects_t', 'spindle_rejects_f']
labels = {k:'_nolegend_' for k in label_keys}
# normalize each channel to [0, 1]; plot signal on 1st & 2nd panels
dat_ser = pd.Series(dat[(c, t)], index=dat.index)
norm_dat = (dat_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i # subtract i for plotting offset
ax.plot(norm_dat, linewidth=.5, color='C0')
# plot thresholds on the second panel
if (thresholds == True) & (e == 1):
# RMS
rms_ser = d.spRMS[c].RMS
norm_rms = (rms_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i
ax.plot(norm_rms, linewidth=.8, color='green', label = labels['RMS'])
# RMS moving average
rmsmavg_ser = d.spRMSmavg[c].RMSmavg
norm_rmsmavg = (rmsmavg_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i
ax.plot(norm_rmsmavg, linewidth=.8, color='orange', label = labels['RMS mavg'])
# threshold values
norm_lo = (d.spThresholds[c].loc['Low Threshold'] - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i
norm_hi = (d.spThresholds[c].loc['High Threshold'] - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i
ax.axhline(norm_lo, linestyle='solid', color='grey', label = labels['lo_thres'])
ax.axhline(norm_hi, linestyle='dashed', color='grey', label = labels['hi_thres'])
# plot spindles on the 3rd panel
if (spindles == True) & (e == 2):
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat[i]]
spins = pd.Series(index=norm_dat.index)
spins[sp_events_TS] = norm_dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5, label=labels['spindles'])
if (spindle_rejects == True) & (e == 2):
# plot time-domain rejects
sp_rejs_t_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat[i]]
spin_rejects_t = pd.Series(index=norm_dat.index)
spin_rejects_t[sp_rejs_t_TS] = norm_dat[sp_rejs_t_TS]
ax.plot(spin_rejects_t, color='red', alpha=0.5, label=labels['spindle_rejects_t'])
# plot frequency-domain rejects
sp_rejs_f_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat[i]]
spin_rejects_f = pd.Series(index=norm_dat.index)
spin_rejects_f[sp_rejs_f_TS] = norm_dat[sp_rejs_f_TS]
ax.plot(spin_rejects_f, color='darkred', alpha=0.5, label=labels['spindle_rejects_f'])
# set subplot title
if t == 'Raw':
subtitle = 'Original Signal'
elif t == 'raw_lowpass':
lp_filtfreq = d.metadata['visualizations']['lowpass_freq']
subtitle = f'{lp_filtfreq} Hz Lowpass Filtered Signal'
elif t == 'Filtered':
sp_filtfreqs = d.metadata['spindle_analysis']['sp_filtwindow']
subtitle = f'{sp_filtfreqs[0]}-{sp_filtfreqs[1]} Hz Bandpass Filtered Signal'
# set xlimit for windowing
if xlim is not None:
ax.set_xlim(xlim)
# set subplot params
ax.set_title(subtitle, pad=5, fontsize='medium')
ax.set_yticks(list(np.arange(0.5, -(len(channels)-1), -1)))
ax.set_yticklabels(channels)
ax.margins(x=0) # remove white space margins between data and y axis
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# plot minor axes
seconds = mdates.SecondLocator()
ax.xaxis.set_minor_locator(seconds)
ax.grid(axis='x', which='minor', linestyle=':')
ax.grid(axis='x', which='major')
# set overall parameters
fig_title = d.metadata['file_info']['in_num'] + ' ' + d.metadata['file_info']['path'].split('\\')[1] + ' ' + d.metadata['file_info']['path'].split('.')[0].split('_')[-1]
if win_frac is not None:
frac = win_frac.split('/')
fig_title = fig_title + f' (Figure {frac[0]} of {frac[1]})'
fig.suptitle(fig_title)
fig.legend(ncol=2, loc='upper right', fancybox=True, framealpha=0.5)
plt.xlabel('Time')
return fig
### Spindle Methods ###
def plot_spindlepower_chan_i(n, chan, show_peaks='spins', dB=False, spin_type='true_spins'):
""" Plot individual spindle spectra for a given channel
Parameters
----------
n: nrem.NREM object
chan: str
channel to plot
show_peaks: bool or str (default: 'spins')
which peaks to plot. 'spins' plots only peaks in the spindle range (options: None, 'spins', 'all')
spin_type: str (default: 'true_spins')
type of spindle to plot (options: 'true_spins', 'rejects')
note: 'rejects' option plots spindles rejected in the frequency domain, not in the time domain
"""
# set data to plot
if spin_type == 'true_spins':
psd_dict = n.spindle_psd_i
elif spin_type == 'rejects':
psd_dict = n.spindle_psd_i_rejects
# end if no spindles found matching the criteria
if len(psd_dict[chan]) < 1:
print(f'No {spin_type} found for channel {chan}')
return
# set figure & subplot params
ncols = int(np.sqrt(len(psd_dict[chan])))
nrows = len(psd_dict[chan])//ncols + (len(psd_dict[chan]) % ncols > 0)
fig, axs = plt.subplots(nrows = nrows, ncols = ncols, figsize=(ncols*3, ncols*2))
fig.subplots_adjust(hspace=0.8, wspace=0.5)
# move axes into a list for plotting if only one subplot
try:
axs_flat = axs.flatten()
except AttributeError:
axs_flat = [axs]
# plot spindles
for spin, ax in zip(psd_dict[chan], axs_flat):
# transform units
if dB == True:
pwr = 10 * np.log10(psd_dict[chan][spin].values)
ylabel = 'Power (dB)'
else:
pwr = psd_dict[chan][spin].values
ylabel = 'Power (mV^2/Hz)'
# set y-axis to scientific notation
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
# highlight spindle range. aquamarine or lavender works here too
spin_range = n.metadata['spindle_analysis']['spin_range']
ax.axvspan(spin_range[0], spin_range[1], color='lavender', alpha=0.8, zorder=1)
# plot spectrum
ax.plot(psd_dict[chan][spin].index, pwr, color='black', alpha=0.9, linewidth=0.8, zorder=2)
# grab the peaks on the power spectrum
p_idx, props = find_peaks(psd_dict[chan][spin])
peaks = psd_dict[chan][spin].iloc[p_idx]
# plot all peaks
if show_peaks == 'all':
ax.scatter(x=peaks.index, y=peaks.values, alpha=0.5, zorder=3)
# plot only peaks in the spindle range
elif show_peaks == 'spins':
peaks_spins = peaks[(peaks.index > spin_range[0]) & (peaks.index < spin_range[1])]
ax.scatter(x=peaks_spins.index, y=peaks_spins.values, alpha=0.5, zorder=3)
# set subplot params
ax.set_xlim(0, 25)
ax.margins(y=0)
ax.set_xticks([5, 10, 15, 20])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_title(spin, size='x-small')
ax.tick_params(axis='both', labelsize='x-small', labelleft=True) # turn labelleft=False to remove y-tick labels
# delete empty subplots --> this can probably be combined with previous loop
for i, ax in enumerate(axs_flat):
if i >= len(psd_dict[chan]):
fig.delaxes(ax)
# set figure params
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.93])
fig.text(0.5, 0, 'Frequency (Hz)', ha='center')
fig.text(0, 0.5, ylabel, va='center', rotation='vertical')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0] + f'\nSpindle Power {chan}: {spin_type}')
return fig
def spec_spins(n, chan, x, labels=True, raw_lowpass = True):
""" Vizualize individual peak detections, looking at % of > 4Hz power w/in the spindle range
Parameters
----------
n: nrem.NREM object
compatible with psd_type = 'i' under analyze_spindles method
chan: str
Channel to plot
x: int
Spindle # to plot
labels: bool (default: True)
Whether to print axis labels
raw_lowpass: bool (default: True)
Whether to plot the lowpass filtered raw data [in place of the unchanged raw data]
Returns
-------
matplotlib.Figure
"""
spin_range = n.metadata['spindle_analysis']['spin_range']
prune_range = n.metadata['spindle_analysis']['prune_range']
lowpass_freq = n.metadata['visualizations']['lowpass_freq']
# set data to plot
try:
psd = n.spindle_psd_i[chan][x]
if raw_lowpass:
# set spindle to lowpass data
zpad_spin = n.spindles_zpad_lowpass[chan][x]
else:
# use original data
zpad_spin = n.spindles_zpad[chan][x]
spin_perc = n.spindle_multitaper_calcs[chan][f'perc_{prune_range[0]}-{prune_range[1]}Hzpwr_in_spin_range'].loc[x]
status = 'accepted'
except KeyError:
psd = n.spindle_psd_i_rejects[chan][x]
if raw_lowpass:
# set spindle to lowpass data
zpad_spin = n.spindles_zpad_rejects_lowpass[chan][x]
else:
# use original data
zpad_spin = n.spindles_zpad_rejects[chan][x]
spin_perc = n.spindle_multitaper_calcs_rejects[chan][f'perc_{prune_range[0]}-{prune_range[1]}Hzpwr_in_spin_range'].loc[x]
status = 'rejected'
# subset of power w/in prune range
psd_subset = psd[(psd.index >= prune_range[0]) & (psd.index <= prune_range[1])]
# power in spindle range
psd_spins = psd[(psd.index >= spin_range[0]) & (psd.index <= spin_range[1])]
# plot the peak detections
fig, axs = plt.subplots(3, 1, figsize=(5,5))
plt.subplots_adjust(top=0.88, bottom=0.125, hspace=0.5)
# plot the raw spindle + zpad
## set zpad label
if raw_lowpass:
zpad_label = f'{lowpass_freq}Hz Lowpass Filtered Signal'
else:
zpad_label = 'Original Signal'
axs[0].plot(zpad_spin, alpha=1, lw=0.8, label=zpad_label)
# convert x-tick labels from samples to ms
xticks = axs[0].get_xticks().tolist()
ms_xticks = [int(sample/n.s_freq*1000) for sample in xticks]
axs[0].set_xticklabels(ms_xticks)
if labels:
axs[0].set_ylabel('Amplitude (mV)', size = 'small')
axs[0].set_xlabel('Time (ms)', size = 'small')
# plot the whole spectrum
axs[1].plot(psd, c='black', lw=0.8, label='Power Spectrum')
axs[1].axvspan(spin_range[0], spin_range[1], color='grey', alpha=0.2, zorder=0)
if labels:
axs[1].set_ylabel('Power (mv$^2$/Hz)', size = 'small')
axs[1].set_xlabel('Frequency (Hz)', size = 'small')
# plot just the subset of the spectrum used for pruning
axs[2].plot(psd_subset, c='black', lw=0.8, zorder=3)
axs[2].axvspan(spin_range[0], spin_range[1], color='grey', alpha=0.2, label = 'Spindle Range', zorder=0)
axs[2].fill_between(psd_subset.index, psd_subset.values, zorder=0, alpha=0.3, color='pink')
axs[2].fill_between(psd_subset.index, psd_subset.values, where=[sub in psd_spins.index for sub in psd_subset.index], zorder=1, alpha=1, color='white')
axs[2].fill_between(psd_subset.index, psd_subset.values, where=[sub in psd_spins.index for sub in psd_subset.index], zorder=2, alpha=0.8, color='pink')
if labels:
axs[2].set_ylabel('Power (mv$^2$/Hz)', size = 'small')
axs[2].set_xlabel('Frequency (Hz)', size = 'small')
for ax in axs.flatten():
ax.tick_params(labelsize=9)
# set y-axis to scientific notation
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
fig.legend(loc='lower center', ncol=3, bbox_to_anchor=(0.5, 0), fontsize='x-small')
fig.suptitle(f'Channel {chan} Spindle #{x}\nSpindle range comprises {spin_perc}% of {prune_range[0]}-{prune_range[1]}Hz power ({status})', size = 'medium')
plt.xticks(fontsize=8)
return fig
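# Illustrative call for spec_spins (assumptions: `n` is an nrem.NREM object analysed with
# psd_type='i', channel 'C3' exists, and spindle #0 was detected on that channel).
def _example_spec_spins(n):
    return spec_spins(n, chan='C3', x=0, labels=True, raw_lowpass=True)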
def spec_peaks_SD(n, chan, x, labels=True):
""" Vizualize spectral peaks from individual spindles
This looks at mean power >4Hz + 1 SD as a potential frequency domain threshold
Plots three panels: Upper = spindle tracing (w/ zero-pad) ,
Center = spectrum w/ peaks, Lower = > 4Hz spectrum w/ peaks
*NOTE this is not used in final detection criteria
Parameters
----------
n: nrem.NREM object
compatible with psd_type = 'i' under analyze_spindles method
chan: str
Channel to plot
x: int
Spindle # to plot
labels: bool (default: True)
Whether to print axis labels
Returns
-------
matplotlib.Figure
"""
spin_range = n.metadata['spindle_analysis']['spin_range']
prune_frange = [4, 25] # change this to pull the value from metadata
s = 1 # pull from metadata --> number of standard deviations above mean
psd = n.spindle_psd_i[chan][x]
zpad_spin = n.spindles_zpad[chan][x]
psd_subset = psd[(psd.index >= prune_frange[0]) & (psd.index <= prune_frange[1])]
# grab the peaks on the power spectrum
p_idx, props = find_peaks(psd)
peaks = psd.iloc[p_idx]
peaks_spins = peaks[(peaks.index > spin_range[0]) & (peaks.index < spin_range[1])]
# plot the peak detections
fig, axs = plt.subplots(3, 1, figsize=(5,5))
plt.subplots_adjust(top=0.9, bottom=0.1, hspace=0.5)
#fig.set_tight_layout(True)
# plot the raw spindle + zpad
axs[0].plot(zpad_spin, alpha=1, lw=0.8, label='raw signal')
# convert x-tick labels from samples to ms
xticks = axs[0].get_xticks().tolist()
ms_xticks = [int(sample/n.s_freq*1000) for sample in xticks]
axs[0].set_xticklabels(ms_xticks)
if labels:
axs[0].set_ylabel('Amplitude (mV)', size = 'small')
axs[0].set_xlabel('Time (ms)', size = 'small')
# plot the whole spectrum + peaks
axs[1].plot(psd, c='black', lw=0.8, label='power spectrum')
axs[1].axvspan(spin_range[0], spin_range[1], color='grey', alpha=0.2, label = 'Spindle Range', zorder=0)
axs[1].scatter(x=peaks.index, y=peaks.values, c='grey', alpha=0.8, label='spectral peaks')
if labels:
axs[1].set_ylabel('Power (mv$^2$/Hz)', size = 'small')
axs[1].set_xlabel('Frequency (Hz)', size = 'small')
# plot just the subset of the spectrum used for pruning + mean & SD
axs[2].plot(psd_subset, c='black', lw=0.8)
axs[2].axvspan(spin_range[0], spin_range[1], color='grey', alpha=0.2, label = 'Spindle Range', zorder=0)
axs[2].axhline(psd_subset.mean(), c='orange', linestyle = '-', label = 'mean power')
axs[2].axhline(psd_subset.mean() + s*psd_subset.std(), c='orange', linestyle=':', label = f'mean+{s}SD')
axs[2].scatter(x=peaks_spins.index, y=peaks_spins.values, c='grey', alpha=0.8, label='spectral peaks')
if labels:
axs[2].set_ylabel('Power (mv$^2$/Hz)', size = 'small')
axs[2].set_xlabel('Frequency (Hz)', size = 'small')
for ax in axs.flatten():
ax.tick_params(labelsize=9)
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
#fig.legend()
fig.suptitle(f'Channel {chan} Spindle #{x}', size = 'medium')
plt.xticks(fontsize=8)
return fig
def plot_spins(n, datatype='Raw'):
""" plot all spindle detections by channel
Params
------
datatype: str (default: 'Raw')
Data to plot [Options: 'Raw', 'spfilt']
"""
exclude = ['EKG', 'EOG_L', 'EOG_R']
eeg_chans = [x for x in n.spindles.keys() if x not in exclude]
ncols = 6
nrows = len(eeg_chans)//ncols + (len(eeg_chans) % ncols > 0)
fig, axs = plt.subplots(nrows = nrows, ncols = ncols, sharex=True, figsize=(15, 7))
fig.subplots_adjust(hspace=0.5)
for chan, ax in zip(n.spindles.keys(), axs.flatten()):
if chan not in exclude:
# set color iterator -- for other colors look at ocean, gnuplot, prism
color=iter(plt.cm.nipy_spectral(np.linspace(0, 1, len(n.spindles[chan]))))
for i in n.spindles[chan]:
c = next(color)
ax.plot(n.spindles[chan][i][datatype], c=c, alpha=1, lw=0.8)
# set subplot params
ax.set_xlim([-1800, 1800])
ax.set_title(chan, fontsize='medium')
ax.tick_params(axis='both', which='both', labelsize=8)
# delete empty subplots --> this can probably be combined with previous loop
for i, ax in enumerate(axs.flatten()):
if i >= len(eeg_chans):
fig.delaxes(ax)
# set figure params
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.95])
fig.text(0.5, 0, 'Time (ms)', ha='center')
fig.text(0, 0.5, 'Amplitude (mV)', va='center', rotation='vertical')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0])
return fig
def plot_spin_means(n, datatype='Raw', spins=True, count=True, buffer=False, err='sem', spin_color='black', count_color='dodgerblue', buff_color='lightblue'):
""" plot all spindle detections by channel
    Note: Removed buffer option because buffer calculations are not maintained in the nrem module (11-26-19)
Parameters
----------
datatype: str (default: 'Raw')
data to plot [options: 'Raw', 'spfilt']
spins: bool (default: True)
plot spindle averages
count: bool (default: True)
plot overlay of spindle count at each timedelta
# buffer: bool (default:False)
# plot average data +/- 3s from zero-neg spindle peaks.
# Note: this has an effect of washing out spindles features due to asymmetry in spindle distribution
# around the negative peak and corresponding averaging of spindle with non-spindle data
err: str (default:'sem')
type of error bars to use [options: 'std', 'sem']
spin_color: str (default: 'black')
color for plotting spindles
buff_color: str (default:'lightblue')
color for plotting buffer data
"""
exclude = ['EKG', 'EOG_L', 'EOG_R']
eeg_chans = [x for x in n.spindles.keys() if x not in exclude]
# set channel locations
locs = {'FPz': [4, 0],'Fp1': [3, 0],'Fp2': [5, 0],'AF7': [1, 1],'AF8': [7, 1],'F7': [0, 2],'F8': [8, 2],'F3': [2, 2],'F4': [6, 2],'F1': [3, 2],
'F2': [5, 2],'Fz': [4, 2],'FC5': [1, 3],'FC6': [7, 3],'FC1': [3, 3],'FC2': [5, 3],'T3': [0, 4],'T4': [8, 4],'C3': [2, 4],'C4': [6, 4],
'Cz': [4, 4],'CP5': [1, 5],'CP6': [7, 5],'CP1': [3, 5],'CP2': [5, 5],'CPz': [4, 5],'P3': [2, 6],'P4': [6, 6],'Pz': [4, 6],'T5': [0, 6],
'T6': [8, 6],'POz': [4, 7],'PO7': [1, 7],'PO8': [7, 7],'O1': [2, 8],'O2': [6, 8],'Oz': [4, 8]}
fig, ax = plt.subplots(9,9, figsize=(15, 13))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
for chan in n.spindles.keys():
if chan not in exclude:
# if buffer:
# data = n.spindle_buffer_means
# ax.plot(data[(chan, 'mean')], alpha=1, color=buff_color, label='Overall Average', lw=1)
# ax.fill_between(data.index, data[(chan, 'mean')] - data[(chan, err)], data[(chan, 'mean')] + data[(chan, err)],
# color=buff_color, alpha=0.2)
if spins:
data = n.spindle_means[datatype]
ax[locs[chan][1], locs[chan][0]].plot(data[(chan, 'mean')], alpha=1, color=spin_color, label='Spindle Average', lw=1)
ax[locs[chan][1], locs[chan][0]].fill_between(data.index, data[(chan, 'mean')] - data[(chan, err)], data[(chan, 'mean')] + data[(chan, err)],
color=spin_color, alpha=0.2)
if count:
ax1 = ax[locs[chan][1], locs[chan][0]].twinx()
ax1.plot(data[chan, 'count'], color=count_color, alpha=0.3)
ax1.fill_between(data.index, 0, data[(chan, 'count')], color=count_color, alpha=0.3)
max_count = len(n.spindles[chan])
ticks = np.linspace(0, max_count, num=5, dtype=int)
ax1.set_yticks(ticks=ticks)
ax1.set_yticklabels(labels=ticks, color=count_color)
ax1.tick_params(axis='y', labelsize=8) #color=count_color)
# set subplot params
ax[locs[chan][1], locs[chan][0]].set_xlim([-1800, 1800])
ax[locs[chan][1], locs[chan][0]].set_title(chan, fontsize='medium')
ax[locs[chan][1], locs[chan][0]].tick_params(axis='both', which='both', labelsize=8)
# remove unused plots
coords = [[x, y] for x in range(0, 9) for y in range(0,9)]
unused = [c for c in coords if c not in locs.values()]
for u in unused:
fig.delaxes(ax[u[1], u[0]])
# set figure params
#fig.legend()
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.95])
fig.text(0.5, 0, 'Time (ms)', ha='center', size='large')
fig.text(0, 0.5, 'Amplitude (mV)', va='center', rotation='vertical', color=spin_color, size='large')
fig.text(1, 0.5, 'Spindle Count', va='center', rotation=270, color=count_color, size='large')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0] + f'\nSpindle Averages ({datatype})')
return fig
def plot_spin_clust_means(n, datatype='Raw', spins=True, count=True, err='sem', spin_color='black', count_color='dodgerblue'):
""" plot mean spindles by cluster
Parameters
----------
cluster: int
which cluster to plot [options: 0, 1]
datatype: str (default: 'Raw')
data to plot [options: 'Raw', 'spfilt']
spins: bool (default: True)
plot spindle averages
count: bool (default: True)
plot overlay of spindle count at each timedelta
err: str (default:'sem')
type of error bars to use [options: 'std', 'sem']
spin_color: str (default: 'black')
color for plotting spindles
"""
fig, axs = plt.subplots(1, 2, figsize=(7,3), sharey=True)
for ax, clust in zip(axs.flatten(), n.spindle_aggregates_clust):
if spins:
data = n.spindle_clust_means[datatype]
ax.plot(data[(clust, 'mean')], alpha=1, color=spin_color, label='Spindle Average', lw=1)
ax.fill_between(data.index, data[(clust, 'mean')] - data[(clust, err)], data[(clust, 'mean')] + data[(clust, err)],
color=spin_color, alpha=0.2)
if count:
ax1 = ax.twinx()
ax1.plot(data[clust, 'count'], color=count_color, alpha=0.3)
ax1.fill_between(data.index, 0, data[(clust, 'count')], color=count_color, alpha=0.3)
max_count = len(n.spindle_aggregates_clust[clust]['Raw'].columns)
ticks = np.linspace(0, max_count, num=5, dtype=int)
ax1.set_yticks(ticks=ticks)
ax1.set_yticklabels(labels=ticks, color=count_color)
ax1.tick_params(axis='y', labelsize=8) #color=count_color)
# set subplot params
ax.set_xlim([-1000, 1000])
ax.set_title('Cluster ' + str(clust), fontsize='medium')
ax.set_xlabel('Time (ms)', size='large')
ax.set_ylabel('Amplitude (mV)', size='large')
ax.tick_params(axis='both', which='both', labelsize=8)
# set figure params
#fig.legend()
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.92])
if count:
fig.text(1, 0.5, 'Spindle Count', va='center', rotation=270, color=count_color, size='large')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0] + f'\nSpindle Averages ({datatype})')
return fig
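# Hypothetical usage sketch -- `n` is assumed to be the analysis object used throughout
# this module (exposing n.spindle_clust_means, n.spindle_aggregates_clust, n.metadata);
# the output filename is a placeholder:
#   fig = plot_spin_clust_means(n, datatype='Raw', err='sem')
#   fig.savefig('spindle_cluster_means.png', dpi=300)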
def plot_avg_spindle(n, datatype='Raw', spins=True, count=True, err='sem', spin_color='black', count_color='dodgerblue'):
""" plot the average spindle tracing across the head
For use in comparison with cluster averages
"""
fig, ax = plt.subplots()
if spins:
data = n.spindle_means_all[datatype]
ax.plot(data['mean'], alpha=1, color=spin_color, label='Spindle Average', lw=1)
ax.fill_between(data.index, data['mean'] - data[err], data['mean'] + data[err],
color=spin_color, alpha=0.2)
if count:
ax1 = ax.twinx()
ax1.plot(data['count'], color=count_color, alpha=0.3)
ax1.fill_between(data.index, 0, data['count'], color=count_color, alpha=0.3)
max_count = len(n.spindle_aggregates_all['Raw'].columns)
ticks = np.linspace(0, max_count, num=5, dtype=int)
ax1.set_yticks(ticks=ticks)
ax1.set_yticklabels(labels=ticks, color=count_color)
ax1.tick_params(axis='y', labelsize=8) #color=count_color)
# set subplot params
ax.set_xlim([-1000, 1000])
#ax.set_title('Cluster ' + str(clust), fontsize='medium')
ax.set_xlabel('Time (ms)', size='large')
ax.set_ylabel('Amplitude (mV)', size='large')
ax.tick_params(axis='both', which='both', labelsize=8)
# set figure params
#fig.legend()
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.92])
if count:
fig.text(1, 0.5, 'Spindle Count', va='center', rotation=270, color=count_color, size='large')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0] + f'\nSpindle Averages ({datatype})')
return fig
def plot_spindlepower_chan(n, chan, dB=True):
""" Plot spindle power spectrum for a single channel """
# transform units
if dB == True:
pwr = 10 * np.log10(n.spindle_psd_concat[chan].values)
ylabel = 'Power (dB)'
else:
pwr = n.spindle_psd_concat[chan].values
ylabel = 'Power (mV^2/Hz)'
fig, ax = plt.subplots()
# plot just spectrum
ax.plot(n.spindle_psd_concat[chan].index, pwr, color='black', alpha=0.9, linewidth=0.8)
ax.axvspan(9, 16, color='lavender', alpha=0.8)
ax.set_xlim(0, 25)
ax.margins(y=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlabel('Frequency (Hz)')
plt.ylabel(ylabel)
plt.title((n.metadata['file_info']['fname'].split('.')[0] + '\n\n' + chan + ' Concatenated Spindle Power'), size='medium', weight='semibold')
return fig
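# A minimal worked example of the decibel conversion used above (synthetic values,
# assuming numpy is available as np as elsewhere in this module):
#   pwr_mv2hz = np.array([1e-6, 1e-5, 1e-4])
#   pwr_db = 10 * np.log10(pwr_mv2hz)   # -> array([-60., -50., -40.])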
def plot_spindlepower(n, dB=True):
""" Plot spindle power spectrum (from concatenated spindles) for all channels """
exclude = ['EKG', 'EOG_L', 'EOG_R']
eeg_chans = [x for x in n.spindle_psd_concat.keys() if x not in exclude]
# set subplot parameters
if len(eeg_chans) < 1:
print('No concatenated spindles detected to plot.')
return
elif len(eeg_chans) < 6:
ncols = len(eeg_chans)
else:
ncols = int(len(eeg_chans)/6)
nrows = len(eeg_chans)//ncols + (len(eeg_chans) % ncols > 0)
fig, axs = plt.subplots(nrows = nrows, ncols = ncols, figsize=(ncols*3, ncols*2))
fig.subplots_adjust(hspace=0.8, wspace=0.5)
# move axes into a list for plotting if only one subplot
try:
axs_flat = axs.flatten()
except AttributeError:
axs_flat = [axs]
for chan, ax in zip(eeg_chans, axs_flat):
# transform units
if dB == True:
pwr = 10 * np.log10(n.spindle_psd_concat[chan].values)
ylabel = 'Power (dB)'
else:
pwr = n.spindle_psd_concat[chan].values
ylabel = 'Power (mV^2/Hz)'
# set y-axis to scientific notation
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
# plot spectrum
ax.plot(n.spindle_psd_concat[chan].index, pwr, color='black', alpha=0.9, linewidth=0.8)
# highlight spindle range. aquamarine or lavender works here too
spin_range = n.metadata['spindle_analysis']['spin_range']
ax.axvspan(spin_range[0], spin_range[1], color='grey', alpha=0.2)
# set subplot params
ax.set_xlim(0, 25)
ax.margins(y=0)
ax.set_xticks([5, 10, 15, 20])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_title(chan, size='medium', weight='bold')
# delete empty subplots --> this can probably be combined with previous loop
for i, ax in enumerate(axs_flat):
if i >= len(eeg_chans):
fig.delaxes(ax)
# set figure params
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.93])
fig.text(0.5, 0, 'Frequency (Hz)', ha='center')
fig.text(0, 0.5, ylabel, va='center', rotation='vertical')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0] + '\nSpindle Power')
return fig
def plot_spindlepower_headplot(n, dB=True):
"""
Headplot of spindle power spectrum for all channels
NOTE: only for FS128 (12-11-19)
"""
# set channel locations
locs = {'FPz': [4, 0],'Fp1': [3, 0],'Fp2': [5, 0],'AF7': [1, 1],'AF8': [7, 1],'F7': [0, 2],'F8': [8, 2],'F3': [2, 2],'F4': [6, 2],'F1': [3, 2],
'F2': [5, 2],'Fz': [4, 2],'FC5': [1, 3],'FC6': [7, 3],'FC1': [3, 3],'FC2': [5, 3],'T3': [0, 4],'T4': [8, 4],'C3': [2, 4],'C4': [6, 4],
'Cz': [4, 4],'CP5': [1, 5],'CP6': [7, 5],'CP1': [3, 5],'CP2': [5, 5],'CPz': [4, 5],'P3': [2, 6],'P4': [6, 6],'Pz': [4, 6],'T5': [0, 6],
'T6': [8, 6],'POz': [4, 7],'PO7': [1, 7],'PO8': [7, 7],'O1': [2, 8],'O2': [6, 8],'Oz': [4, 8]}
fig, ax = plt.subplots(9,9, figsize=(12, 12))
plt.subplots_adjust(hspace=0.3, wspace=0.3) # use this or tight_layout
for chan in locs.keys():
# transform units
if dB == True:
pwr = 10 * np.log10(n.spindle_psd_concat[chan].values)
ylabel = 'Power (dB)'
else:
pwr = n.spindle_psd_concat[chan].values
ylabel = 'Power (mV^2/Hz)'
# plot spectrum
#ax = plt.subplot()
ax[locs[chan][1], locs[chan][0]].plot(n.spindle_psd_concat[chan].index, pwr, color='black', alpha=0.9, linewidth=0.8)
# highlight spindle range. aquamarine or lavender works here too
ax[locs[chan][1], locs[chan][0]].axvspan(9, 16, color='grey', alpha=0.2)
# set subplot params
ax[locs[chan][1], locs[chan][0]].set_xlim(0, 25)
ax[locs[chan][1], locs[chan][0]].margins(y=0)
ax[locs[chan][1], locs[chan][0]].set_xticks([5, 10, 15, 20])
ax[locs[chan][1], locs[chan][0]].tick_params(axis='both', labelsize=7)
ax[locs[chan][1], locs[chan][0]].spines['top'].set_visible(False)
ax[locs[chan][1], locs[chan][0]].spines['right'].set_visible(False)
ax[locs[chan][1], locs[chan][0]].set_title(chan, size='small', weight='semibold')
ax[locs[chan][1], locs[chan][0]].title.set_position([.5, 0.75])
#ax[locs[chan][1], locs[chan][0]].text(0.0, 0.0, chan)
# remove unused plots
coords = [[x, y] for x in range(0, 9) for y in range(0,9)]
unused = [c for c in coords if c not in locs.values()]
for u in unused:
fig.delaxes(ax[u[1], u[0]])
# set labels
fig.text(0.5, 0.08, 'Frequency (Hz)', ha='center', size='large', weight='semibold')
fig.text(0.08, 0.5, ylabel, va='center', rotation='vertical', size='large', weight='semibold')
return fig
def plot_gottselig(n, datatype='calcs', plot_peaks=True, smoothed=True):
""" plot gottselig normalization for all channels
Parameters
----------
datatype: str (default: 'calcs')
which data to plot [options: 'calcs', 'normed_pwr']
plot_peaks: bool (default: True)
whether to plot peak detections [only if datatype='normed_pwr']
smoothed: bool (default: True)
whether to plot rms smoothed signal used for peak calculations [only if datatype='normed_pwr']
"""
exclude = ['EKG', 'EOG_L', 'EOG_R']
eeg_chans = [x for x in n.spindle_psd_concat.keys() if x not in exclude]
# set subplot parameters
if len(eeg_chans)/6 < 1:
ncols = 1
else:
ncols = int(len(eeg_chans)/6)
nrows = len(eeg_chans)//ncols + (len(eeg_chans) % ncols > 0)
fig, axs = plt.subplots(nrows = nrows, ncols = ncols, figsize=(ncols*3, ncols*2))
fig.subplots_adjust(hspace=0.8, wspace=0.5)
for chan, ax in zip(eeg_chans, axs.flatten()):
data = n.spindle_psd_concat[chan]
data_normed = n.spindle_psd_concat_norm[chan]
if datatype == 'calcs':
# first plot
ax.scatter(data_normed['values_to_fit'].index, data_normed['values_to_fit'].values, alpha=0.8, color='mediumslateblue', linewidths=0, marker='s', label='Normalization Range')
ax.plot(data.index, 10*np.log10(data.values), color='black', label = 'Power Spectrum')
ax.plot(data_normed['exp_fit_line'], color='mediumblue', label = 'Exponential fit')
ax.set_title(chan)
elif datatype == 'normed_pwr':
# second plot
ax.plot(data_normed['normed_pwr'], color='black', lw=0.8, label='Normalized power', zorder=2)
ax.axvspan(9, 16, color='lightgrey', alpha=0.8, label = 'Spindle Range', zorder=1)
ax.set_title(chan)
if smoothed:
# plot smoothed psd
ax.plot(n.psd_concat_norm_peaks[chan]['smoothed_data'], lw=1.2, alpha=0.8, color='lime', zorder=3)
if plot_peaks:
# plot peak detections
peaks = n.psd_concat_norm_peaks[chan]['peaks']
ax.scatter(x=peaks.index, y=peaks.values, color='magenta', alpha=0.8, marker=7, zorder=4)
# set subplot params
ax.margins(y=0)
#ax.set_xticks([5, 10, 15, 20])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_title(chan, size='medium')
# delete empty subplots
for i, ax in enumerate(axs.flatten()):
if i >= len(eeg_chans):
fig.delaxes(ax)
# set figure params
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.93])
fig.text(0.5, 0, 'Frequency (Hz)', ha='center', size='large')
fig.text(0, 0.5, 'Power (dB)', va='center', rotation='vertical', size='large')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0] + '\n\nGottselig Normalization', size='large')
return fig
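# The two panels above assume the Gottselig-style normalization has already been computed
# upstream (n.spindle_psd_concat_norm, n.psd_concat_norm_peaks). The sketch below only
# illustrates the underlying idea -- fit a smooth baseline to the spectrum outside the
# spindle band and subtract it -- and is not the implementation that builds those
# attributes; the function name and the linear-in-dB fit are assumptions.
def _gottselig_baseline_sketch(freqs, psd, exclude=(9, 16)):
    """Fit a linear trend to dB power outside `exclude` and return power minus the fit."""
    import numpy as np
    freqs = np.asarray(freqs, dtype=float)
    pwr_db = 10 * np.log10(np.asarray(psd, dtype=float))
    # exclude the spindle band so it does not pull the baseline upward
    mask = (freqs < exclude[0]) | (freqs > exclude[1])
    slope_fit, intercept_fit = np.polyfit(freqs[mask], pwr_db[mask], deg=1)
    baseline = slope_fit * freqs + intercept_fit
    return pwr_db - baseline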
def plot_gottselig_headplot(n, datatype='calcs', plot_peaks=True, smoothed=True):
""" plot gottselig normalization headplot for all channels
NOTE: only for FS128 (12-11-19)
To Do: Change plots for consistent y-axis
Parameters
----------
datatype: str (default: 'calcs')
which data to plot [options: 'calcs', 'normed_pwr']
plot_peaks: bool (default: True)
whether to plot peak detections [only if datatype='normed_pwr']
smoothed: bool (default: True)
whether to plot rms smoothed signal used for peak calculations [only if datatype='normed_pwr']
"""
# set channel locations
locs = {'FPz': [4, 0],'Fp1': [3, 0],'Fp2': [5, 0],'AF7': [1, 1],'AF8': [7, 1],'F7': [0, 2],'F8': [8, 2],'F3': [2, 2],'F4': [6, 2],'F1': [3, 2],
'F2': [5, 2],'Fz': [4, 2],'FC5': [1, 3],'FC6': [7, 3],'FC1': [3, 3],'FC2': [5, 3],'T3': [0, 4],'T4': [8, 4],'C3': [2, 4],'C4': [6, 4],
'Cz': [4, 4],'CP5': [1, 5],'CP6': [7, 5],'CP1': [3, 5],'CP2': [5, 5],'CPz': [4, 5],'P3': [2, 6],'P4': [6, 6],'Pz': [4, 6],'T5': [0, 6],
'T6': [8, 6],'POz': [4, 7],'PO7': [1, 7],'PO8': [7, 7],'O1': [2, 8],'O2': [6, 8],'Oz': [4, 8]}
fig, ax = plt.subplots(9,9, figsize=(12, 12))
plt.subplots_adjust(hspace=0.3, wspace=0.3) # use this or tight_layout
for chan in locs.keys():
data = n.spindle_psd_concat[chan]
data_normed = n.spindle_psd_concat_norm[chan]
if datatype == 'calcs':
# first plot
ax[locs[chan][1], locs[chan][0]].scatter(data_normed['values_to_fit'].index, data_normed['values_to_fit'].values, alpha=0.8, color='mediumslateblue', linewidths=0, marker='s', label='Normalization Range')
ax[locs[chan][1], locs[chan][0]].plot(data.index, 10*np.log10(data.values), color='black', label = 'Power Spectrum')
ax[locs[chan][1], locs[chan][0]].plot(data_normed['exp_fit_line'], color='mediumblue', label = 'Exponential fit')
ax[locs[chan][1], locs[chan][0]].set_title(chan)
elif datatype == 'normed_pwr':
# second plot
ax[locs[chan][1], locs[chan][0]].plot(data_normed['normed_pwr'], color='black', lw=0.8, label='Normalized power', zorder=2)
ax[locs[chan][1], locs[chan][0]].axvspan(9, 16, color='lavender', alpha=0.8, label = 'Spindle Range', zorder=1)
ax[locs[chan][1], locs[chan][0]].set_title(chan)
if smoothed:
# plot smoothed psd
ax[locs[chan][1], locs[chan][0]].plot(n.psd_concat_norm_peaks[chan]['smoothed_data'], lw=1.2, color='lime', alpha=0.8, label='Smoothed Spectrum', zorder=3)
if plot_peaks:
# plot peak detections
peaks = n.psd_concat_norm_peaks[chan]['peaks']
ax[locs[chan][1], locs[chan][0]].scatter(x=peaks.index, y=peaks.values, color='magenta', marker=7, alpha=0.8, label ='Spindle Peak', zorder=4)
# set subplot params
ax[locs[chan][1], locs[chan][0]].margins(y=0)
ax[locs[chan][1], locs[chan][0]].set_xlim(0, 25)
ax[locs[chan][1], locs[chan][0]].margins(y=0)
ax[locs[chan][1], locs[chan][0]].set_xticks([5, 10, 15, 20])
ax[locs[chan][1], locs[chan][0]].tick_params(axis='both', labelsize=7)
ax[locs[chan][1], locs[chan][0]].spines['top'].set_visible(False)
ax[locs[chan][1], locs[chan][0]].spines['right'].set_visible(False)
ax[locs[chan][1], locs[chan][0]].set_title(chan, size='small', weight='semibold')
ax[locs[chan][1], locs[chan][0]].title.set_position([.5, 0.75])
# remove unused plots
coords = [[x, y] for x in range(0, 9) for y in range(0,9)]
unused = [c for c in coords if c not in locs.values()]
for u in unused:
fig.delaxes(ax[u[1], u[0]])
# set labels
fig.text(0.5, 0.08, 'Frequency (Hz)', ha='center', size='large', weight='semibold')
fig.text(0.08, 0.5, 'Power (dB)', va='center', rotation='vertical', size='large', weight='semibold')
return fig
### Slow Oscillation Methods ###
def plot_so(n, datatype='Raw'):
""" plot all slow oscillation detections by channel
Params
------
datatype: str (default: 'Raw')
Data to plot [Options: 'Raw', 'sofilt']
"""
# set channel locations
locs = {'FPz': [4, 0],'Fp1': [3, 0],'Fp2': [5, 0],'AF7': [1, 1],'AF8': [7, 1],'F7': [0, 2],'F8': [8, 2],'F3': [2, 2],'F4': [6, 2],'F1': [3, 2],
'F2': [5, 2],'Fz': [4, 2],'FC5': [1, 3],'FC6': [7, 3],'FC1': [3, 3],'FC2': [5, 3],'T3': [0, 4],'T4': [8, 4],'C3': [2, 4],'C4': [6, 4],
'Cz': [4, 4],'CP5': [1, 5],'CP6': [7, 5],'CP1': [3, 5],'CP2': [5, 5],'CPz': [4, 5],'P3': [2, 6],'P4': [6, 6],'Pz': [4, 6],'T5': [0, 6],
'T6': [8, 6],'POz': [4, 7],'PO7': [1, 7],'PO8': [7, 7],'O1': [2, 8],'O2': [6, 8],'Oz': [4, 8]}
exclude = ['EKG', 'EOG_L', 'EOG_R']
eeg_chans = [x for x in n.spindles.keys() if x not in exclude]
fig, ax = plt.subplots(9,9, figsize=(15, 13))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
for chan in n.so.keys():
if chan not in exclude:
# set color iterator -- for other colors look at ocean, gnuplot, prism
color=iter(plt.cm.nipy_spectral(np.linspace(0, 1, len(n.so[chan]))))
for i in n.so[chan]:
c = next(color)
ax[locs[chan][1], locs[chan][0]].plot(n.so[chan][i][datatype], c=c, alpha=1, lw=0.8)
# set subplot params
ax[locs[chan][1], locs[chan][0]].set_xlim([-2500, 2500])
ax[locs[chan][1], locs[chan][0]].set_title(chan, fontsize='medium')
ax[locs[chan][1], locs[chan][0]].tick_params(axis='both', which='both', labelsize=8)
# remove unused plots
coords = [[x, y] for x in range(0, 9) for y in range(0,9)]
unused = [c for c in coords if c not in locs.values()]
for u in unused:
fig.delaxes(ax[u[1], u[0]])
# set figure params
fig.tight_layout(pad=1, rect=[0, 0, 1, 0.95])
fig.text(0.5, 0, 'Time (ms)', ha='center')
fig.text(0, 0.5, 'Amplitude (mV)', va='center', rotation='vertical')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0])
fig.tight_layout()
return fig
### SpSO Methods ###
def plot_spsomap(n):
""" Plot histogram mapping of spso """
fig, ax = plt.subplots(figsize=(14, 3))
ax.fill_between(x = n.so_bool.index, y1=0, y2=n.so_bool.T.sum(), alpha=0.5, color='blue', label='Slow Oscillations')
ax.fill_between(x = n.spin_bool.index, y1=0, y2=n.spin_bool.T.sum(), alpha=0.5, color='green', label='Spindles')
ax.set_xlabel('Time')
ax.set_ylabel('Channel Count')
ax.margins(x=0)
ax.legend()
def plot_spso_chan_subplots(n, chan, so_dtype='sofilt', sp_dtype='spfilt'):
""" Subplot individual slow oscillations with overriding spindle detections (one subplot per SO) """
if sp_dtype == 'spfilt':
spin_data = n.spfiltEEG
elif sp_dtype == 'spsofilt':
spin_data = n.spsofiltEEG
elif sp_dtype == 'sofilt':
spin_data = n.sofiltEEG
height = 2/3 * int(len(n.so_spin_map[chan]))
fig, axs = plt.subplots(nrows=int(len(n.so_spin_map[chan])/3)+1, ncols=3, figsize=(10, height))
fig.subplots_adjust(hspace=0.4)
for ax, (so, spins) in zip(axs.flatten(), n.so_spin_map[chan].items()):
ax.plot(n.so[chan][so].time, n.so[chan][so][so_dtype])
for spin in spins:
ax.plot(spin_data[(chan, 'Filtered')].loc[n.spindle_events[chan][spin]], lw=1)
ax.tick_params(axis='x', labelsize='small', rotation=15., pad=.1)
# delete empty subplots --> this can probably be combined with previous loop
for i, ax in enumerate(axs.flatten()):
if i >= len(n.so_spin_map[chan]):
fig.delaxes(ax)
fig.text(0.5, 0, 'Time (ms)', ha='center')
fig.text(0, 0.5, 'Amplitude (mV)', va='center', rotation='vertical')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0])
def plot_spso_chan(n, chan, so_dtype='sofilt', sp_dtype='spsofilt', spin_tracings=False, plot_dist=True, so_tracings=True):
""" Plot individual slow oscillations with overriding spindle detections
Parameters
----------
chan: str
channel to plot
so_dtype: str (default: 'sofilt')
slow oscillation data to plot [Options: 'sofilt', 'spsofilt']
sp_dtype: str (default: 'spsofilt')
spindle data to plot [Options: 'spfilt', 'spsofilt']
*Note: spfilt is broken ATM
spin_tracings: bool (default: False)
whether to plot spindle tracings
plot_dist: bool (default: True)
whether to plot spindle distribution
so_tracings: bool (default: True)
whether to plot so tracings (if False, will plot SO mean)
"""
fig, ax = plt.subplots(figsize=(10, 10))
so_dict = {}
for so_id, df in n.spso_aggregates[chan].items():
if so_tracings:
# plot slow oscillation
ax.plot(df[so_dtype], color='black', alpha=0.2)
else:
# grab the slow oscillations to calculate mean
so_dict[chan+'_'+str(so_id)] = df[df.index.notna()][so_dtype]
# grab spindle columns
spin_cols = [x for x in df.columns if x.split('_')[0] == 'spin']
for spin in spin_cols:
# get index & cluster of spindle
spin_idx = int(spin.split('_')[1])
clust = int(n.spindle_stats_i[(n.spindle_stats_i.chan == chan) & (n.spindle_stats_i.spin == spin_idx)].cluster.values)
if spin_tracings:
# plot spindle
c = plt.get_cmap('RdYlBu', 2)(clust)
hx = matplotlib.colors.rgb2hex(c[:-1])
ax.plot(df[sp_dtype][df[spin].notna()], lw=3, color=hx, alpha=0.5)
# plot SO mean
if so_tracings == False:
so_df = pd.DataFrame(so_dict)
mean = so_df.mean(axis=1)
sd = so_df.std(axis=1)
if len(mean) > 0:
ax.plot(mean, color='black')
ax.fill_between(mean.index, mean-sd, mean+sd, color='black', alpha=0.3)
if plot_dist:
# plot normalized distribution of each cluster for each timepoint
ax1 = ax.twinx()
for clust, dct in n.spin_dist['by_chan'][chan].items():
# set color
c = plt.get_cmap('RdYlBu', 2)(int(clust))
hx = matplotlib.colors.rgb2hex(c[:-1])
# plot normed distribution
ax1.plot(dct['dist_norm'], color=c, label='Cluster ' + clust)
ax1.fill_between(dct['dist_norm'].index, 0, dct['dist_norm'].values, color=c, alpha=0.3)
ax1.set_ylabel('Proportion of spindles present')
ax1.legend()
ax.tick_params(axis='x', rotation=15., pad=.1)
ax.tick_params(axis='y', rotation=0, pad=.1)
ax.set_ylabel('Amplitude (mV)')
ax.set_xlabel('Time (ms)')
fig.suptitle(n.metadata['file_info']['fname'].split('.')[0])
fig.tight_layout()
return fig
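# Hypothetical usage -- 'Cz' is just an example channel present in the montage used here;
# `n` must already carry n.spso_aggregates, n.spindle_stats_i and n.spin_dist:
#   fig = plot_spso_chan(n, 'Cz', spin_tracings=True, so_tracings=False)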
def plot_spso(n, so_dtype='sofilt', sp_dtype='spsofilt', spin_tracings=False, plot_dist=True, so_tracings=True, cmap='winter', ylims=None, legend=False):
""" Plot individual slow oscillations with overriding spindle detections
Parameters
----------
so_dtype: str (default: 'sofilt')
slow oscillation data to plot [Options: 'sofilt', 'spsofilt']
sp_dtype: str (default: 'spsofilt')
spindle data to plot [Options: 'spfilt', 'spsofilt']
*Note: spfilt is broken ATM
spin_tracings: bool (default: False)
whether to plot spindle tracings
plot_dist: bool (default: True)
whether to plot spindle distribution
so_tracings: bool (default: True)
whether to plot SO tracings (if set to False, mean SO will be plotted)
cmap: str (default:'winter')
matplotlib colormap. usually 'winter' or 'RdYlBu'
ylims: tuple or None (default: None)
y limits for SO tracings axis. for use if an outlier is skewing the axis
legend: bool (default: False)
whether to plot the legend
"""
fig, ax = plt.subplots(figsize=(8, 8))
plt.rcParams["font.family"] = "Arial"
so_dict = {}
for chan in n.spso_aggregates.keys():
for so_id, df in n.spso_aggregates[chan].items():
if so_tracings:
# plot slow oscillation
ax.plot(df[so_dtype], color='black', alpha=0.2)
if ylims is not None:
ax.set_ylim(ylims)
else:
# grab the slow oscillations to calculate mean
so_dict[chan+'_'+str(so_id)] = df[df.index.notna()][so_dtype]
# grab spindle columns
spin_cols = [x for x in df.columns if x.split('_')[0] == 'spin']
for spin in spin_cols:
# get index & cluster of spindle
spin_idx = int(spin.split('_')[1])
clust = int(n.spindle_stats_i[(n.spindle_stats_i.chan == chan) & (n.spindle_stats_i.spin == spin_idx)].cluster.values)
if spin_tracings:
# plot spindle
c = plt.get_cmap(cmap, 2)(clust)
hx = matplotlib.colors.rgb2hex(c[:-1])
ax.plot(df[sp_dtype][df[spin].notna()], lw=3, color=hx, alpha=0.8)
# plot SO mean
if so_tracings == False:
so_df = | pd.DataFrame(so_dict) | pandas.DataFrame |
import os, glob
import numpy as np
import pandas as pd
import torch
import torch.utils.data as data
from torchvision import datasets, models, transforms
import cv2
import json
from PIL import Image
import random
from sklearn.utils import shuffle
from weighted_sampler import get_weighted_sample
import settings
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, RandomBrightnessContrast,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, VerticalFlip,
IAASharpen, IAAEmboss, RandomContrast, RandomBrightness, Flip, OneOf, Compose, RandomGamma, ElasticTransform, ChannelShuffle,RGBShift, Rotate
)
class Rotate90(RandomRotate90):
def apply(self, img, factor=3, **params):
return np.ascontiguousarray(np.rot90(img, 1))
#def apply_to_bbox(self, bbox, factor=3, **params):
# return F.bbox_rot90(bbox, 3, **params)
def strong_aug(p=1):
return Compose([
RandomRotate90(),
Flip(),
#Transpose(),
OneOf([
IAAAdditiveGaussianNoise(),
GaussNoise(),
], p=0.2),
OneOf([
MotionBlur(p=.2),
MedianBlur(blur_limit=3, p=.1),
Blur(blur_limit=3, p=.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.4),
OneOf([
OpticalDistortion(p=0.3),
GridDistortion(p=.1),
IAAPiecewiseAffine(p=0.3),
], p=0.2),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomContrast(),
RandomBrightness(),
], p=0.3),
HueSaturationValue(p=0.3),
], p=p)
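# Minimal sketch of applying one of these albumentations pipelines to a single HxWx3 uint8
# image (the array below is synthetic; in this module the pipelines are routed through
# augment_4chan so the yellow channel is transformed consistently with the RGB planes):
#   aug = strong_aug(p=1)
#   dummy = np.random.randint(0, 255, size=(512, 512, 3), dtype=np.uint8)
#   augmented = aug(image=dummy)['image']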
def augment_inclusive(p=.9):
return Compose([
RandomRotate90(),
Flip(),
#Transpose(),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomContrast(),
RandomBrightness(),
], p=0.3),
#
#HorizontalFlip(.5),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=20, p=.75 ),
Blur(blur_limit=3, p=.33),
OpticalDistortion(p=.33),
GridDistortion(p=.33),
#HueSaturationValue(p=.33)
], p=p)
def weak_aug(p=1.):
return Compose([
RandomRotate90(),
Flip(),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=10, p=.75 ),
RandomBrightnessContrast(p=0.33),
#Blur(blur_limit=3, p=.33),
#OpticalDistortion(p=.33),
#GridDistortion(p=1.33),
#HueSaturationValue(p=.33)
], p=p)
def weak_aug_tta(p=1.):
return Compose([
RandomRotate90(p=1.),
Flip(p=1.),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=10, p=1. ),
RandomBrightnessContrast(p=1.),
#Blur(blur_limit=3, p=.33),
#OpticalDistortion(p=.33),
#GridDistortion(p=1.33),
#HueSaturationValue(p=.33)
], p=p)
#def augment(aug, image):
# return aug(image=image)['image']
def augment_4chan(aug, image):
#print(image.shape)
#image[:,:,0:3]=aug(image=image[:,:,0:3])['image']
#image[:,:,3]=aug(image=image[:,:,1:4])['image'][:,:,2]
augmented = aug(image=image[:,:,0:3], mask=image[:,:,3])
image[:,:,0:3] = augmented['image']
image[:,:,3] = augmented['mask']
#image[0:3,:,:]=aug(image=image[0:3,:,:])['image']
#print('>>>', image.shape)
#print(aug(image=image[1:4,:,:])['image'][2,:,:].shape)
#image[3,:,:]=aug(image=image[1:4,:,:])['image'][2,:,:]
return image
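# augment_4chan rides the 4th (yellow) plane through the `mask` slot so it receives the
# same spatial transform as the RGB planes. Hypothetical call on a synthetic 4-channel
# image (the array is modified in place and returned):
#   aug = weak_aug(p=1.)
#   rgby = np.random.randint(0, 255, size=(512, 512, 4), dtype=np.uint8)
#   rgby_aug = augment_4chan(aug, rgby)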
def get_tta_aug(tta_index=0):
tta_augs = {
1: HorizontalFlip(always_apply=True),
2: VerticalFlip(always_apply=True),
3: Compose([HorizontalFlip(always_apply=True),VerticalFlip(always_apply=True)]),
4: Rotate90(),
5: Compose([Rotate90(), HorizontalFlip(always_apply=True)]),
6: Compose([VerticalFlip(always_apply=True), Rotate90()]),
7: Compose([HorizontalFlip(always_apply=True),VerticalFlip(always_apply=True), Rotate90()]),
}
return tta_augs[tta_index]
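# Test-time augmentation sketch (illustrative only): tta_index 0 means "no TTA", 1-7 pick
# one of the fixed flip/rotate variants above, and anything larger falls back to
# weak_aug_tta inside ImageDataset below. A typical pattern is to average model outputs
# over datasets built with each index, e.g.:
#   # loaders = [data.DataLoader(ImageDataset(False, img_dir, ids, labels, suffix, tta_index=i),
#   #                            batch_size=4) for i in range(8)]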
def open_rgby(img_dir, id, suffix): #a function that reads RGBY image
colors = ['red','green','blue','yellow']
#flags = cv2.IMREAD_GRAYSCALE
#img = [cv2.imread(os.path.join(img_dir, id+'_'+color+'.png'), flags).astype(np.float32)/255
# for color in colors]
if suffix == 'png':
#img = [np.array(Image.open(os.path.join(img_dir, id+'_'+color+'.'+suffix)).convert('L')) for color in colors]
img = [cv2.imread(os.path.join(img_dir, id+'_'+color+'.png'))[:,:,0] for color in colors]
img = np.stack(img, axis=-1)
else:
#img = [np.array(Image.open(os.path.join(img_dir, id+'_'+color+'.'+suffix)).convert('L').resize((512,512))) for color in colors]
img = open_hpa_img(img_dir,id)
#img = np.stack(img, axis=-1)
#img = img.transpose((2,0,1))
return img.astype(np.uint8)
def open_hpa_img(img_dir, id):
colors = ['red','green','blue']
hpa_img = np.zeros((512, 512, 4))
for i, c in enumerate(colors):
img = cv2.imread(os.path.join(img_dir, id+'_'+c+'.jpg'))
img = cv2.resize(img, (512,512))
hpa_img[:,:,i] = img[:,:,i]
yellow_img = cv2.imread(os.path.join(img_dir, id+'_yellow.jpg'))
yellow_img = cv2.resize(yellow_img, (512,512))
hpa_img[:,:,3] = yellow_img[:,:,0]
return hpa_img
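# Hypothetical loading example (the image id is a placeholder; 'png' selects the
# per-channel Kaggle PNGs, any other suffix routes to the HPA jpg loader above):
#   img = open_rgby(settings.TRAIN_IMG_DIR, 'some_image_id', 'png')
#   img.shape, img.dtype   # -> (H, W, 4), uint8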
class ImageDataset(data.Dataset):
def __init__(self, train_mode, img_dir, img_ids, labels=None, suffix=None, tta_index=0, hpa_img_dir=settings.HPA_IMG_DIR):
self.train_mode = train_mode
self.img_dir = img_dir
self.img_ids = img_ids
self.labels = labels
self.suffix = suffix
self.tta_index = tta_index
self.hpa_img_dir = hpa_img_dir
def __getitem__(self, index):
if self.suffix[index] == 'png':
img = open_rgby(self.img_dir, self.img_ids[index], self.suffix[index])
else:
img = open_rgby(self.hpa_img_dir, self.img_ids[index], self.suffix[index])
#Image.fromarray(img[:,:,0:3], mode='RGB').show()
#Image.fromarray(img[:,:,3], mode='L').show()
#cv2.imshow('img', img[:,:,0:3])
#cv2.imshow('img', img[:,:,3])
#cv2.waitKey(0)
if self.train_mode:
aug = augment_inclusive()
#aug = weak_aug()
img = augment_4chan(aug, img)
elif self.tta_index != 0:
#print(self.tta_index)
if self.tta_index <= 7:
aug = get_tta_aug(self.tta_index)
else:
aug = weak_aug_tta()
#print(aug)
img = augment_4chan(aug, img)
else:
pass
#print(img.shape)
#Image.fromarray(img[:,:,0:3], mode='RGB').show()
#Image.fromarray(img[:,:,3], mode='L').show()
img = img.transpose((2,0,1))
img = (img /255).astype(np.float32)
#normalize
mean = [0.0804, 0.0526, 0.0548, 0.0827]
std = [0.1496, 0.1122, 0.1560, 0.1497]
img[0, :,:,] = (img[0, :,:,] - mean[0]) / std[0]
img[1, :,:,] = (img[1, :,:,] - mean[1]) / std[1]
img[2, :,:,] = (img[2, :,:,] - mean[2]) / std[2]
img[3, :,:,] = (img[3, :,:,] - mean[3]) / std[3]
if self.labels is None:
return img
else:
return img, self.get_label_tensor(self.labels[index])
def get_label_tensor(self, label):
classes = set([int(x) for x in label.strip().split()])
labels = torch.FloatTensor([ 1 if i in classes else 0 for i in range(28)])
return labels
def __len__(self):
return len(self.img_ids)
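# Minimal sketch of wiring the dataset into a DataLoader by hand (ids/labels are
# placeholders; the helpers below do this with train/val splitting and optional HPA data):
#   ds = ImageDataset(True, settings.TRAIN_IMG_DIR, ids, labels, suffix=['png'] * len(ids))
#   loader = data.DataLoader(ds, batch_size=4, shuffle=True, num_workers=4)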
def get_train_val_loader(batch_size=4, val_batch_size=4, dev_mode=False, val_num=3500, balanced=False, hpa=0):
df = pd.read_csv(settings.TRAIN_LABEL)
df = shuffle(df, random_state=6)
df['suffix'] = 'png'
split_index = int(df.shape[0] * 0.9)
df_train = df.iloc[:split_index]
df_val = df.iloc[split_index:]
df_val = df_val.iloc[:val_num]
print(df_val.shape)
if hpa > 0:
df_hpa = get_hpa_train_df(hpa)
df_train = pd.concat([df_train, df_hpa])
df_train = shuffle(df_train)
#print(df_train.head())
img_dir = settings.TRAIN_IMG_DIR
img_ids_train = df_train['Id'].values.tolist()
labels_train = df_train['Target'].values.tolist()
suffix = df_train['suffix'].values.tolist()
if balanced:
img_ids_train = get_weighted_sample(df_train, 20000)
labels_train = df_train.set_index('Id').loc[img_ids_train].Target.values.tolist()
if dev_mode:
img_ids_train = img_ids_train[4:5]
labels_train = labels_train[4:5]
suffix = suffix[4:5]
dset_train = ImageDataset(True, img_dir, img_ids_train, labels_train, suffix, hpa_img_dir=settings.HPA_IMG_DIR)
dloader_train = data.DataLoader(dset_train, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True)
dloader_train.num = len(dset_train)
img_ids_val = df_val['Id'].values.tolist()
labels_val = df_val['Target'].values.tolist()
suffix_val = df_val['suffix'].values.tolist()
if dev_mode:
img_ids_val = img_ids_val[3:4]
labels_val = labels_val[3:4]
suffix_val = suffix_val[3:4]
dset_val = ImageDataset(False, img_dir, img_ids_val, labels_val, suffix_val)
dloader_val = data.DataLoader(dset_val, batch_size=val_batch_size, shuffle=False, num_workers=4, drop_last=False)
dloader_val.num = len(dset_val)
return dloader_train, dloader_val
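# Hypothetical call (batch sizes and the number of extra HPA images are placeholders):
#   train_loader, val_loader = get_train_val_loader(batch_size=32, val_batch_size=32, hpa=5000)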
def get_hpa_train_df(train_num):
df = pd.read_csv('HPAv18RGBY_WithoutUncertain_wodpl.csv')
df = shuffle(df, random_state=1234)
df['suffix'] = 'jpg'
split_index = int(df.shape[0] * 0.9)
df_train = df.iloc[:split_index]
df_train = shuffle(df_train)
return df_train.iloc[:train_num]
def get_hpa_loader(batch_size=4, dev_mode=False):
df_train = | pd.read_csv('HPAv18RGBY_WithoutUncertain_wodpl.csv') | pandas.read_csv |
import unittest
import os
import pandas as pd
import logging
from sklearn.exceptions import NotFittedError
from automatminer.utils.package_tools import compare_columns, check_fitted, set_fitted
from automatminer.utils.ml_tools import is_greater_better, regression_or_classification
from automatminer.utils.log_tools import initialize_logger, initialize_null_logger
from automatminer.base import DataframeTransformer, logger_base_name
run_dir = os.getcwd()
class MyTransformer(DataframeTransformer):
def __init__(self):
self.is_fit = False
@set_fitted
def fit(self, df, target):
return df
@check_fitted
def transform(self, df, target):
return df
class TestUtils(unittest.TestCase):
def test_logger_initialization(self):
log = initialize_logger(logger_base_name, level=logging.DEBUG)
log.info("Test logging.")
log.debug("Test debug.")
log.warning("Test warning.")
# test the log is written to run dir (e.g. where the script was called
# from and not the location of this test file
log_file = os.path.join(run_dir, logger_base_name + ".log")
self.assertTrue(os.path.isfile(log_file))
with open(log_file, 'r') as f:
lines = f.readlines()
self.assertTrue("logging" in lines[0])
self.assertTrue("debug" in lines[1])
self.assertTrue("warning" in lines[2])
null = initialize_null_logger("matbench_null")
null.info("Test null log 1.")
null.debug("Test null log 2.")
null.warning("Test null log 3.")
null_log_file = os.path.join(run_dir, logger_base_name + "_null.log")
self.assertFalse(os.path.isfile(null_log_file))
def test_is_greater_better(self):
self.assertTrue(is_greater_better('accuracy'))
self.assertTrue(is_greater_better('r2_score'))
self.assertTrue(is_greater_better('neg_mean_squared_error'))
self.assertFalse(is_greater_better('mean_squared_error'))
def test_compare_columns(self):
df1 = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
df2 = | pd.DataFrame({"b": [3, 4], "c": [4, 5]}) | pandas.DataFrame |
import unittest
import pandas as pd
from tagr.tagging.artifacts import Artifact
DATA = [{"a": 1, "b": 2, "c": 3}, {"a": 10, "b": 20, "c": 30}]
DF = | pd.DataFrame(DATA) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
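# Illustrative semantics exercised by the add/sub tests above (mirrors the assertions,
# shown here only for readability):
#   rng = pd.date_range('2000-01-01', periods=3, freq='H', tz='UTC')
#   rng + pd.Timedelta(hours=2)   # every element shifts by two hours; tz and freq are kept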
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
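# In short, the behaviour checked above: DatetimeIndex - DatetimeIndex is an elementwise
# subtraction returning a TimedeltaIndex (missing values propagate), e.g.:
#   pd.DatetimeIndex(['2012-01-02']) - pd.DatetimeIndex(['2012-01-01'])   # TimedeltaIndex(['1 days'])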
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with | tm.assert_produces_warning(PerformanceWarning) | pandas.util.testing.assert_produces_warning |
# -*- coding: utf-8 -*-
import json
import pandas
import re
import sys
import tests.sample_pydre.core
import tests.sample_pydre.rois
import tests.sample_pydre.metrics
import tests.sample_pydre.filters
import pathlib
from tqdm import tqdm
import logging
logger = logging.getLogger('PydreLogger')
class Project():
def __init__(self, projectfilename):
self.project_filename = projectfilename
# This will suppress the unnecessary SettingWithCopy Warning.
pandas.options.mode.chained_assignment = None
self.definition = None
with open(self.project_filename) as project_file:
try:
self.definition = json.load(project_file)
except json.decoder.JSONDecodeError as e:
# The exact location of the error according to the exception, is a little wonky. Amongst all the text
# editors used, the line number was consistently 1 more than the actual location of the syntax error.
# Hence, the "e.lineno -1" in the logger error below.
logger.error("In " + projectfilename + ": " + str(e.msg) + ". Invalid JSON syntax found at Line: "
+ str(e.lineno - 1) + ".")
# exited as a general error because it is seemingly best suited for the problem encountered
sys.exit(1)
self.data = []
def __loadSingleFile(self, filename):
"""Load a single .dat file (whitespace delmited csv) into a DriveData object"""
# Could cache this re, probably affect performance
d = pandas.read_csv(filename, sep=r'\s+', na_values='.')
datafile_re = re.compile(r"([^_]+)_Sub_(\d+)_Drive_(\d+)(?:.*)\.dat")
match = datafile_re.search(filename)
if match:
experiment_name, subject_id, drive_id = match.groups()
else:
logger.warning(
"Drivedata filename does not match expected format: ExperimentName_Subject_0_Drive_0.dat")
experiment_name = pathlib.Path(filename).stem
subject_id = 1
drive_id = 1
return tests.sample_pydre.core.DriveData(PartID=int(subject_id), DriveID=int(drive_id),
roi=None, data=d, sourcefilename=filename)
def processROI(self, roi, dataset):
"""
Handles running region of interest definitions for a dataset
Args:
roi: A dict containing the type of a roi and the filename of the data used to process it
dataset: a list of pandas dataframes containing the source data to partition
Returns:
A list of pandas DataFrames containing the data for each region of interest
"""
roi_type = roi['type']
if roi_type == "time":
logger.info("Processing ROI file " + roi['filename'])
roi_obj = tests.sample_pydre.rois.TimeROI(roi['filename'])
return roi_obj.split(dataset)
elif roi_type == "rect":
logger.info("Processing ROI file " + roi['filename'])
roi_obj = tests.sample_pydre.rois.SpaceROI(roi['filename'])
return roi_obj.split(dataset)
elif roi_type == "column":
logger.info("Processing ROI column " + roi['columnname'])
roi_obj = tests.sample_pydre.rois.ColumnROI(roi['columnname'])
return roi_obj.split(dataset)
else:
return []
def processFilter(self, filter, dataset):
"""
Handles running any filter definition
Args:
filter: A dict containing the type of a filter and the parameters to process it
Returns:
A list of values with the results
"""
try:
func_name = filter.pop('function')
filter_func = tests.sample_pydre.filters.filtersList[func_name]
report_name = filter.pop('name')
col_names = tests.sample_pydre.filters.filtersColNames[func_name]
except KeyError as e:
logger.warning(
"Filter definitions require both \"name\" and \"function\". Malformed filters definition: missing " + str(
e))
sys.exit(1)
if len(col_names) > 1:
x = [filter_func(d, **filter)
for d in tqdm(dataset, desc=func_name)]
report = pandas.DataFrame(x, columns=col_names)
else:
report = pandas.DataFrame([filter_func(
d, **filter) for d in tqdm(dataset, desc=func_name)], columns=[report_name, ])
return report
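# A "filters" entry in the project JSON needs at least "name" and "function"; every other
# key is forwarded to the filter function as a keyword argument. Illustrative shape only --
# the function name must exist in the filters registry and the extra key is a placeholder:
#   {"name": "smoothed_speed", "function": "someRegisteredFilter", "windowSize": 10}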
def processMetric(self, metric, dataset):
"""
Handles running any metric definition
Args:
metric: A dict containing the type of a metric and the parameters to process it
Returns:
A list of values with the results
"""
try:
func_name = metric.pop('function')
metric_func = tests.sample_pydre.metrics.metricsList[func_name]
report_name = metric.pop('name')
col_names = tests.sample_pydre.metrics.metricsColNames[func_name]
except KeyError as e:
logger.warning(
"Metric definitions require both \"name\" and \"function\". Malformed metrics definition: missing " + str(
e))
sys.exit(1)
if len(col_names) > 1:
x = [metric_func(d, **metric) for d in dataset]
report = | pandas.DataFrame(x, columns=col_names) | pandas.DataFrame |
import unittest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import numpy as np
from ITMO_FS.embedded import *
np.random.seed(42)
class TestCases(unittest.TestCase):
data, target = np.random.randint(10, size=(100, 20)), np.random.randint(10, size=(100,))
feature_names = [''.join(['f', str(i)]) for i in range(data.shape[1])]
feature_names_override = [''.join(['g', str(i)]) for i in range(data.shape[1])]
def test_MOSS(self):
# MOSS
res = MOS().fit_transform(self.data, self.target, sampling=True)
assert self.data.shape[0] == res.shape[0]
print("MOSS:", self.data.shape, '--->', res.shape)
def test_MOSNS(self):
# MOSNS
res = MOS().fit_transform(self.data, self.target, sampling=False)
assert self.data.shape[0] == res.shape[0]
print("MOSNS:", self.data.shape, '--->', res.shape)
def test_losses(self):
for loss in ['log', 'hinge']:
res = MOS(loss=loss).fit_transform(self.data, self.target)
assert self.data.shape[0] == res.shape[0]
def test_df(self):
f = MOS()
df = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), sampling=True)
arr = f.fit_transform(self.data, self.target, sampling=True)
np.testing.assert_array_equal(df, arr)
df = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), sampling=False)
arr = f.fit_transform(self.data, self.target, sampling=False)
np.testing.assert_array_equal(df, arr)
def test_pipeline(self):
# FS
p = Pipeline([('FS1', MOS())])
p.fit(self.data, self.target)
res = p.transform(self.data)
assert self.data.shape[0] == res.shape[0]
# FS - estim
p = Pipeline([('FS1', MOS()), ('E1', LogisticRegression())])
p.fit(self.data, self.target)
assert 0 <= p.score(self.data, self.target) <= 1
# FS - FS
p = Pipeline([('FS1', MOS(loss='log')), ('FS2', MOS(loss='hinge'))])
p.fit(self.data, self.target)
res = p.transform(self.data)
assert self.data.shape[0] == res.shape[0]
# FS - FS - estim
p = Pipeline([('FS1', MOS(loss='log')), ('FS2', MOS(loss='hinge')), ('E1', LogisticRegression())])
p.fit(self.data, self.target)
assert 0 <= p.score(self.data, self.target) <= 1
def test_feature_names_np(self):
f = MOS()
arr = f.fit_transform(self.data, self.target, feature_names=self.feature_names, sampling=True)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
arr = f.fit_transform(self.data, self.target, feature_names=self.feature_names, sampling=False)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
def test_feature_names_df(self):
f = MOS()
arr = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), feature_names=self.feature_names, sampling=True)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
arr = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), feature_names=self.feature_names, sampling=False)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
def test_feature_names_df_defined(self):
dfX = | pd.DataFrame(self.data) | pandas.DataFrame |
import pandas as pd
from firebase import firebase as frb
import json
import os
from dotenv import load_dotenv
from datetime import datetime, date
from time import gmtime, time, strftime, sleep
from pytz import timezone
import schedule
from tzlocal import get_localzone
load_dotenv()
columns = ['Time', 'Price', 'Net Change', 'Sell', 'Buy', 'Trading Volume']
kospi_columns = ['Time', 'Price', 'Net Change', 'Trading Volume', 'Dollar Volume']
exg_columns = ['Inquiry', 'Standard Rate', 'Net Change', 'Cash Buy', 'Cash Sell']
def crawl_intraday_data(code, time):
intra_df = pd.DataFrame()
for i in range(1, 41):
page_df = pd.read_html(os.getenv("INTRADAY_DATA_SOURCE_ADDRESS").format(code=code, time=time, page=i))[0]
intra_df = intra_df.append(page_df)
intra_df.dropna(inplace=True)
intra_df.drop(intra_df.columns[6], axis=1, inplace=True)
intra_df.reset_index(inplace=True, drop=True)
intra_df.columns = columns
yesterday_df = pd.read_html(os.getenv("DAILY_DATA_SOURCE_ADDRESS").format(code=code))[0]
yesterday_df.dropna(inplace=True)
price_yesterday = yesterday_df[yesterday_df.columns[1]].iloc[1]
intra_df['Net Change'] = intra_df['Price'] - price_yesterday
return intra_df
def save_intraday_data(code, date, df):
firebase = frb.FirebaseApplication(os.getenv("FIREBASE_ADDRESS"), None)
df.apply(lambda r: firebase.post('/stock/{code}/{date}'.format(code=code, date=date), json.loads(r.to_json())), axis=1)
def retrieve_intraday_data(code, date):
firebase = frb.FirebaseApplication(os.getenv("FIREBASE_ADDRESS"), None)
data = firebase.get('/stock/{code}/{date}'.format(code=code, date=date), None)
result = pd.DataFrame.from_dict(data, orient='index')
result = result[columns]
result.reset_index(inplace=True, drop=True)
return result
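# Hypothetical round trip (the stock code, time and date strings are placeholders whose
# exact formats depend on the URL templates and paths configured in the .env file):
#   df = crawl_intraday_data('005930', '160000')
#   save_intraday_data('005930', '2020-01-02', df)
#   df_back = retrieve_intraday_data('005930', '2020-01-02')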
def crawl_intraday_kospi_data(time):
kospi_df = | pd.DataFrame() | pandas.DataFrame |
from clean2 import *
import pandas as pd
import matplotlib.pyplot as plt
import math
import datetime
import time
def main():
loop_set=[3,5]
set3=[] #labels scaled at different window sizes
set4=[] #labels without scaling
for i in range(0,len(loop_set)):
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'0'])
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'1'])
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'2'])
set4.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])])
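# With loop_set = [3, 5] the scaled label columns collected in set3 are
# 'totalmovavg_predictclose3_450', '..._451', '..._452' and 'totalmovavg_predictclose5_750/751/752',
# while set4 keeps the unscaled 'totalmovavg_predictclose3_45' and 'totalmovavg_predictclose5_75'.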
data_window=pd.DataFrame()
data_window_labels=pd.DataFrame()
final_data=pd.DataFrame()
predictors_1=pd.DataFrame()
predictors=pd.DataFrame()
predictors_final=pd.DataFrame()
data_copy_labels=pd.DataFrame()
data_predict=pd.DataFrame()
close_win=pd.DataFrame()
data=pd.DataFrame()
data_copy=pd.DataFrame()
labe_train=pd.DataFrame()
labe_test=pd.DataFrame()
data_la=pd.DataFrame()
data_confr=pd.DataFrame()
final_data.loc[0,'predicted_close']=0
final_data.loc[0,'predicted_close_high']=0
final_data.loc[0,'predicted_close_low']=0
now=datetime.datetime.now()
day=now.strftime('%d')
hour=now.strftime('%H')
now=now.strftime('%M')
size0=1999 # a size0 that is too small leaves too little data to process
now1=int(day)*1440+int(hour)*60+int(now)+size0
now=int(day)*1440+int(hour)*60+int(now)
set_windows=[7,8,8.5]
starters=[]
size_=size0-15*loop_set[len(loop_set)-1]
for i in set_windows:
starters.extend([size_-int(i*size_/9)])
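# Worked example of the window starts above: with size0=1999 and loop_set ending in 5,
# size_ = 1999 - 75 = 1924, so set_windows = [7, 8, 8.5] gives starters = [428, 214, 107].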
delay_max_window=20
count=0
count1=0
lab_tra=0
x=[]
y=[]
yy=[]
ya=[]
yb=[]
yc=[]
x.extend([count1])
plt.ion()
fig=plt.figure()
ax1=fig.add_subplot(1,1,1)
from sklearn.externals import joblib
gbrt_fin=joblib.load('gbrt_final_close')
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
while now1-now>0:
size=now1-int(now)
now=datetime.datetime.now()
d=now.strftime('%d')
h=now.strftime('%H')
now=now.strftime('%M')
now=int(d)*1440+int(h)*60+int(now)
data_cycle=data_creator('BTC','EUR',size,1)
data=data.shift(-(size))
data.drop(data.tail(size+1).index,inplace=True)
frame_cycle=[data,data_cycle]
data=pd.concat(frame_cycle)
data=data.reset_index()
data=data.drop(['index'],axis=1)
data_feat=pd.DataFrame()
data_feat=data.copy()
data_feat=data_feat.iloc[len(data_feat)-size0-1:,:]
data_feat=data_feat.reset_index()
data_feat=data_feat.drop(['index'],axis=1)
last_data=size+1
seconds=datetime.datetime.now()
seconds=seconds.strftime('%S')
for i in range(0,len(loop_set)):
short_window=loop_set[i]
window1=2*loop_set[i]
window2=4*loop_set[i]
window3=6*loop_set[i]
window4=10*loop_set[i]
window5=15*loop_set[i]
local_variance_window=int(round(1+short_window/2))
slope_window=int(round(1+(short_window/2)))
#Labels
if last_data == len(data_feat):
movavfun=total_movavg_predict(data_feat,'close',short_window,window1,window2,window3,window4,window5)
avg_close1=total_movavg(data_feat,'close',short_window,window1,window2,window3,window4,window5)
#Features
short_window=int(round(1+short_window/2))
window1=int(round(1+window1/2))
window2=int(round(1+window2/2))
window3=int(round(1+window3/2))
window4=int(round(1+window4/2))
window5=int(round(1+window5/2))
local_variance_window=int(round(1+local_variance_window/2))
slope_window=int(round(1+slope_window/2))
avg_close=total_movavg(data_feat,'close',short_window,window1,window2,window3,window4,window5)
avg_close_root=movavg(data_feat,'close',short_window)
local_variance_close=local_msq(data_feat,'close',avg_close,local_variance_window)
msroot_close=msroot(data_feat,'close',avg_close_root,short_window)
entropy_close=entropy(data_feat,'close',msroot_close,short_window,size)
local_entropy_close=entropy(data_feat,'close',local_variance_close,short_window,size)
avg_entropy_close=movavg(data_feat,entropy_close[1],short_window)
slope_close=slope(data_feat,'close',slope_window)
avg_slope=total_movavg(data_feat,slope_close,short_window,window1,window2,window3,window4,window5)
avg_slope_root=movavg(data_feat,slope_close,short_window)
local_variance_slope=local_msq(data_feat,slope_close,avg_slope,local_variance_window)
msroot_slope=msroot(data_feat,slope_close,avg_slope_root,short_window)
entropy_slope=entropy(data_feat,slope_close,msroot_slope,short_window,size)
local_entropy_slope=entropy(data_feat,slope_close,local_variance_slope,short_window,size)
avg_entropy_slope=movavg(data_feat,entropy_slope[1],short_window)
data_feat['high_close'+str(loop_set[i])]=data_feat[avg_close]+data_feat[local_variance_close]
avg_high=total_movavg(data_feat,'high_close'+str(loop_set[i]),short_window,window1,window2,window3,window4,window5)
avg_high_root=movavg(data_feat,'high_close'+str(loop_set[i]),short_window)
local_variance_high=local_msq(data_feat,'high_close'+str(loop_set[i]),avg_high,local_variance_window)
msroot_high=msroot(data_feat,'high_close'+str(loop_set[i]),avg_high_root,short_window)
entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),msroot_high,short_window,size)
local_entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),local_variance_high,short_window,size)
avg_entropy_high=movavg(data_feat,entropy_high[1],short_window)
data_feat['low_close'+str(loop_set[i])]=data_feat[avg_close]-data_feat[local_variance_close]
avg_low=total_movavg(data_feat,'low_close'+str(loop_set[i]),short_window,window1,window2,window3,window4,window5)
avg_low_root=movavg(data_feat,'low_close'+str(loop_set[i]),short_window)
local_variance_low=local_msq(data_feat,'low_close'+str(loop_set[i]),avg_high,local_variance_window)
msroot_low=msroot(data_feat,'low_close'+str(loop_set[i]),avg_low_root,short_window)
entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),msroot_low,short_window,size)
local_entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),local_variance_low,short_window,size)
avg_entropy_low=movavg(data_feat,entropy_low[1],short_window)
else:
#Labels
movavfun=total_movavg_predict_cycle(data_feat,'close',last_data,short_window,window1,window2,window3,window4,window5)
avg_close1=total_movavg_cycle(data_feat,'close',last_data,short_window,window1,window2,window3,window4,window5)
#Features
short_window=int(round(1+short_window/2))
window1=int(round(1+window1/2))
window2=int(round(1+window2/2))
window3=int(round(1+window3/2))
window4=int(round(1+window4/2))
window5=int(round(1+window5/2))
local_variance_window=int(round(1+local_variance_window/2))
slope_window=int(round(1+slope_window/2))
avg_close=total_movavg_cycle(data_feat,'close',last_data,short_window,window1,window2,window3,window4,window5)
avg_close_root=movavg_cycle(data_feat,'close',short_window,last_data)
local_variance_close=local_msq_cycle(data_feat,'close',avg_close,local_variance_window,last_data)
msroot_close=msroot(data_feat,'close',avg_close_root,short_window)
entropy_close=entropy(data_feat,'close',msroot_close,short_window,size)
local_entropy_close=entropy(data_feat,'close',local_variance_close,short_window,size)
avg_entropy_close=movavg_cycle(data_feat,entropy_close[1],short_window,last_data)
slope_close=slope_cycle(data_feat,'close',slope_window,last_data)
avg_slope=total_movavg_cycle(data_feat,slope_close,last_data,short_window,window1,window2,window3,window4,window5)
avg_slope_root=movavg_cycle(data_feat,slope_close,short_window,last_data)
local_variance_slope=local_msq_cycle(data_feat,slope_close,avg_slope,local_variance_window,last_data)
msroot_slope=msroot(data_feat,slope_close,avg_slope_root,short_window)
entropy_slope=entropy(data_feat,slope_close,msroot_slope,short_window,size)
local_entropy_slope=entropy(data_feat,slope_close,local_variance_slope,short_window,size)
avg_entropy_slope=movavg_cycle(data_feat,entropy_slope[1],short_window,last_data)
data_feat['high_close'+str(loop_set[i])]=data_feat[avg_close]+data_feat[local_variance_close]
avg_high=total_movavg_cycle(data_feat,'high_close'+str(loop_set[i]),last_data,short_window,window1,window2,window3,window4,window5)
avg_high_root=movavg_cycle(data_feat,'high_close'+str(loop_set[i]),short_window,last_data)
local_variance_high=local_msq_cycle(data_feat,'high_close'+str(loop_set[i]),avg_high,local_variance_window,last_data)
msroot_high=msroot(data_feat,'high_close'+str(loop_set[i]),avg_high_root,short_window)
entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),msroot_high,short_window,size)
local_entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),local_variance_high,short_window,size)
avg_entropy_high=movavg_cycle(data_feat,entropy_high[1],short_window,last_data)
data_feat['low_close'+str(loop_set[i])]=data_feat[avg_close]-data_feat[local_variance_close]
avg_low=total_movavg_cycle(data_feat,'low_close'+str(loop_set[i]),last_data,short_window,window1,window2,window3,window4,window5)
avg_low_root=movavg_cycle(data_feat,'low_close'+str(loop_set[i]),short_window,last_data)
local_variance_low=local_msq_cycle(data_feat,'low_close'+str(loop_set[i]),avg_high,local_variance_window,last_data)
msroot_low=msroot(data_feat,'low_close'+str(loop_set[i]),avg_low_root,short_window)
entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),msroot_low,short_window,size)
local_entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),local_variance_low,short_window,size)
avg_entropy_low=movavg_cycle(data_feat,entropy_low[1],short_window,last_data)
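    # The label frame is (re)built only when the fetched chunk spans the whole feature window.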
if last_data == len(data_feat):
data_labels=pd.DataFrame()
labels(data_labels,data_feat,'close',loop_set)
lista=list(data_labels.columns.values)
quantity=int(round((loop_set[len(loop_set)-1]/2)+1))
if last_data != len(data_feat):
data_final=data_feat.iloc[len(data_feat)-(size+quantity+1):,:]
data.drop(data.tail(quantity+size+1).index,inplace=True)
else:
data_final=data_feat.iloc[len(data_feat)-(size+1):,:]
data.drop(data.tail(size+1).index,inplace=True)
frame0=[data,data_final]
data=pd.concat(frame0)
now1=datetime.datetime.now()
d1=now1.strftime('%d')
h1=now1.strftime('%H')
m1=now1.strftime('%M')
seconds1=now1.strftime('%S')
now1=int(d1)*1440+int(h1)*60+int(m1)
size1=now1-int(now)
difsec=int(seconds1)+60*size1-int(seconds)
if size1==1 and 60-int(seconds1)<int(difsec/size):
time.sleep(60-int(seconds1)+1)
now1=datetime.datetime.now()
d1=now1.strftime('%d')
h1=now1.strftime('%H')
m1=now1.strftime('%M')
now1=int(d1)*1440+int(h1)*60+int(m1)
print(now1)
print('i waited a little')
print(int(difsec/size))
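    # Prepare a working copy: recompute the labels separately, strip the label columns
    # from the features and trim both frames to a fixed lookback of starters[0]+21+150 rows.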
data_work=data.copy()
data_copy_labels=pd.DataFrame()
labels(data_copy_labels,data_work,'close',loop_set)
clean_labels(data_work,'close',loop_set)
lista=list(data_labels.columns.values)
data_work=data_work.dropna()
data_work=data_work.reset_index()
data_work=data_work.drop(['index'],axis=1)
len1=starters[0]+21+150
data_work=data_work.iloc[len(data_work)-starters[0]-21-150:,:]
data_copy_labels=data_copy_labels.iloc[len(data_copy_labels)-starters[0]-21-150:,:]
data_work=data_work.reset_index()
data_work=data_work.drop(['index'],axis=1)
data_copy_labels=data_copy_labels.reset_index()
data_copy_labels=data_copy_labels.drop(['index'],axis=1)
len2=len(data_work)
if len1 != len2:
print('Warning, data_work length is varying!')
data_confr['totalmovavgclose'+str(loop_set[0])+'_'+str(15*loop_set[0])]=data_work['totalmovavgclose'+str(loop_set[0])+'_'+str(15*loop_set[0])]
data_work=data_work.drop(['totalmovavgclose'+str(loop_set[0])+'_'+str(15*loop_set[0])],axis=1)
data_work=data_work.drop(['totalmovavgclose'+str(loop_set[1])+'_'+str(15*loop_set[1])],axis=1)
data_work=data_work.drop(['date'],axis=1)
data_work=data_work.drop(['time'],axis=1)
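    # Zero-initialised accumulators for per-starter / per-label prediction statistics.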
data_iterator=pd.DataFrame()
for q in starters:
for h in range(0,len(lista)):
name=lista[h]
data_iterator.loc[0,'variance_pred'+str(starters.index(q))+name]=0
data_iterator.loc[0,'variance_gbrt'+str(starters.index(q))+name]=0
data_iterator.loc[0,'variance_ada'+str(starters.index(q))+name]=0
data_iterator.loc[0,'counter'+str(starters.index(q))+name]=0
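    # close_win holds the last value of a standardised rolling close window per starter length:
    # built from scratch on the first pass, then only extended with the newly arrived rows.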
if close_win.empty:
true_features = pd.read_csv('folder address'+'true_features.txt')
true_features=true_features.drop(['Unnamed: 0'],axis=1)
true_f=[]
for l in true_features.columns.values:
true_f.extend([l])
set1=[]
for k in data_work.columns.values:
set1.extend([k])
set2=set(set1) - set(true_f)
set2=list(set2)
for j in starters:
start=j
start0=starters[0]
for i in range(start0,len(data_work)+1):
data_copy=data_work['close'].iloc[i-start:i]
data_copy=data_copy.values.reshape(-1,1)
data_copy=pd.DataFrame(scaler.fit_transform(data_copy))
close_win.loc[i-start0,'close_'+str(start)]=data_copy[0][len(data_copy)-1]
del data_copy
data_copy=pd.DataFrame()
else:
close_win.drop(close_win.tail(1).index,inplace=True)
close_win=close_win.shift(-(size))
close_win=close_win.dropna()
for j in starters:
start=j
start0=starters[0]
for i in range(len(data_work)-(size),len(data_work)+1):
data_copy=data_work['close'].iloc[i-start:i]
data_copy=data_copy.values.reshape(-1,1)
data_copy=pd.DataFrame(scaler.fit_transform(data_copy))
close_win.loc[i-start0,'close_'+str(start)]=data_copy[0][len(data_copy)-1]
del data_copy
data_copy=pd.DataFrame()
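    # Standardise each starter-length window ending at the new rows and assemble the
    # model inputs for them via blender1.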
if size1>=0 and count >0:
data_work1=data_work
for o in set2:
data_work1=data_work1.drop([str(o)],axis=1)
predictors_final=predictors_final.shift(-size)
predictors_final=predictors_final.dropna()
predictors_final=predictors_final.reset_index()
predictors_final=predictors_final.drop(['index'],axis=1)
for i in range(len(data_work1)-(size+quantity-1),len(data_work1)+1):
del predictors_1
predictors_1=pd.DataFrame()
for j in starters:
start=j
start0=starters[0]
data_copy=data_work1.copy()
data_copy1=data_copy_labels.copy()
data_copy=data_work1.iloc[i-start:i,:]
data_copy1=data_copy_labels.iloc[i-start:i,:]
data_copy1=data_copy1.reset_index()
data_copy1=data_copy1.drop(['index'],axis=1)
for b in data_copy1.columns.values:
if data_copy1[b].isnull().values.any():
data_la.loc[0,b]=data_copy1[b][len(data_copy1)-1]
else:
data_copy_=data_copy1[b]
data_copy_=data_copy_.values.reshape(-1,1)
data_copy_=pd.DataFrame(scaler.fit_transform(data_copy_))
data_copy_=data_copy_.iloc[len(data_copy_)-1:len(data_copy_)]
data_copy_=data_copy_.rename(index=str,columns={data_copy_.columns.values[0]:b})
data_copy_=data_copy_.reset_index()
data_la.loc[0,b]=data_copy_[b][0]
data_copy=pd.DataFrame(scaler.fit_transform(data_copy),columns=data_copy.columns)
data_copy=data_copy.iloc[start-1:start,:]
predictors=blender1(data_copy,data_la,lista,str(starters.index(start)))
predictors_1= | pd.concat([predictors_1,predictors],axis=1) | pandas.concat |
# Written by: <NAME>, @dataoutsider
# Viz: "Takeoff", enjoy!
import pandas as pd
import os
df_pietree = pd.read_csv(os.path.dirname(__file__) + '/pie_tree.csv')
df_lines = pd.read_csv(os.path.dirname(__file__) + '/lines.csv')
print(df_pietree)
print(df_lines)
df_curves = []
df_pietree['chart'] = 'pie_tree'
df_curves.append(df_pietree)
df_lines['chart'] = 'lines'
df_curves.append(df_lines)
df_combined = | pd.concat(df_curves, axis=0) | pandas.concat |
""" Model for output of general/metadata data, useful for a batch """
from typing import List
import pandas as pd
from pydantic import BaseModel, Field
from nowcasting_dataset.consts import SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME
from nowcasting_dataset.filesystem.utils import check_path_exists
from nowcasting_dataset.utils import get_start_and_end_example_index
class Metadata(BaseModel):
"""Class to store metadata data"""
batch_size: int = Field(
...,
        ge=0,
description="The size of this batch. If the batch size is 0, "
"then this item stores one data item",
)
t0_datetime_utc: List[pd.Timestamp] = Field(
...,
description="The t0s of each example ",
)
x_center_osgb: List[int] = Field(
...,
description="The x centers of each example in OSGB coordinates",
)
y_center_osgb: List[int] = Field(
...,
description="The y centers of each example in OSGB coordinates",
)
def save_to_csv(self, path):
"""
Save metadata to a csv file
Args:
            path: the path where the file should be saved
"""
filename = f"{path}/{SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME}"
metadata_dict = self.dict()
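        # batch_size describes the whole batch, so it is not written out per example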
metadata_dict.pop("batch_size")
# if file exists, add to it
try:
check_path_exists(filename)
except FileNotFoundError:
metadata_df = | pd.DataFrame(metadata_dict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int8),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution; Arrow supports s, ms,
        # us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
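# Helper: round-trip a DataFrame through an Arrow Table (or RecordBatch) and
# check that the data, dtypes and (optionally) the schema survive unchanged.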
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
        # without the pandas-generated key-value metadata, so we need to
        # add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
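# Helper: round-trip a Series through pa.array and back, checking the Arrow type
# and re-localizing tz-aware timestamps before comparing.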
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
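# Helper: round-trip raw values through pa.array, verifying the null count against
# the optional mask before comparing with the original values.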
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
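# Helper: round-trip a NumPy array through pa.array and compare element-wise.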
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
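# integer_object_nulls=True keeps integer columns containing nulls as object arrays
# of Python ints (with None), instead of upcasting them to float64.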
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
        target_size = 3 * 1024**3  # 3GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
def check_zero_copy_failure(self, arr):
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_object_types(self):
self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))
def test_zero_copy_failure_with_int_when_nulls(self):
self.check_zero_copy_failure(pa.array([0, 1, None]))
def test_zero_copy_failure_with_float_when_nulls(self):
self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))
def test_zero_copy_failure_on_bool_types(self):
self.check_zero_copy_failure(pa.array([True, False]))
def test_zero_copy_failure_on_list_types(self):
arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))
self.check_zero_copy_failure(arr)
def test_zero_copy_failure_on_timestamp_types(self):
arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
self.check_zero_copy_failure(pa.array(arr))
# This function must be at the top-level for Python 2.7's multiprocessing
def _non_threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=False)
_check_pandas_roundtrip(df, use_threads=False, as_batch=True)
def _threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=True)
_check_pandas_roundtrip(df, use_threads=True, as_batch=True)
class TestConvertMisc(object):
"""
Miscellaneous conversion tests.
"""
type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64()),
(np.float16, pa.float16()),
(np.float32, pa.float32()),
(np.float64, pa.float64()),
# XXX unsupported
# (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
(np.object, pa.string()),
(np.object, pa.binary()),
(np.object, pa.binary(10)),
(np.object, pa.list_(pa.int64())),
]
def test_all_none_objects(self):
df = pd.DataFrame({'a': [None, None, None]})
_check_pandas_roundtrip(df)
def test_all_none_category(self):
df = pd.DataFrame({'a': [None, None, None]})
df['a'] = df['a'].astype('category')
_check_pandas_roundtrip(df)
def test_empty_arrays(self):
for dtype, pa_type in self.type_pairs:
arr = np.array([], dtype=dtype)
_check_array_roundtrip(arr, type=pa_type)
def test_non_threaded_conversion(self):
_non_threaded_conversion()
def test_threaded_conversion_multiprocess(self):
# Parallel conversion should work from child processes too (ARROW-2963)
pool = mp.Pool(2)
try:
pool.apply(_threaded_conversion)
finally:
pool.close()
pool.join()
def test_category(self):
repeats = 5
v1 = ['foo', None, 'bar', 'qux', np.nan]
v2 = [4, 5, 6, 7, 8]
v3 = [b'foo', None, b'bar', b'qux', np.nan]
arrays = {
'cat_strings': pd.Categorical(v1 * repeats),
'cat_strings_with_na': pd.Categorical(v1 * repeats,
categories=['foo', 'bar']),
'cat_ints': pd.Categorical(v2 * repeats),
'cat_binary': pd.Categorical(v3 * repeats),
'cat_strings_ordered': pd.Categorical(
v1 * repeats, categories=['bar', 'qux', 'foo'],
ordered=True),
'ints': v2 * repeats,
'ints2': v2 * repeats,
'strings': v1 * repeats,
'strings2': v1 * repeats,
'strings3': v3 * repeats}
df = pd.DataFrame(arrays)
_check_pandas_roundtrip(df)
for k in arrays:
_check_array_roundtrip(arrays[k])
def test_category_implicit_from_pandas(self):
# ARROW-3374
def _check(v):
arr = pa.array(v)
result = arr.to_pandas()
tm.assert_series_equal(pd.Series(result), pd.Series(v))
arrays = [
pd.Categorical(['a', 'b', 'c'], categories=['a', 'b']),
pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'],
ordered=True)
]
for arr in arrays:
_check(arr)
def test_empty_category(self):
# ARROW-2443
df = pd.DataFrame({'cat': pd.Categorical([])})
_check_pandas_roundtrip(df)
def test_mixed_types_fails(self):
data = pd.DataFrame({'a': ['a', 1, 2.0]})
with pytest.raises(pa.ArrowTypeError):
pa.Table.from_pandas(data)
data = pd.DataFrame({'a': [1, True]})
with pytest.raises(pa.ArrowTypeError):
pa.Table.from_pandas(data)
data = pd.DataFrame({'a': ['a', 1, 2.0]})
expected_msg = 'Conversion failed for column a'
with pytest.raises(pa.ArrowTypeError, match=expected_msg):
pa.Table.from_pandas(data)
def test_strided_data_import(self):
cases = []
columns = ['a', 'b', 'c']
N, K = 100, 3
random_numbers = np.random.randn(N, K).copy() * 100
numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'f4', 'f8']
for type_name in numeric_dtypes:
cases.append(random_numbers.astype(type_name))
# strings
cases.append(np.array([tm.rands(10) for i in range(N * K)],
dtype=object)
.reshape(N, K).copy())
# booleans
boolean_objects = (np.array([True, False, True] * N, dtype=object)
.reshape(N, K).copy())
# add some nulls, so dtype comes back as objects
boolean_objects[5] = None
cases.append(boolean_objects)
cases.append(np.arange("2016-01-01T00:00:00.001", N * K,
dtype='datetime64[ms]')
.reshape(N, K).copy())
strided_mask = (random_numbers > 0).astype(bool)[:, 0]
for case in cases:
df = pd.DataFrame(case, columns=columns)
col = df['a']
_check_pandas_roundtrip(df)
_check_array_roundtrip(col)
_check_array_roundtrip(col, mask=strided_mask)
def test_all_nones(self):
def _check_series(s):
converted = pa.array(s)
assert isinstance(converted, pa.NullArray)
assert len(converted) == 3
assert converted.null_count == 3
for item in converted:
assert item is pa.NA
_check_series(pd.Series([None] * 3, dtype=object))
_check_series(pd.Series([np.nan] * 3, dtype=object))
_check_series( | pd.Series([None, np.nan, None], dtype=object) | pandas.Series |
from collections import namedtuple
import datetime
from tqdm import tqdm
import pandas as pd
from docplex.mp.model import Model
from docplex.util.environment import get_environment
from functools import reduce
import numpy as np
# ----------------------------------------------------------------------------
# Initialize the problem data
# ----------------------------------------------------------------------------
TMemberAvailability = namedtuple(
"TMemberAvailability", ["ContactID", "TimeFrom", "TimeTo"]
)
TMemberWorksiteReference = namedtuple(
"TMemberWorksiteReference", ["ContactID", "Worksite"]
)
TMemberShiftReference = namedtuple(
"TMemberShiftReference", ["ContactID", "Shift", "StartRange", "EndRange"]
)
TMemberMeasurement = namedtuple(
"TMemberMeasurement", ["ContactID", "Measurement"])
TVacancyDetail = namedtuple(
"TVacancyDetail",
[
"StartDate",
"EndDate",
"Quantity",
"Worksite",
"Position",
"WorksiteID",
"PositionID",
"Measurement",
],
)
TVacancyObjectTime = namedtuple(
"TVacancyObjectTime", ["Title", "DateFrom",
"DateTo", "Position", "Worksite"]
)
TShiftConstraints = namedtuple(
"TShiftContraints",
[
"MinPeopleWorking",
"ScheduledBreakHours",
"MaxHoursPerDay",
"MaxHoursPerWeek",
"MaxConsecutiveShift",
"MinBreakBetweenShift",
"MinHoursPerShift",
],
)
TRange = namedtuple("TRange", ["From", "To"])
file_name = "Squirrel_Optimization.xlsx"
excel_data_file = pd.ExcelFile(file_name)
MAX_BREAK_PER_SHIFT = 4
DEFAULT_BREAK_LENGTH = 0.5 * 60
MIN_SHIFT_LENGTH = 4 * 60
NUM_OBJECTTIME_PER_DAY = 12
def lookup(lst, func):
# print("Lookup ")
for i in lst:
# print(i)
# print(func(i))
if func(i):
return i
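# Illustrative use of lookup() (sketch only; `availabilities` and `some_id` are
# hypothetical names, not defined at this point in the module):
#   first_match = lookup(availabilities, lambda a: a.ContactID == some_id)
#   # -> returns the first element for which the predicate is True, else None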
def load_data(model, excel, verbose):
df_vacancy_detail = excel.parse("Vacancy")
df_vacancy_objectTime = excel.parse("Vacancy Object Time")
df_teamMember_measurement = excel.parse("Team Member Measurement")
df_teamMember_worksiteReference = excel.parse(
"Team Member Worksite Preference")
df_teamMember_availability = excel.parse("Team Member Availability")
df_teamMember_shiftReference = excel.parse("Team Member Shift Preference")
df_shift_constraints = excel.parse("Shift Constraints")
anchor_date = df_vacancy_detail["StartDate"][0]
# print(anchor_date)
def date2num(dt): return int((dt - anchor_date).total_seconds() / 60)
model.num2date = lambda n: anchor_date + datetime.timedelta(
days=int(n / (60 * 24)), hours=int((n % (60 * 24))) / 60
)
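    # Worked example of the minute-offset encoding (hypothetical anchor of
    # 2020-01-06 00:00): date2num(2020-01-06 05:00) -> 300 and
    # date2num(2020-01-07 10:30) -> 24*60 + 10*60 + 30 = 2070, while
    # num2date(2070) -> anchor + 1 day 10.5 hours = 2020-01-07 10:30.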
# print(list(map(date2num, df_vacancy_objectTime["DateFrom"])))
df_vacancy_objectTime.loc[:, "DateFrom"] = list(
map(date2num, df_vacancy_objectTime["DateFrom"])
)
df_vacancy_objectTime.loc[:, "DateTo"] = list(
map(date2num, df_vacancy_objectTime["DateTo"])
)
df_teamMember_availability.loc[:, "TimeFrom"] = list(
map(date2num, df_teamMember_availability["TimeFrom"])
)
df_teamMember_availability.loc[:, "TimeTo"] = list(
map(date2num, df_teamMember_availability["TimeTo"])
)
del df_teamMember_availability["Team Member"]
del df_teamMember_worksiteReference["Team Member"]
del df_teamMember_shiftReference["Team Member"]
del df_teamMember_measurement["Team Member"]
MEM_AVAILAVILITY = [
TMemberAvailability(*row) for _, row in df_teamMember_availability.iterrows()
]
MEM_WORKSITE_REFERENCE = [
TMemberWorksiteReference(*row)
for _, row in df_teamMember_worksiteReference.iterrows()
]
MEM_SHIFT_REFERENCE = [
TMemberShiftReference(*row)
for _, row in df_teamMember_shiftReference.iterrows()
]
MEM_MEASUREMENT = [
TMemberMeasurement(*row) for _, row in df_teamMember_measurement.iterrows()
]
VACANCY_DETAIL = [TVacancyDetail(*row) for _, row in df_vacancy_detail.iterrows()][
0
]
VACANCY_OBJECTTIME = [
TVacancyObjectTime(*row) for _, row in df_vacancy_objectTime.iterrows()
]
SHIFT_CONSTRAINTS = TShiftConstraints(
*tuple(
[
i if i != "04:00 to 06:00" else TRange(4 * 60, 7 * 60)
for i in df_shift_constraints["Value"].tolist()
]
)
)
# model.number_of_overlaps = 0
model.availabilities = MEM_AVAILAVILITY
model.objecttimes = VACANCY_OBJECTTIME
model.worksite_refs = MEM_WORKSITE_REFERENCE
model.shift_refs = MEM_SHIFT_REFERENCE
model.member_measurement = MEM_MEASUREMENT[:11]
model.shift_constraints = SHIFT_CONSTRAINTS
model.vacancy_detail = VACANCY_DETAIL
def setup_data(model: Model):
model.members = {m.ContactID: m for m in model.member_measurement}
lst = list(set(model.objecttimes))
lst.sort(key=lambda x: x.DateFrom)
model.objecttime_ids = {i: o for i, o in enumerate(lst[:1])}
def setup_variables(model: Model):
# MemberAssignment_contactId
print("Num of Members: ", len(model.members))
model.member_assignment_vars = model.binary_var_dict(
keys=model.members.keys(), name="MemberAssignment"
)
# if a shift is assigned -> 1
model.shift_assignment_vars = model.binary_var_matrix(
keys1=model.members.keys(),
keys2=model.objecttime_ids.keys(),
name="ShiftAssignment"
)
# print(model.shift_assignment_vars, "\n")
# ShiftStart_contactId_objectId_shift
model.shift_start_vars = model.integer_var_matrix(
keys1=model.members.keys(),
keys2=model.objecttime_ids.keys(),
lb=0,
ub=60 * 24 * 10, # Limit for 10 days
name="ShiftStart",
)
# print(model.shift_start_vars,"\n")
# ShiftEnd
model.shift_end_vars = model.integer_var_matrix(
keys1=model.members.keys(),
keys2=model.objecttime_ids.keys(),
lb=0,
ub=60 * 24 * 10, # Limit for 10 days
name="ShiftEnd",
)
# BreakStart
model.break_start_vars = model.integer_var_cube(
keys1=model.members.keys(),
keys2=model.objecttime_ids.keys(),
keys3=[j for j in range(0, MAX_BREAK_PER_SHIFT)],
lb=0,
ub=60 * 24 * 10, # Limit for 10 days
name="BreakStart",
)
# BreakDuration
# print(model.break_start_vars)
model.break_duration_vars = model.integer_var_cube(
keys1=model.members.keys(),
keys2=model.objecttime_ids.keys(),
keys3=[j for j in range(0, MAX_BREAK_PER_SHIFT)],
lb=0,
ub=DEFAULT_BREAK_LENGTH, # maximum break length (in minute)
name="BreakDuration",
)
# model.member_over_average_time_vars = model.continuous_var_dict(
# model.members.keys(), lb=0, name="MemberOverAverageWorkTime"
# )
# model.member_under_average_time_vars = model.continuous_var_dict(
# model.members.keys(), lb=0, name="MemberUnderAverageWorkTime"
# )
# model.average_member_work_time = model.continuous_var(lb=0, name="AverageWorkTime")
return
def setup_constraints(model: Model):
def getDate(x): return int(x / (24 * 60))
numDayOfVacancy = (
model.vacancy_detail.EndDate - model.vacancy_detail.StartDate
).days + 1
# If any partial shift of a member is assigned => this member is assigned
for ctactId, assignmendVar in model.member_assignment_vars.items():
lstShift = [
model.shift_assignment_vars[(ctactId, objecttimeId)]
for objecttimeId in model.objecttime_ids.keys()
]
# if assigned
model.add_constraint(
model.equivalence_constraint(
assignmendVar, model.sum(lstShift) >= 1, true_value=1
),
"ShiftAssignedToMemberAssigned",
)
# else
model.add_constraint(
model.equivalence_constraint(
assignmendVar, model.sum(lstShift) == 0, true_value=0
),
"ShiftAssignedToMemberAssigned",
)
# ## CONSTRAINT: LIMIT THE NUMBER OF CONSECUTIVE day-SHIFT
# _working_day_vars = model.binary_var_matrix(
# model.members.keys(), range(0, numDayOfVacancy), "_WorkedDay"
# )
# maxConsecutiveShift = model.shift_constraints.MaxConsecutiveShift
# for ctactId in model.members.keys():
# for objtId, objt in model.objecttime_ids.items():
# # Flag a day is a working day if it has atleast 1 shift
# model.add_constraint(
# model.equivalence_constraint(
# _working_day_vars[(ctactId, objtId)],
# model.sum(
# model.shift_assignment_vars[ctactId, objtId, shift]
# for shift in range(0, MAX_SHIFT_PER_OBJECTTIME)
# )
# >= 1, # there is atleast 1 assigned shift
# ),
# "CheckingWorkingDay",
# )
# # Flag a day is NOT a working day if it has NO shift
# model.add_constraint(
# model.equivalence_constraint(
# _working_day_vars[(ctactId, objtId)],
# model.sum(
# model.shift_assignment_vars[ctactId, objtId, shift]
# for shift in range(0, MAX_SHIFT_PER_OBJECTTIME)
# )
# == 0, # there is no assigned shift
# true_value=0,
# ),
# "CheckingWorkingDay",
# )
# # Set a day off for the 7th day if the 6 days before are consecutive
# for days in range(maxConsecutiveShift, numDayOfVacancy):
# model.add_constraint(
# model.if_then(
# model.sum(
# _working_day_vars[(ctactId, day)]
# for day in range(days - maxConsecutiveShift, days)
# )
# >= maxConsecutiveShift,
# _working_day_vars[(ctactId, days)] == 0,
# ),
# "LimitConsecutiveWorkedDays",
# )
# CONSTRAINT : LIMIT WORKING HOUR PER DAY
for ctactId in model.members.keys():
for objtId, objt in model.objecttime_ids.items():
model.add_constraint(
model.le_constraint(
model.shift_end_vars[(ctactId, objtId)]
- model.shift_start_vars[(ctactId, objtId)]
- model.sum(
model.break_duration_vars[
(ctactId, objtId, brk)
]
for brk in range(0, MAX_BREAK_PER_SHIFT)
),
model.shift_constraints.MaxHoursPerDay * 60, # Minutes
"MaxHoursPerDay",
)
)
# CONSTRAINT : LIMIT WORKING HOUR PER WEEEK
# currently, considering the whole vacancy is a week
# model.work_time_var = {}
# for ctactId in model.members.keys():
# model.work_time_var[ctactId] = model.sum(
# (
# model.shift_end_vars[(ctactId, objtId, shft)]
# - model.shift_start_vars[(ctactId, objtId, shft)]
# )
# for objtId in model.objecttime_ids.keys()
# for shft in range(0, MAX_SHIFT_PER_OBJECTTIME)
# )
# # -model.sum(
# # model.break_duration_vars[(ctactId, objtId, "{0}_br{1}".format(shft, brk))]
# # for objtId in model.objecttime_ids.keys()
# # for shft in range(0, MAX_SHIFT_PER_OBJECTTIME)
# # for brk in range(0, MAX_BREAK_PER_SHIFT)
# # )
# model.add_constraint(
# model.le_constraint(
# model.work_time_var[ctactId],
# model.shift_constraints.MaxHoursPerWeek * 60, # Minutes
# "MaxHoursPerWeek",
# )
# )
# Normal shift constraints
for ctactId, objtId in model.shift_assignment_vars.keys():
objt = model.objecttime_ids[objtId]
varKey = (ctactId, objtId)
shiftStart_var = model.shift_start_vars[varKey]
shiftEnd_var = model.shift_end_vars[varKey]
# Question: All values of objt.DateFrom will be 300? As
# model.objecttime_ids = {i: o for i, o in enumerate(lst[:1])}
# Set range for shift_start according to objectTime
model.add_constraint(
shiftStart_var >= objt.DateFrom, "Shift.Start>=Date.From",
)
# Question: All values of objt.DateFrom will be 1320? As
# model.objecttime_ids = {i: o for i, o in enumerate(lst[:1])}
# Set range for shift_end according to objectTime
model.add_constraint(
shiftEnd_var <= objt.DateTo, "Shift.End<=Date.To",
)
# Question: if shift is not assigned, shiftStart_var == shiftEnd_var can still
# be non-zero values
# if shift is not assigned
model.add_equivalence(
model.shift_assignment_vars[varKey],
shiftStart_var == shiftEnd_var,
true_value=0,
name="ShiftAssignment",
)
# else
model.add_equivalence(
model.shift_assignment_vars[varKey],
shiftEnd_var - shiftStart_var >= MIN_SHIFT_LENGTH,
true_value=1,
name="ShiftAssignment",
)
isScheduleBreak = model.binary_var()
model.add_equivalence(
isScheduleBreak,
shiftEnd_var - shiftStart_var
>= model.shift_constraints.ScheduledBreakHours.From + 1,
)
for brk in range(0, MAX_BREAK_PER_SHIFT):
brk_key = (ctactId, objtId, brk)
model.add_constraint(
model.break_start_vars[brk_key] >= shiftStart_var,
"Break.Start>=Shift.Start",
)
model.add_constraint(
model.break_duration_vars[brk_key] +
model.break_start_vars[brk_key]
<= shiftEnd_var,
"Break.Start+Duration<=Shift.End",
)
model.add_constraint(
model.indicator_constraint(
isScheduleBreak,
model.break_start_vars[brk_key]
>= shiftStart_var
+ model.shift_constraints.ScheduledBreakHours.From,
)
)
model.add_constraint(
model.indicator_constraint(
isScheduleBreak,
model.break_start_vars[brk_key] + DEFAULT_BREAK_LENGTH
<= shiftStart_var + model.shift_constraints.ScheduledBreakHours.To,
)
)
model.add_constraint(
model.indicator_constraint(
isScheduleBreak,
model.break_duration_vars[brk_key] == DEFAULT_BREAK_LENGTH,
)
)
model.add_constraint(
model.if_then(
model.break_duration_vars[brk_key] >= 1, # > 0
model.break_duration_vars[brk_key] == DEFAULT_BREAK_LENGTH,
),
"Break.Duration==0_Or_>=MIN",
)
if brk < MAX_BREAK_PER_SHIFT - 1:
next_brk_key = (ctactId, objtId, brk + 1)
model.add_constraint(
model.break_duration_vars[brk_key] +
model.break_start_vars[brk_key]
<= model.break_start_vars[next_brk_key],
"Break.Start+Duration<=NextBreak.Start",
)
# timeAvailability = lookup(
# model.availabilities,
# lambda i: (ctactId == i.ContactID)
# and getDate((i.TimeFrom + i.TimeTo) / 2)
# == getDate((objt.DateFrom + objt.DateTo) / 2),
# )
# # print(shiftStart_var,timeAvailability)
# if timeAvailability:
# "If this member is availabilities for this objecttime"
# # Set range for shift_end according to member Availability
# model.add_constraint(
# shiftStart_var >= timeAvailability.TimeFrom,
# "Shift.Start>=Availability.Start",
# )
# # Set range for shift_end according to member Availability
# model.add_constraint(
# shiftEnd_var <= timeAvailability.TimeTo,
# "Shift.End<=Availability.End",
# )
# else:
# "If a shift_var of a member who is not availabe -> Start == End"
# model.add_constraint(
# shiftStart_var == shiftEnd_var, "ShiftStart==ShiftEnd",
# )
# model.add_constraint(model.shift_assignment_vars[varKey] == 0
# CONSTRAINT: MAKE SURE THERE ARE ALWAYS 'Minimum People Working' AT ANY MOMENT
minPeopleWorking = model.shift_constraints.MinPeopleWorking
vacancyQuantiyRequirement = model.vacancy_detail.Quantity
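    # NOTE: the next two assignments replace the sheet-derived values above
    # with hard-coded overrides (12 and 5), presumably for testing.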
vacancyQuantiyRequirement = 12
minPeopleWorking = 5
for objtId, objt in tqdm(model.objecttime_ids.items()):
        # check staffing at the object-time boundaries and at each shift start/end
getShiftKeys = [
key for key in model.shift_assignment_vars.keys() if key[1] == objtId
]
# --------------------------------------
# Check Object-time Start
checkedStartTime = objt.DateFrom
shiftStartCheckingVarList = []
for thisShiftKey in getShiftKeys:
shiftCheckingVar = model.binary_var(
"ShiftCheckingVar_{0}_{1}".format(thisShiftKey, "START")
)
shiftStartCheckingVarList.append(shiftCheckingVar)
timeCheckingVar = model.binary_var()
model.add_equivalence(
timeCheckingVar,
checkedStartTime == model.shift_start_vars[thisShiftKey],
)
model.add_constraint(
shiftCheckingVar
== model.logical_and(
timeCheckingVar, model.shift_assignment_vars[thisShiftKey]
)
)
model.add_constraint(
model.sum(
shiftStartCheckingVarList) >= minPeopleWorking, "1.1.MinPeopleWorking"
)
model.add_constraint(
model.sum(
shiftStartCheckingVarList) <= vacancyQuantiyRequirement, "1.1.MaxObjectTimes"
)
# Check Object-time End
checkedEndTime = objt.DateTo
shiftEndCheckingVarList = []
objtimeEndCheckingVarList = []
for thisShiftKey in getShiftKeys:
shiftCheckingVar = model.binary_var(
"ShiftCheckingVar_{0}_{1}".format(thisShiftKey, "END")
)
objecttimeCheckingVar = model.binary_var(
"ObjtimeCheckingVar_{0}_{1}".format(thisShiftKey, "END")
)
shiftEndCheckingVarList.append(shiftCheckingVar)
objtimeEndCheckingVarList.append(objecttimeCheckingVar)
timeCheckingVar = model.binary_var()
model.add_equivalence(
timeCheckingVar, checkedEndTime == model.shift_end_vars[thisShiftKey],
)
workingCheckList = [timeCheckingVar]
for brk in range(0, MAX_BREAK_PER_SHIFT):
_key = (
thisShiftKey[0],
thisShiftKey[1],
brk
)
_brk_start = model.break_start_vars[_key]
_duration = model.break_duration_vars[_key]
_checkEndBreak_var = model.binary_var()
model.add_equivalence(
_checkEndBreak_var, checkedEndTime >= (
_brk_start + _duration + 1)
)
workingCheckList.append(_checkEndBreak_var)
workingCheckList.append(model.shift_assignment_vars[thisShiftKey])
model.add_constraint(shiftCheckingVar ==
model.logical_and(*workingCheckList))
model.add_constraint(objecttimeCheckingVar == model.logical_and(
timeCheckingVar, model.shift_assignment_vars[thisShiftKey]))
model.add_constraint(
model.sum(
shiftEndCheckingVarList) >= minPeopleWorking, "1.2.MinPeopleWorking"
)
model.add_constraint(
model.sum(
objtimeEndCheckingVarList) <= vacancyQuantiyRequirement, "1.2.MaxObjectTimes"
)
# Check with shift-start
for thisShiftKey in getShiftKeys:
thisShiftStart = model.shift_start_vars[thisShiftKey]
shiftStartCheckingVarList = []
objecttimeCheckingVarList = []
for otherShiftKey in getShiftKeys:
                # skip comparing this shift with itself
if (otherShiftKey[:1] == thisShiftKey[:1]):
continue
checkStart_var = model.binary_var()
checkEnd_var = model.binary_var()
checkShift_var = model.binary_var()
model.add_equivalence(
checkStart_var,
model.shift_start_vars[otherShiftKey] <= thisShiftStart,
)
model.add_equivalence(
checkEnd_var,
thisShiftStart <= model.shift_end_vars[otherShiftKey] - 1,
)
model.add_constraint(
checkShift_var
== model.logical_and(
checkEnd_var, checkStart_var
) # logicAND to check inside shiftRange
)
workingCheckList = [checkShift_var]
# check with breaks of this shift
breakCheck = []
for brk in range(0, MAX_BREAK_PER_SHIFT):
_key = (
otherShiftKey[0],
otherShiftKey[1],
brk,
)
_brk_start = model.break_start_vars[_key]
_duration = model.break_duration_vars[_key]
_check_break = model.binary_var()
_checkStartBreak_var = model.binary_var()
_checkEndBreak_var = model.binary_var()
                    # moment must be outside break-range
model.add_equivalence(
_checkStartBreak_var, thisShiftStart <= _brk_start - 1
)
model.add_equivalence(
_checkEndBreak_var, thisShiftStart >= (
_brk_start + _duration)
)
model.add_constraint(
_check_break
== model.logical_or(
_checkStartBreak_var, _checkEndBreak_var
                        )  # logicOR to check OUTside breakRange
)
breakCheck.append(_check_break)
workingCheckList.append(
model.shift_assignment_vars[thisShiftKey])
workingCheckList.append(
model.shift_assignment_vars[otherShiftKey])
shiftCheckingVar = model.binary_var(
"ShiftCheckingVar_Start_{0}_{1}".format(
thisShiftKey, otherShiftKey)
)
# objecttimeCheckingVar = model.binary_var(
# "ObjectTimeCheckingVar_Start_{0}_{1}".format(
# thisShiftKey, otherShiftKey)
# )
model.add_equivalence(shiftCheckingVar, model.logical_and(
*(workingCheckList+breakCheck)) == 1)
# model.add_equivalence(
# objecttimeCheckingVar, model.logical_and(*workingCheckList) == 1)
shiftStartCheckingVarList.append(shiftCheckingVar)
# objecttimeCheckingVarList.append(objecttimeCheckingVar)
model.add_indicator(
model.shift_assignment_vars[thisShiftKey],
model.sum(shiftStartCheckingVarList)
>= minPeopleWorking
- 1, # cause this checker is already a working moment
name="2.1.MinPeopleWorking",
)
# model.add_indicator(
# model.shift_assignment_vars[thisShiftKey],
# model.sum(objecttimeCheckingVarList)
# # cause this checker is already a working moment
# <= vacancyQuantiyRequirement - 1,
# name="2.1.MaxObjectTimes",
# )
# Check with shift-end
for thisShiftKey in getShiftKeys:
thisShiftEnd = model.shift_end_vars[thisShiftKey]
checkingVarList = []
for otherShiftKey in getShiftKeys:
if (otherShiftKey == thisShiftKey): # not check with this shift it-self
continue
shiftCheckingVar = model.binary_var(
"ShiftCheckingVar_End_{0}_{1}".format(
thisShiftKey, otherShiftKey)
)
checkStart_var = model.binary_var()
checkEnd_var = model.binary_var()
checkShift_var = model.binary_var()
model.add_equivalence(
checkStart_var,
model.shift_start_vars[otherShiftKey] <= thisShiftEnd,
)
model.add_equivalence(
checkEnd_var, thisShiftEnd <= model.shift_end_vars[otherShiftKey] - 1
)
model.add_constraint(
checkShift_var
== model.logical_and(
checkEnd_var, checkStart_var
) # logicAND to check inside shiftRange
)
arr = [checkShift_var]
# check with breaks of this shift
for brk in range(0, MAX_BREAK_PER_SHIFT):
_key = (
otherShiftKey[0],
otherShiftKey[1],
brk
)
_brk_start = model.break_start_vars[_key]
_duration = model.break_duration_vars[_key]
_check_break = model.binary_var()
_checkStartBreak_var = model.binary_var()
_checkEndBreak_var = model.binary_var()
                    # moment must be outside break-range
model.add_equivalence(
_checkStartBreak_var, thisShiftEnd <= _brk_start - 1)
model.add_equivalence(
_checkEndBreak_var, thisShiftEnd >= (
_brk_start + _duration)
)
model.add_constraint(
_check_break
== model.logical_or(
_checkStartBreak_var, _checkEndBreak_var
                        )  # logicOR to check OUTside breakRange
)
arr.append(_check_break)
arr.append(model.shift_assignment_vars[thisShiftKey])
arr.append(model.shift_assignment_vars[otherShiftKey])
model.add_equivalence(
shiftCheckingVar, model.logical_and(*arr) == 1)
checkingVarList.append(shiftCheckingVar)
isNotEndDay = model.binary_var()
model.add_equivalence(
isNotEndDay,
thisShiftEnd <= objt.DateTo-1,
)
model.add_constraint(
model.if_then(
model.logical_and(
model.shift_assignment_vars[thisShiftKey],
isNotEndDay
) == 1,
model.sum(checkingVarList) >= minPeopleWorking
),
ctname="2.2.MinPeopleWorking"
)
for thisShiftKey in getShiftKeys:
ctactId, __ = thisShiftKey
for brk in range(0, MAX_BREAK_PER_SHIFT):
thisBreakKey = (ctactId, objtId, brk)
thisBreakStart = model.break_start_vars[thisBreakKey]
breakStartCheckingVarList = []
for otherShiftKey in getShiftKeys:
if otherShiftKey == thisShiftKey: # not check 2 shifts of the same day of a member
continue
shiftCheckingVar = model.binary_var(
"ShiftCheckingVar_{0}_{1}".format(
thisBreakKey, otherShiftKey)
)
checkStart_var = model.binary_var()
checkEnd_var = model.binary_var()
checkShift_var = model.binary_var()
model.add_equivalence(
checkStart_var,
model.shift_start_vars[otherShiftKey] <= thisBreakStart,
)
model.add_equivalence(
checkEnd_var,
thisBreakStart <= model.shift_end_vars[otherShiftKey] - 1,
)
model.add_constraint(
checkShift_var
== model.logical_and(
checkEnd_var, checkStart_var
) # logicAND to check inside shiftRange
)
workingCheckList = [checkShift_var]
# check with breaks of this shift
for brk in range(0, MAX_BREAK_PER_SHIFT):
_key = (
otherShiftKey[0],
otherShiftKey[1],
brk,
)
_brk_start = model.break_start_vars[_key]
_duration = model.break_duration_vars[_key]
_check_break = model.binary_var()
_checkStartBreak_var = model.binary_var()
_checkEndBreak_var = model.binary_var()
                        # moment must be outside break-range
model.add_equivalence(
_checkStartBreak_var, thisBreakStart <= _brk_start - 1
)
model.add_equivalence(
_checkEndBreak_var,
thisBreakStart >= (_brk_start + _duration),
)
model.add_constraint(
_check_break
== model.logical_or(
_checkStartBreak_var, _checkEndBreak_var
) # # logicOR to check OUTside breakRange
)
workingCheckList.append(_check_break)
workingCheckList.append(
model.shift_assignment_vars[thisShiftKey])
workingCheckList.append(
model.shift_assignment_vars[otherShiftKey])
model.add_equivalence(
shiftCheckingVar, model.logical_and(
*workingCheckList) == 1
)
breakStartCheckingVarList.append(shiftCheckingVar)
model.add_constraint(
model.if_then(
model.break_duration_vars[thisBreakKey] >= 1,
model.sum(
                            breakStartCheckingVarList) >= minPeopleWorking,
),
"3.MinPeopleWorking",
)
# for thisShiftKey in getShiftKeys:
# ctactId, __, shft = thisShiftKey
# for brk in range(0, MAX_BREAK_PER_SHIFT):
# thisBreakKey = (ctactId, objtId, "{0}_br{1}".format(shft, brk))
# thisBreakStart = model.break_start_vars[thisBreakKey]
# shiftCheckingVarList = []
# for otherShiftKey in getShiftKeys:
# if (
# otherShiftKey[:1] == thisShiftKey[:1]
# ): # not check 2 shifts of the same day of a member
# continue
# shiftCheckingVar = model.binary_var(
# "ObjectTimeCheckingVar_{0}_{1}".format(thisBreakKey, otherShiftKey)
# )
# checkStart_var = model.binary_var()
# checkEnd_var = model.binary_var()
# check_shift = model.binary_var()
# model.add_equivalence(
# checkStart_var,
# model.shift_start_vars[otherShiftKey] <= thisBreakStart,
# )
# model.add_equivalence(
# checkEnd_var,
# thisBreakStart <= model.shift_end_vars[otherShiftKey],
# )
# model.add_constraint(
# check_shift
# == model.logical_and(
# checkEnd_var, checkStart_var
# ) # logicAND to check inside shiftRange
# )
# workingCheckList = [check_shift]
# workingCheckList.append(model.shift_assignment_vars[thisShiftKey])
# workingCheckList.append(model.shift_assignment_vars[otherShiftKey])
# model.add_equivalence(
# shiftCheckingVar, model.logical_and(*workingCheckList) == 1
# )
# shiftCheckingVarList.append(shiftCheckingVar)
# model.add_constraint(
# model.if_then(
# model.break_duration_vars[thisBreakKey] >= 1,
# model.sum(shiftCheckingVarList) <= vacancyQuantiyRequirement - 1,
# ),
# "4.MinPeopleWorking",
# )
# model.add_constraint(
# len(model.members) * model.average_member_work_time
# == model.sum(model.work_time_var[n] for n in model.members),
# "AverageWorkTime",
# )
# list(
# model.add_constraint(
# model.work_time_var[ctactId] == model.average_member_work_time
# + model.member_over_average_time_vars[ctactId]
# - model.member_under_average_time_vars[ctactId],
# "AverageWorkTime"
# )
# for ctactId in model.members.keys()
# )
# model.total_salary_cost = model.sum(
# (
# (model.shift_end_vars[key] - model.shift_start_vars[key])
# - model.sum(
# model.break_duration_vars[key[0], key[1], "{0}_br{1}".format(key[2], brk)]
# for brk in range(0, MAX_BREAK_PER_SHIFT)
# )
# )
# for key in model.shift_assignment_vars.keys()
# )
return
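# Illustrative sketch (not called anywhere): the equivalence/indicator pattern
# used throughout setup_constraints to tie a binary "checker" variable to a
# linear condition. All names below are hypothetical.
def _demo_equivalence_pattern():
    demo = Model("EquivalencePatternDemo")
    x = demo.integer_var(lb=0, ub=10, name="x")
    y = demo.integer_var(lb=0, ub=10, name="y")
    is_large = demo.binary_var(name="is_large")
    # is_large == 1 exactly when x >= 5 (equivalence enforces both directions)
    demo.add_equivalence(is_large, x >= 5, true_value=1)
    # only when is_large == 1 is y forced to be at least x (one direction only)
    demo.add_indicator(is_large, y >= x)
    return demo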
def setup_objective(model: Model):
total_members_assigment = model.sum(model.member_assignment_vars)
model.add_kpi(total_members_assigment, "Total selected members")
# model.add_kpi(model.total_salary_cost, "Total salary cost")
total_shift_assignment = model.sum(model.shift_assignment_vars)
model.add_kpi(total_shift_assignment, "Total number of assignments")
# model.add_kpi(model.average_member_work_time, "average work time")
# total_over_average_worktime = model.sum(
# model.member_over_average_time_vars[n] for n in model.members
# )
# total_under_average_worktime = model.sum(
# model.member_under_average_time_vars[n] for n in model.members
# )
# model.add_kpi(total_over_average_worktime, "Total over-average worktime")
# model.add_kpi(total_under_average_worktime, "Total under-average worktime")
# total_fairness = total_over_average_worktime + total_under_average_worktime
# model.add_kpi(total_fairness, "Total fairness")
model.minimize(
total_members_assigment
# model.total_salary_cost
# + total_fairness
# + total_shift_assignment
)
return
def print_information(model: Model):
print("#member=%d" % len(model.availabilities))
model.print_information()
model.report_kpis()
def print_solution(model: Model):
print("*************************** Solution ***************************")
def solve(model: Model, **kwargs):
    # Here, we set the number of threads for CPLEX to 16 and cap the solve time at 36000 seconds (10 hours).
model.parameters.threads = 16
model.parameters.timelimit = 36000 # solver should not take more than that !
sol = model.solve(log_output=True, **kwargs)
if sol is not None:
print("solution for a cost of {}".format(model.objective_value))
print_information(model)
print_solution(model)
return model.objective_value
else:
print("* model is infeasible")
return None
def build(context=None, verbose=False, **kwargs):
mdl = Model("Members", context=context, **kwargs)
print("Loading data")
load_data(mdl, excel_data_file, verbose=verbose)
print("Setting up data")
setup_data(mdl)
print("Setting up variable")
setup_variables(mdl)
print("Setting up constraint")
setup_constraints(mdl)
print("Setting up objectives")
setup_objective(mdl)
return mdl
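# Typical driver (sketch, assuming a local CPLEX installation; not part of the
# original flow):
#   mdl = build(verbose=True)
#   objective = solve(mdl)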
def displayModel(model: Model):
df_shift = pd.DataFrame()
df_break = | pd.DataFrame() | pandas.DataFrame |
import io
import time
import json
from datetime import datetime
import pandas as pd
from pathlib import Path
import requests
drop_cols = [
'3-day average of daily number of positive tests (may count people more than once)',
'daily total tests completed (may count people more than once)',
'3-day average of new people who tested positive (counts first positive lab per person)',
'3-day average of currently hospitalized',
'daily number of vaccine doses administered beyond the primary series '
]
def save_file(df, file_path, current_date):
# save/update file
if not Path(file_path).exists():
df.to_csv(file_path, index=False)
else:
# get prior file date
prior = pd.read_csv(file_path, parse_dates=['date'])
prior_date = pd.to_datetime(prior['date'].max()).date()
if current_date > prior_date:
df.to_csv(file_path, mode='a', header=False, index=False)
return
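# Usage sketch: save_file only appends when current_date is strictly newer than
# the latest date already stored in the CSV, e.g.
#   save_file(df, './data/raw/ri-covid-19.csv', new_date)  # new_date: datetime.date
# so re-running the scraper for an already-saved day leaves the file unchanged.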
def scrape_sheet(sheet_id):
# load previous raw_data and get prior date
raw_general = './data/raw/ri-covid-19.csv'
df = pd.read_csv(raw_general, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
    # wait till 5:05pm, then poll every 5 mins until the sheet shows a new date
target = datetime.now().replace(hour=17).replace(minute=5)
while datetime.now() < target:
print(f"[status] waiting for 5pm", end='\r')
time.sleep(60)
# load data from RI - DOH spreadsheet
gen_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}264100583'
df = pd.read_csv(gen_url).dropna(axis=1, how='all')
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
if df.shape[0] != 27:
print('[ERROR: summary page format changed]')
while not prior_date < date:
print(f"[status] waiting for update...{time.strftime('%H:%M')}", end='\r')
time.sleep(5 * 60)
df = pd.read_csv(gen_url)
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
else:
print('[status] found new update pausing for 2 mins')
time.sleep(2 * 60)
## transform general sheet
df['date'] = date
df.columns = ['metric', 'count', 'date']
save_file(df, raw_general, date)
## scrape geographic sheet
geo_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}901548302'
geo_df = pd.read_csv(geo_url)
# get grographic date & fix cols
geo_date = geo_df.iloc[-1][1]
geo_date = pd.to_datetime(geo_date)
geo_df['date'] = geo_date
cols = [x for x in list(geo_df) if 'Rate' not in x]
geo_df = geo_df[cols]
geo_df = geo_df.dropna(axis=0)
geo_df.columns = ['city_town', 'count', 'hostpialized', 'deaths', 'fully_vaccinated', 'date']
# save file
raw_geo = './data/raw/geo-ri-covid-19.csv'
save_file(geo_df, raw_geo, geo_date)
## scrape demographics sheet
dem_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}31350783'
dem_df = pd.read_csv(dem_url)
# make sure no columns were added/removed
if not dem_df.shape == (31, 9):
print('[error] demographics format changed')
return
else:
# get demographics updated date
dem_date = dem_df.iloc[-1][1]
dem_date = pd.to_datetime(dem_date).tz_localize('EST').date()
# drop percentage columns & rename
dem_df = dem_df.drop(dem_df.columns[[1, 2, 4, 6, 8]], axis=1)
dem_df.columns = ['metric', 'case_count', 'hosptialized', 'deaths']
# get data
sex = dem_df[1:4]
age = dem_df[5:17]
race = dem_df[18:24]
dem_df = pd.concat([sex, age, race])
dem_df['date'] = dem_date
raw_dem = './data/raw/demographics-covid-19.csv'
save_file(dem_df, raw_dem, dem_date)
def scrape_revised(sheet_id):
# load previous revised_data and get prior date
raw_revised = './data/raw/revised-data.csv'
df = pd.read_csv(raw_revised, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
# load revised sheet & fix column names
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}1592746937'
df = pd.read_csv(url, parse_dates=['Date'])
df.columns = [x.lower() for x in list(df)]
# test to try and make sure columns dont change
if df.shape[1] != 36 or list(df)[6] != 'daily total tests completed (may count people more than once)':
print('[error] revised sheet columns changed')
return
# check if updated
if df['date'].max() > prior_date:
df = df.drop(columns=drop_cols)
# re order columns
move_cols = (list(df)[6:11] + list(df)[22:31])
cols = [x for x in list(df) if x not in move_cols]
cols.extend(move_cols)
df = df[cols]
df['date_scraped'] = datetime.strftime(datetime.now(), '%m/%d/%Y')
save_file(df, raw_revised, df['date'].max())
def scrape_nursing_homes(sheet_id):
# load prior date
raw_facility = './data/raw/nurse-homes-covid-19.csv'
df = pd.read_csv(raw_facility, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}500394186'
df = pd.read_csv(url)
# get date of last update
date = df.iloc[0,0].split(' ')[-1]
date = pd.to_datetime(date).tz_localize('EST').date()
if not date > prior_date:
print('\n[status] nursing homes:\tno update')
return
else:
# fix headers
df.columns = df.iloc[1]
# drop past 14 days column
df = df.drop(columns='New Resident Cases (in past 14 days)')
df['Facility Name'] = df['Facility Name'].str.replace(u'\xa0', ' ') # random unicode appeared
# fix dataframe shape
assisted = df[df['Facility Name'] == 'Assisted Living Facilities'].index[0]
nursing_homes = df[3:assisted].copy()
assisted_living = df[assisted+1:-1].copy()
# add facility type & recombine
nursing_homes['type'] = 'nursing home'
assisted_living['type'] = 'assisted living'
df = pd.concat([nursing_homes, assisted_living]).reset_index(drop=True)
# add date
df['date'] = date
save_file(df, raw_facility, date)
print('[status] nursing homes:\tupdated')
def scrape_zip_codes(sheet_id):
# load prior date
raw_zip = './data/raw/zip-codes-covid-19.csv'
df = pd.read_csv(raw_zip, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}365656702'
df = | pd.read_csv(url) | pandas.read_csv |
# Uses data of Heineken stock value from nov 18 1996 until jul 20 2018 on a daily basis
# This is a TensorFlow example
import pandas as pd
import numpy as np
def read_goog_sp500_dataframe():
"""Returns a dataframe with the results for Google and S&P 500"""
# Point to where you've stored the CSV file on your local machine
googFile = 'data/GOOG.csv'
spFile = 'data/SP_500.csv'
goog = pd.read_csv(googFile, sep=",", usecols=[0, 5], names=['Date', 'Goog'], header=0)
sp = | pd.read_csv(spFile, sep=",", usecols=[0, 5], names=['Date', 'SP500'], header=0) | pandas.read_csv |
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = | Series("foo", index=index) | pandas.Series |
"""-----------------------------------------------------------------------------
bidsIncremental.py
Implements the BIDS Incremental data type used for streaming BIDS data between
different applications.
-----------------------------------------------------------------------------"""
from copy import deepcopy
from operator import eq as opeq
from typing import Any, Callable
import json
import os
from bids.layout import BIDSImageFile
from bids.layout.writing import build_path as bids_build_path
import logging
import nibabel as nib
import numpy as np
import pandas as pd
from rtCommon.bidsCommon import (
BIDS_DIR_PATH_PATTERN,
BIDS_FILE_PATTERN,
BidsFileExtension,
DATASET_DESC_REQ_FIELDS,
DEFAULT_DATASET_DESC,
DEFAULT_EVENTS_HEADERS,
DEFAULT_README,
PYBIDS_PSEUDO_ENTITIES,
adjustTimeUnits,
correct3DHeaderTo4D,
correctEventsFileDatatypes,
filterEntities,
getNiftiData,
loadBidsEntities,
metadataFromProtocolName,
symmetricDictDifference,
writeDataFrameToEvents,
)
from rtCommon.errors import MissingMetadataError
logger = logging.getLogger(__name__)
class BidsIncremental:
ENTITIES = loadBidsEntities()
REQUIRED_IMAGE_METADATA = ['subject', 'task', 'suffix', 'datatype',
'RepetitionTime']
"""
BIDS Incremental data format suitable for streaming BIDS Archives
"""
def __init__(self, image: nib.Nifti1Image, imageMetadata: dict,
datasetDescription: dict = None):
"""
Initializes a BIDS Incremental object with provided image and metadata.
Args:
image: NIfTI image as an NiBabel NiftiImage or PyBids BIDSImageFile
imageMetadata: Metadata for image, which must include all variables
in BidsIncremental.REQUIRED_IMAGE_METADATA.
datasetDescription: Top-level dataset metadata for the BIDS dataset
to be placed in a dataset_description.json. Defaults to None and
a default description is used.
Raises:
MissingMetadataError: If any required metadata is missing.
TypeError: If the image is not an Nibabel Nifti1Image or
Nifti2Image.
Examples:
>>> import nibabel as nib
>>> imageMetadata = {'subject': '01', 'task': 'test',
'suffix': 'bold', 'datatype': 'func',
'RepetitionTime': 1.5}
>>> image = nib.load('/tmp/testfile.nii')
>>> datasetDescription = {'Name': 'Example Dataset',
'BIDSVersion': '1.5.1',
'Authors': 'The RT-Cloud Authors'}
>>> incremental = BidsIncremental(image, imageMetadata,
datasetDescription)
>>> print(incremental)
"Image shape: (64, 64, 27, 1); Metadata Key Count: 6; BIDS-I
Version: 1"
"""
# TODO(spolcyn): Enable a BIDS incremental to store an index that
# specifies where the image should be inserted into the archive. This
# would extend capabilities beyond just appending.
""" Do basic input validation """
# IMAGE
validTypes = [nib.Nifti1Image, nib.Nifti2Image, BIDSImageFile]
if image is None or type(image) not in validTypes:
raise TypeError("Image must be one of " +
str([typ.__name__ for typ in validTypes]) +
f"(got {type(image)})")
if type(image) is BIDSImageFile:
image = image.get_image()
# DATASET DESCRIPTION
if datasetDescription is not None:
missingFields = [field for field in DATASET_DESC_REQ_FIELDS
if datasetDescription.get(field, None) is None]
if missingFields:
raise MissingMetadataError(
f"Dataset description needs: {str(missingFields)}")
""" Process, validate, and store image metadata """
imageMetadata = self._preprocessMetadata(imageMetadata)
self._exceptIfMissingMetadata(imageMetadata)
self._imgMetadata = self._postprocessMetadata(imageMetadata)
""" Store dataset description"""
if datasetDescription is None:
self.datasetDescription = deepcopy(DEFAULT_DATASET_DESC)
else:
self.datasetDescription = deepcopy(datasetDescription)
""" Validate and store image """
# Remove singleton dimensions past the 3rd dimension
# Note: this function does not remove trailing 1's if the image is 3-D,
# (i.e., 160x160x1 image will retain that shape), so a later check is
# needed to ensure that the 3rd dimension is > 1
image = nib.funcs.squeeze_image(image)
# BIDS-I is currently used for BOLD data, and according to the BIDS
# Standard, BOLD data must be in 4-D NIfTI files. Thus, upgrade 3-D to
# 4-D images with singleton final dimension, if necessary.
imageShape = image.shape
if len(imageShape) < 3:
raise ValueError("Image must have at least 3 dimensions")
elif len(imageShape) == 3:
if imageShape[2] <= 1:
raise ValueError("Image's 3rd (and any higher) dimensions are "
" <= 1, which means it is a 2D image; images "
"must have at least 3 dimensions")
newData = np.expand_dims(getNiftiData(image), -1)
image = image.__class__(newData, image.affine, image.header)
correct3DHeaderTo4D(image, self._imgMetadata['RepetitionTime'])
assert len(image.shape) == 4
self.image = image
# Configure README
self.readme = DEFAULT_README
# Configure events file
self.events = pd.DataFrame(columns=DEFAULT_EVENTS_HEADERS)
self.events = correctEventsFileDatatypes(self.events)
# BIDS-I version for serialization
self.version = 1
def __str__(self):
return ("Image shape: {}; Metadata Key Count: {}; BIDS-I Version: {}"
.format(self.getImageDimensions(),
len(self._imgMetadata.keys()),
self.version))
def __eq__(self, other):
def reportDifference(valueName: str, d1: dict, d2: dict,
equal: Callable[[Any, Any], bool] = opeq) -> None:
logger.debug(valueName + " didn't match")
difference = symmetricDictDifference(d1, d2, equal)
logger.debug(valueName + " difference: %s", difference)
# Compare image headers
if self.image.header != other.image.header:
reportDifference("Image headers",
dict(self.image.header),
dict(other.image.header),
np.array_equal)
return False
# Compare image metadata
if self._imgMetadata != other._imgMetadata:
reportDifference("Image metadata",
self._imgMetadata,
other._imgMetadata,
np.array_equal)
return False
# Compare full image data
if not np.array_equal(self.getImageData(), other.getImageData()):
differences = self.getImageData() != other.getImageData()
logger.debug("Image data didn't match")
logger.debug("Difference count: %d (%f%%)",
np.sum(differences),
np.sum(differences) / np.size(differences) * 100.0)
return False
# Compare dataset description
if self.datasetDescription != other.datasetDescription:
reportDifference("Dataset description",
self.datasetDescription,
other.datasetDescription)
return False
if not self.readme == other.readme:
logger.debug(f"Readmes didn't match\nself: {self.readme}\n"
f"other: {other.readme}")
return False
if not | pd.DataFrame.equals(self.events, other.events) | pandas.DataFrame.equals |
import datetime
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sqlalchemy import create_engine
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
engine = create_engine(
"postgresql://{PGUSER}:{PGPASSWORD}@{PGHOST}:5432/postgres".format(**os.environ)
)
def _build_activity_heatmap(timeseries_data):
d1 = timeseries_data["date"].min().date()
d2 = timeseries_data["date"].max().date()
delta = d2 - d1
    # build a list with one date for each day in the [d1, d2] range
dates_in_year = [d1 + datetime.timedelta(i) for i in range(delta.days + 1)]
joined = (
pd.Series(dates_in_year)
.to_frame()
.set_index(0)
.join(timeseries_data.set_index("date"))
)
# the activity values to actually plot in the heatmap
# dates when the joined data isn't null are days when
# there were activity
z = (~joined.isna()).astype(int)["values"].values
# gives something like list of strings like '2018-01-25' for each date.
# Used in data trace to make good hovertext.
text = [str(i) for i in dates_in_year]
# 4cc417 green #347c17 dark green
colorscale = [[False, "#eeeeee"], [True, "#76cf63"]]
trace = go.Heatmap(
# horizontally index on the most recent monday
# day - day.weekday() gives the most recent monday
x=[(day - datetime.timedelta(days=day.weekday())) for day in dates_in_year],
# vertically index on the day of week for each date
y=[day.weekday() for day in dates_in_year],
z=z,
text=text,
hoverinfo="text",
        xgap=3,  # this
        ygap=3,  # and this is used to make the grid-like appearance
showscale=False,
colorscale=colorscale,
)
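    # e.g. a workout on Wednesday 2021-01-06 lands in the column of Monday 2021-01-04
    # (x) and in row 2 (y, Wednesday), giving the GitHub-style contribution grid.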
layout = go.Layout(
height=280,
yaxis=dict(
showline=False,
showgrid=False,
zeroline=False,
tickmode="array",
ticktext=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
tickvals=[0, 1, 2, 3, 4, 5, 6],
),
xaxis=dict(
showline=False,
showgrid=False,
zeroline=False,
),
font={"size": 10, "color": "#9e9e9e"},
plot_bgcolor=("#fff"),
margin=dict(t=40),
)
return trace, layout
def build_activity_indicator(timeseries_data, indicator_value, indicator_name):
"""Build a activity log + indicator figure."""
fig = make_subplots(
rows=1,
cols=2,
column_widths=[0.7, 0.3],
specs=[[{"type": "heatmap"}, {"type": "indicator"}]],
)
trace, layout = _build_activity_heatmap(timeseries_data)
# add the timeseries scatter plot
fig.add_trace(
trace,
row=1,
col=1,
)
# add summary statistic "indicator"
fig.add_trace(
go.Indicator(
mode="number",
value=indicator_value,
name=indicator_name,
),
row=1,
col=2,
)
fig.update_layout(layout)
fig.update_layout(
template={"data": {"indicator": [{"title": {"text": indicator_name}}]}},
)
return fig
def build_timeseries_indicator(timeseries_data, indicator_value, indicator_name):
"""Build a timeseries + indicator figure."""
fig = make_subplots(
rows=1,
cols=2,
column_widths=[0.7, 0.3],
specs=[[{"type": "xy"}, {"type": "indicator"}]],
)
# add the timeseries scatter plot
fig.add_trace(
go.Scatter(
mode="markers", x=timeseries_data["date"], y=timeseries_data["values"]
),
row=1,
col=1,
)
# add the rolling average timeseries line plot
rolling = (
timeseries_data.set_index("date")
.rolling(4, win_type="triang", center=True)
.mean()
.dropna()
.reset_index()
)
fig.add_trace(
go.Scatter(mode="lines", x=rolling["date"], y=rolling["values"]),
row=1,
col=1,
)
# add summary statistic "indicator"
fig.add_trace(
go.Indicator(
mode="number",
value=indicator_value,
name=indicator_name,
),
row=1,
col=2,
)
fig.update_layout(
template={"data": {"indicator": [{"title": {"text": indicator_name}}]}},
)
return fig
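# A minimal usage sketch (not part of the original dashboard): the 'date'/'values'
# column names follow the contract of the prep_* helpers below, while the demo data
# and the "Weight (kg)" title are purely illustrative assumptions.
def _example_weight_figure():
    demo = pd.DataFrame(
        {
            "date": pd.date_range("2021-01-01", periods=30, freq="D"),
            "values": [80.0 - 0.05 * i for i in range(30)],
        }
    )
    # the indicator panel shows the latest value next to the scatter + rolling-mean plot
    return build_timeseries_indicator(demo, demo["values"].iloc[-1], "Weight (kg)")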
def prep_calorie_data(engine, start_date):
"""Read and process calorie data.
Returns
-------
pd.DataFrame
A dataframe having columns ['date', 'values']
float
A scalar value
"""
# TODO: move the ETL into dbt
mfp_data = pd.read_sql("SELECT * FROM myfitnesspal.totals", engine)
fitbit_data = pd.read_sql("SELECT * FROM fitbit.activity", engine)
fitbit_data = fitbit_data.set_index(pd.to_datetime(fitbit_data["date"]))[
["calories_out"]
]
fitbit_data = fitbit_data.loc[fitbit_data["calories_out"] != 0]
mfp_data = mfp_data.groupby("date").sum()[["calories"]]
mfp_data = mfp_data.loc[mfp_data["calories"] != 0]
joined = mfp_data.join(fitbit_data, how="inner")
joined = joined[start_date:]
joined["values"] = joined.calories - joined.calories_out
# indicator for calories is mean delta over past seven days
today = datetime.date.today()
week_ago = today - datetime.timedelta(weeks=1)
return (
joined["values"].reset_index(),
joined.loc[week_ago.isoformat():, "values"].mean(),
)
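# Hedged wiring sketch: the (timeseries, scalar) pair returned above is meant to feed
# the figure builders, e.g.
#   calorie_ts, calorie_week_mean = prep_calorie_data(engine, "2021-01-01")
#   fig = build_timeseries_indicator(calorie_ts, calorie_week_mean, "kcal delta")
# where the start date and indicator title are illustrative, not taken from the source.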
def prep_weight_data(engine, start_date):
"""Read and process weight data.
Returns
-------
pd.DataFrame
A dataframe having columns ['date', 'values']
float
A scalar value
"""
# TODO: move the ETL into dbt
# including casting 'date' to a datetime column in postgres
# including back/forward filling (or interpolating)
weight_data = | pd.read_sql("SELECT * FROM fitbit.weight", engine) | pandas.read_sql |
import datetime
from pathlib import Path
# from html.parser import HTMLParser
from typing import Tuple
# 3rd party
import datefinder
import pandas as pd
#%%
def get_starttime_from_parser(parser, source=False) -> Tuple[datetime.datetime, str]:
"""takes the parser and returns a datetime object, start time of experiment read out from metadata"""
start_time = None
source = ""
if "VersaStudioParser" in parser.__class__.__qualname__:
# isinstance(parser, VersaStudioParser):
# cast metadata from parser into DataFrame
time = parser.metadata.get("experiment", {}).get("TimeAcquired", "")
date = parser.metadata.get("experiment", {}).get("DateAcquired", "")
date_time = time + " " + date
dates = list(datefinder.find_dates(date_time, source=True))
if len(dates) == 1:
start_time, source = dates[0]
return start_time, source
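# Illustration of the datefinder step above (hedged example values): for metadata such
# as TimeAcquired='10:04:31 AM' and DateAcquired='2/14/2022', the call
# datefinder.find_dates('10:04:31 AM 2/14/2022', source=True) yields a single
# (datetime, matched_text) tuple, which is unpacked into (start_time, source).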
def cast_parser_to_dataframe(parser):
"""takes the parser and returns DataFrames from the metadata, actions and data of the files"""
metadata, actions, data = None, None, None
pm_dict, pa_dict, data_dict = {}, {}, {}
if "VersaStudioParser" in parser.__class__.__qualname__:
# isinstance(parser, VersaStudioParser) or
# cast metadata from parser into DataFrame
pm_dict = parser.metadata.copy()
metadata = pd.DataFrame(pm_dict).T
# cast actions from parser into DataFrame
pa_dict = parser.actions.copy()
actions = | pd.DataFrame(pa_dict) | pandas.DataFrame |
from flask import Flask, render_template, redirect, url_for, request, send_file
import pandas as pd
import numpy as np
from pandas import DataFrame, read_csv
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.pyplot as plt
import jinja2
plt.style.use('ggplot')
from io import BytesIO
import seaborn as sns
app = Flask(__name__)
#df = pd.read_csv("movie_metadataproject.csv")
#df["budget"] = df["budget"].fillna(0)
#df["gross"] = df["gross"].fillna(0)
#df['Profit'] = df['gross'] - df['budget']
@app.route("/", methods=['GET', 'POST'])
def home():
return render_template("home.html")
@app.route("/input", methods = ['POST','GET'])
def input():
if request.method == 'POST':
moviename = request.form["moviename"]
directorname = request.form["dname"]
actor1 = request.form["a1name"]
actor2 = request.form["a2name"]
actor3 = request.form["a3name"]
genres = request.form.getlist("genre")
language = request.form.get("lang")
genred = concatenate_list(genres)
iparray = [language,directorname,actor1,actor2,actor3,moviename,genred,0,0,0]
        df = pd.read_csv("movie_metadataproject.csv")
        # print(df.shape)
df["budget"] = df["budget"].fillna(0)
df["gross"] = df["gross"].fillna(0)
df['Profit'] = df['gross'] - df['budget']
df = df.drop(['aspect_ratio','movie_imdb_link','plot_keywords'],axis =1)
df = df.dropna()
#print(df.shape)
        # languages excluded from the analysis
        excluded_languages = [
            "Telugu", "Arabic", "Aramaic", "Bosnian", "Czech", "Dzongkha",
            "Filipino", "Hungarian", "Icelandic", "Kazakh", "Maya", "Mongolian",
            "None", "Romanian", "Russian", "Swedish", "Vietnamese", "Zulu",
        ]
        df = df[~df['language'].isin(excluded_languages)]
df_usefuldata = df[['language','director_name','actor_1_name','actor_2_name','actor_3_name','movie_title','genres','gross','budget','Profit']]
df_usefuldata = df_usefuldata.dropna()
        df_appendedlang = df_usefuldata.append(
            pd.Series(iparray, index=df_usefuldata.columns), ignore_index=True
        )
#print(df_appendedlang.shape)
df_appendedlang1 = df_appendedlang[df_appendedlang['language'] != 'None']
df_appendedlang1 = df_appendedlang1.dropna()
#print(df_appendedlang1.shape)
column_values1 = df_appendedlang1["language"].unique().tolist()
#print(column_values1)
column_values2 = df_appendedlang1["director_name"].unique().tolist()
df_appendedlang2 = df_appendedlang1
#df_appendedlang3 = df_appendedlang1
for value in column_values1:
df_appendedlang2 = pd.concat([df_appendedlang2,pd.get_dummies(value)], axis=1)
for value in column_values1:
df_appendedlang2[value] = 0
for value in column_values1:
df_appendedlang2.loc[df_appendedlang2['language'] == value,value] = 1
drop_cols = ['language','genres','movie_title','director_name','actor_1_name','actor_2_name','actor_3_name','gross','budget','Profit']
for dropCol in drop_cols:
df_appendedlang2 = df_appendedlang2.drop(dropCol,axis=1)
df_appendedlang2 = df_appendedlang2.dropna()
#print(df_appendedlang2)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 29)
kmeans = kmeans.fit(df_appendedlang2)
df_appendedlang2['cluster'] = kmeans.labels_
df_appendedlang3 = pd.concat([df_appendedlang1,df_appendedlang2], axis=1, ignore_index=False)
df_appendedlang3 = df_appendedlang3.dropna()
df_appendedlang3 = df_appendedlang3.loc[df_appendedlang3['language'] == iparray[0]]
df_appendedlang3 = df_appendedlang3.drop('cluster',axis=1)
for value in column_values1:
df_appendedlang3 = df_appendedlang3.drop(value,axis=1)
# print(df_appendedlang3.shape)
df_appendedlang4 = df_appendedlang3
for value in column_values2:
df_appendedlang4 = pd.concat([df_appendedlang4,pd.get_dummies(value)], axis=1)
for value in column_values2:
df_appendedlang4[value] = 0
for value in column_values2:
df_appendedlang4.loc[df_appendedlang4['director_name'] == value,value] = 1
for dropCol in drop_cols:
df_appendedlang4 = df_appendedlang4.drop(dropCol,axis=1)
# print(df_appendedlang4.shape)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 3)
kmeans = kmeans.fit(df_appendedlang4)
df_appendedlang4['cluster'] = kmeans.labels_
df_appendedlang5 = pd.concat([df_appendedlang3,df_appendedlang4], axis=1, ignore_index=False)
df_appendedlang5 = df_appendedlang5.dropna()
cnum = df_appendedlang5.loc[df_appendedlang5['director_name'] == iparray[1],'cluster']
print(cnum.size)
if cnum.size == 1:
cnumb = cnum.item()
else:
            cnumb = cnum.iloc[0]  # positional access; label-based cnum[0] may raise KeyError
df_appendedlang5 = df_appendedlang5.loc[df_appendedlang5['cluster'] == cnumb]
df_appendedlang5 = df_appendedlang5.drop('cluster',axis=1)
for value in column_values2:
df_appendedlang5 = df_appendedlang5.drop(value,axis=1)
df_appendedlang6 = df_appendedlang5
column_values6 = df_appendedlang6["actor_1_name"].unique().tolist()
#column_values6
column_values7 = df_appendedlang6["actor_2_name"].unique().tolist()
column_values8 = df_appendedlang6["actor_3_name"].unique().tolist()
column678 = column_values6+column_values7+column_values8
unique_values678 = pd.unique(column678)
# for v in unique_values678:
# print(v)
for value in unique_values678:
df_appendedlang6 = pd.concat([df_appendedlang6,pd.get_dummies(value)], axis=1)
df_appendedlang6[value] = 0
df_appendedlang6.loc[df_appendedlang6['actor_1_name'] == value,value] = 1
df_appendedlang6.loc[df_appendedlang6['actor_2_name'] == value,value] = 1
df_appendedlang6.loc[df_appendedlang6['actor_3_name'] == value,value] = 1
drop_cols = ['language','director_name','genres','movie_title','gross','budget','Profit','actor_1_name','actor_2_name','actor_3_name']
for value in drop_cols:
df_appendedlang6 = df_appendedlang6.drop(value,axis=1)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 2)
kmeans = kmeans.fit(df_appendedlang6)
df_appendedlang6['cluster'] = kmeans.labels_
df_final = pd.concat([df_appendedlang5,df_appendedlang6], axis=1, ignore_index=False)
df_final = df_final.dropna()
min_budget = df_final['budget'].where(df_final['budget'].gt(0)).min(0)
print(df_final['budget'].unique().tolist())
print(min_budget)
max_budget = df_final['budget'].max()
print(max_budget)
print(df_final['Profit'].unique().tolist())
min_profit = df_final['Profit'].min()
print(min_profit)
max_profit = df_final['Profit'].max()
print(max_profit)
return redirect(url_for('result', min_budget= min_budget, max_budget= max_budget, min_profit= min_profit,max_profit=max_profit))
return render_template('input.html')
def concatenate_list(items):
    # join the selected genres into the pipe-separated format used by the dataset
    return "|".join(items)
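# Example (hypothetical input from the genre checkboxes):
#   concatenate_list(["Action", "Comedy", "Drama"]) -> "Action|Comedy|Drama"
# which matches the pipe-separated 'genres' column in movie_metadataproject.csv.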
#-----------------------------------------DATA VISUALIZATION--------------------------------------
def split(x):
df = pd.read_csv("movie_metadataproject.csv")
df["budget"] = df["budget"].fillna(0)
df["gross"] = df["gross"].fillna(0)
df['Profit'] = df['gross'] - df['budget']
a = df[x].str.cat(sep = '|')
splitdata = pd.Series(a.split('|'))
info = splitdata.value_counts(ascending=False)
return info
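# Usage note: split('genres') concatenates every pipe-separated genre string in the
# CSV, splits it on '|' and counts the pieces, returning a Series indexed by genre
# name and sorted by movie count (used below for the movies-per-genre bar chart).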
total_genre_movies = split('genres')
@app.route('/genrea/')
def visualization1():
fig, ax = plt.subplots()
total_genre_movies = split('genres')
total_genre_movies.plot(kind= 'barh',figsize = (13,6),fontsize=12,colormap='tab20c')
#setup the title and the labels of the plot.
plt.title("Number of movies in each Genre",fontsize=15)
plt.xlabel('Number Of Movies',fontsize=13)
plt.ylabel("Genres",fontsize= 13)
canvas = FigureCanvas(fig)
img = BytesIO()
fig.savefig(img)
img.seek(0)
return send_file(img, mimetype='image/png')
@app.route('/score/')
def visualizationf1():
df = pd.read_csv("movie_metadataproject.csv")
df["budget"] = df["budget"].fillna(0)
df["gross"] = df["gross"].fillna(0)
df['Profit'] = df['gross'] - df['budget']
fig,ax = plt.subplots()
info = pd.DataFrame(df['imdb_score'].sort_values(ascending = False))
info['movie_title'] = df['movie_title']
data = list(map(str,(info['movie_title'])))
x = list(data[:10])
y = list(info['imdb_score'][:10])
ax = sns.pointplot(x=y,y=x)
#sns.set(rc={'figure.figsize':(10,20)})
ax.set_title("Top 10 movies",fontsize = 10)
ax.set_xlabel("IMDB Score",fontsize = 10)
#setup the stylesheet
sns.set_style("whitegrid")
canvas = FigureCanvas(fig)
img = BytesIO()
fig.savefig(img)
img.seek(0)
return send_file(img, mimetype='image/png')
@app.route('/money/')
def visualizationf2():
df = | pd.read_csv("movie_metadataproject.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 09:58:55 2021
@author: emari
"""
import numpy as np
import pandas as pd
from network.models import Account, Connection
class node():
def __init__(self):
self.parent_node = ""
        self.child_connections = [np.array([], dtype=object) for _ in range(5)]
self.username = ""
self.connection_type = ""
self.bio = ""
self.captions = []
self.total_likes = 0
self.total_followers = 0
self.total_following = 0
self.post_date = ""
self.root_post_url = ""
self.profile_img_url = ""
#This is selector for profile image
##react-root > section > main > div > header > div > div > span > img
def printNode(self):
print("parent node: ",self.parent_node)
print("username: ",self.username)
print("connection_type: ",self.connection_type)
print("total_likes: ",self.total_likes)
print("total_followers: ",self.total_followers)
print("total_following: ",self.total_following)
class nodeClassifier():
def __init__(self,output_location):
self.node_list = []
self.output_location = output_location
self.node_df = | pd.DataFrame(columns=["parent_node","username","connection_type","bio","captions","total_likes","total_followers","total_following","profile_img_url","root_post_url"]) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.types import plant
from operational_analysis.methods import plant_analysis
from examples.operational_AEP_analysis.project_EIA import Project_EIA
class TestPandasPrufPlantAnalysis(unittest.TestCase):
def setUp(self):
np.random.seed(42)
# Set up data to use for testing (EIA example plant)
self.project = Project_EIA('./examples/operational_AEP_analysis/data')
self.project.prepare()
self.analysis = plant_analysis.MonteCarloAEP(self.project)
def test_validate_data(self):
self.assertTrue(self.project.validate(), "Failed to validate PlantData from schema")
def test_plant_analysis(self):
df = self.analysis._monthly.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy(df)
self.check_process_loss_estimates(df)
self.check_process_reanalysis_data(df)
# Check outlier filtering
self.check_filter_outliers()
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=2000, reanal_subset=['ncep2', 'merra2', 'erai'])
sim_results = self.analysis.results
self.check_simulation_results(sim_results)
def check_process_revenue_meter_energy(self, df):
# Energy Nan flags are all zero
nptest.assert_array_equal(df['energy_nan_perc'].as_matrix(), np.repeat(0.0, df.shape[0]))
# Expected number of days per month are equal to number of actual days
nptest.assert_array_equal(df['num_days_expected'], df['num_days_actual'])
# Check a few energy values
expected_gwh = pd.Series([6.765, 5.945907, 8.576])
actual_gwh = df.loc[ | pd.to_datetime(['2003-12-01', '2010-05-01', '2015-01-01']) | pandas.to_datetime |
# %%
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
import numpy as np
import os
import pandas as pd
# %%
# base_dir = os.getcwd()
base_dir = '/Users/jason/bestpaycup2020'
x_df = | pd.read_csv(base_dir + '/dataset/dataset4/trainset/train_base.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2022/2/14 17:25
describe: fast backtesting of timing strategies based on Tushare minute-level data
"""
import os
import inspect
import traceback
import pandas as pd
from tqdm import tqdm
from typing import Callable
from .. import envs
from ..data.ts_cache import TsDataCache
from ..traders.utils import trader_fast_backtest, freq_cn2ts
from ..utils import x_round
from ..objects import cal_break_even_point
def read_raw_results(raw_path, trade_dir="long"):
"""读入指定路径下的回测原始结果
:param raw_path: 原始结果路径
:param trade_dir: 交易方向
:return:
"""
assert trade_dir in ['long', 'short']
pairs, p = [], []
for file in tqdm(os.listdir(raw_path)):
if len(file) != 14:
continue
file = os.path.join(raw_path, file)
try:
pairs.append(pd.read_excel(file, sheet_name=f'{trade_dir}_pairs'))
p.append(pd.read_excel(file, sheet_name=f'{trade_dir}_performance'))
        except Exception:
print(f"read_raw_results: fail on {file}")
df_pairs = pd.concat(pairs, ignore_index=True)
df_p = | pd.concat(p, ignore_index=True) | pandas.concat |
from prometheus_client import Gauge
import pandas as pd
import datetime
from extra_metrics.compliance import ClientCompliance
from extra_metrics.logs import logger
device_checkin_days = Gauge('extra_metrics_devices_by_checkin_days',
'various interesting stats on a per device basis, days since checked, compliance status',
["days", ])
device_client_modelnumber = Gauge('extra_metrics_per_device_modelnum',
'provides a value of the model number per device',
["device_name"])
device_client_compliance = Gauge('extra_metrics_per_device_compliance',
'provides a value of the compliance state per device, used for device health graph',
["compliance"])
device_client_version = Gauge('extra_metrics_per_device_client_version',
'number of devices rolled up by client version',
["fw_client_version"])
device_client_platform = Gauge('extra_metrics_per_device_platform',
'number of devices rolled up by platform',
["platform"])
device_client_tracked = Gauge('extra_metrics_per_device_tracked',
'number of devices being tracked',
["tracked"])
device_client_locked = Gauge('extra_metrics_per_device_locked',
'number of devices locked',
["locked"])
class PerDeviceStatus:
def __init__(self, fw_query):
self.fw_query = fw_query
def _rollup_by_single_column_count_client_filewave_id(self, df, column_name):
return df.groupby([column_name], as_index=False)["Client_filewave_id"].count()
def _set_metric_pair(self, metric, item):
label_value = item[0]
total_count = item[1]
metric.labels(label_value).set(total_count)
return (label_value, total_count)
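    # Example of the helper above (hypothetical values): for the platform rollup a
    # row like ("macOS", 42) becomes device_client_platform.labels("macOS").set(42),
    # and the ("macOS", 42) pair is returned for any further aggregation or logging.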
def collect_client_data(self, soft_patches):
Client_device_name = 0
Client_free_disk_space = 2
Client_filewave_id = 10
Client_last_check_in = 17
DesktopClient_filewave_model_number = 18
Client_total_disk_space = 24
OperatingSystem_name = 13
j = self.fw_query.get_client_info_j()
try:
assert j["fields"]
assert j["fields"][Client_device_name] == "Client_device_name", f"field {Client_device_name} is expected to be the Client's name"
assert j["fields"][Client_last_check_in] == "Client_last_check_in", f"field {Client_last_check_in} is expected to be the Client's last check in date/time"
assert j["fields"][Client_filewave_id] == "Client_filewave_id", f"field {Client_filewave_id} is expected to be the Client's filewave_id"
assert j["fields"][OperatingSystem_name] == "OperatingSystem_name", f"field {OperatingSystem_name} is supposed to be OperatingSystem_name"
buckets = [0, 0, 0, 0]
now = datetime.datetime.now()
df = | pd.DataFrame(j["values"], columns=j["fields"]) | pandas.DataFrame |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
| tm.assert_frame_equal(result, df) | pandas._testing.assert_frame_equal |
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from rcbm import fab
def test_calculate_fabric_heat_loss_coefficient():
"""Output is equivalent to DEAP 4.2.0 example A"""
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = pd.Series([85.7])
window_area = pd.Series([29.6])
door_area = pd.Series([1.85])
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = pd.Series([1.5])
thermal_bridging_factor = pd.Series([0.05])
expected_output = pd.Series([68], dtype="int64")
output = fab.calculate_fabric_heat_loss_coefficient(
roof_area=roof_area,
roof_uvalue=roof_uvalue,
wall_area=wall_area,
wall_uvalue=wall_uvalue,
floor_area=floor_area,
floor_uvalue=floor_uvalue,
window_area=window_area,
window_uvalue=window_uvalue,
door_area=door_area,
door_uvalue=door_uvalue,
thermal_bridging_factor=thermal_bridging_factor,
)
rounded_output = output.round().astype("int64")
assert_series_equal(rounded_output, expected_output)
def test_calculate_heat_loss_parameter():
"""Output is equivalent to DEAP 4.2.0 example A"""
fabric_heat_loss_coefficient = pd.Series([0.5])
ventilation_heat_loss_coefficient = pd.Series([0.5])
total_floor_area = pd.Series([1])
expected_output = pd.Series([1], dtype="float64")
output = fab.calculate_heat_loss_parameter(
fabric_heat_loss_coefficient=fabric_heat_loss_coefficient,
ventilation_heat_loss_coefficient=ventilation_heat_loss_coefficient,
total_floor_area=total_floor_area,
)
assert_series_equal(output.round(2), expected_output)
@pytest.mark.parametrize("floor_area", [pd.Series([np.nan]), pd.Series([0])])
def test_calculate_heat_loss_parameter_raises_zerodivisionerror(floor_area):
empty_series = | pd.Series([np.nan]) | pandas.Series |
"""Module to perform QC on the xiRT performance."""
import numpy as np
import pandas as pd
from pyteomics import fasta, parser
from scipy.spatial import distance
def compute_composition_df(seq_df):
"""
Compute the composition matrix for all proteins.
Args:
seq_df: df, dataframe with sequences
Returns:
df, with the composition of the proteins
"""
# get composition table
df_seq_comp = pd.DataFrame(
list(seq_df["sequence"].apply(parser.amino_acid_composition).values)) * 1.0
    # add a zero-filled column for every amino acid that didn't occur in the protein fasta file
for i in parser.std_amino_acids:
if i not in df_seq_comp.columns:
df_seq_comp[i] = 0
df_seq_comp = df_seq_comp.fillna(0.0)
df_seq_comp.index = seq_df.index
return df_seq_comp
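# Worked illustration of the composition step (hedged, single-protein frame assumed):
# for pd.DataFrame({"sequence": ["PEPTIDE"]}), parser.amino_acid_composition("PEPTIDE")
# gives {'P': 2, 'E': 2, 'T': 1, 'I': 1, 'D': 1}; the loop above then adds a zero-filled
# column for every other standard amino acid so all proteins share the same columns.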
def get_nearest_neighbor_proteins(fasta_host, fasta_trap):
"""
Retrieve the nearest neighbors for all proteins in the host fasta.
Args:
fasta_host:
fasta_trap:
Returns:
df, dataframe with proteins for host and entrapment database.
"""
    # build the nearest-neighbour match between host and entrapment proteins
    # get protein table
df_prot_host = fasta2dataframe(fasta_host)
df_comp_host = compute_composition_df(df_prot_host)
df_peptides_host = digest_protein_df(df_prot_host)
df_prot_trap = fasta2dataframe(fasta_trap)
df_comp_trap = compute_composition_df(df_prot_trap)
df_peptides_trap = digest_protein_df(df_prot_trap)
# perform the filtering
df_comp_trap, df_prot_trap = filter_trap_fasta(df_prot_trap, df_comp_trap,
df_peptides_trap, df_peptides_host)
# get best human protein matching by euclidean distance
neighbor = []
distances = np.zeros(df_comp_host.shape[0])
for ii, row in enumerate(df_comp_host.iterrows()):
# compute the distance from the query (current row) to all other proteins in the
# trap database
ary = distance.cdist(df_comp_trap, pd.DataFrame(row[1]).transpose(), metric='euclidean')
# retrieve minimal disance entry here and use the index as neighbor and include
# it to the fasta df later
neighbor.append(df_comp_trap[ary == ary.min()].index.values[0])
distances[ii] = ary.min()
# print identifier for id mapping
# neighbors = [i.split("|")[1] for i in np.ravel(neighbor)]
fasta_df_entrapment = df_prot_trap.loc[neighbor]
# store seed-neighbor pairs
fasta_df_entrapment["host_seed"] = df_comp_host.index
final_fasta_df = pd.concat([df_prot_host, fasta_df_entrapment])
final_fasta_df["db_type"] = ["host"] * len(df_prot_host) + ["trap"] * len(fasta_df_entrapment)
return final_fasta_df
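# Note on the matching above: it is a plain nearest-neighbour search in amino-acid-
# composition space. distance.cdist(df_comp_trap, query) returns an (n_trap x 1) array
# of Euclidean distances for one host protein, and the trap protein at ary.min() is
# kept as that host protein's entrapment neighbour.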
def filter_trap_fasta(df_prot_trap, df_comp_trap, df_peptides_trap, df_peptides_host):
"""
Remove proteins with peptides that also occur in the host database.
Args:
df_comp_trap: df, protein entries from the entrapment database (composition)
df_peptides_host: df, peptides from the host fasta
df_peptides_trap: df, peptides from the entrapment fasta
df_prot_trap: df, proteins from the entrapment database
Returns:
(df_comp_trap, df_prot_trap), returns a tuple of valid (unique) trapment ids.
"""
# make sure I/L witht he same mass doesnt mess with overlapping peptides
df_peptides_host["sequence"] = df_peptides_host["sequence"].str.replace("I", "L")
df_peptides_trap["sequence"] = df_peptides_trap["sequence"].str.replace("I", "L")
df_peptides_host = df_peptides_host.set_index("sequence")
df_peptides_trap = df_peptides_trap.set_index("sequence")
df_joined = df_peptides_trap.join(df_peptides_host, rsuffix="_host", lsuffix="_trap",
how="left")
blacklist_proteins_trap = df_joined.dropna(subset=["protein_host"])["protein_trap"].unique()
# drop proteins from dfs
df_prot_trap = df_prot_trap.drop(blacklist_proteins_trap)
df_comp_trap = df_comp_trap.drop(blacklist_proteins_trap)
return df_comp_trap, df_prot_trap
def fasta2dataframe(FASTA):
"""
Convert the entries in the FASTA file to a dataframe with ID, sequence and Type as column.
Parameters
FASTA : str
Location of the FASTA file..
Returns
dataframe
"""
# store proteins and ids here
unique_proteins = []
unique_ids = []
with open(FASTA, mode='rt') as ffile:
for description, sequence in fasta.FASTA(ffile):
unique_proteins.append(sequence)
unique_ids.append(description)
# convert to dataframe
df = | pd.DataFrame(unique_proteins) | pandas.DataFrame |
import sys
import pickle
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import nltk
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import pos_tag
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
class NounCount(BaseEstimator, TransformerMixin):
"""
    Input: Inherits from the sklearn.base class
Creates: A transformer object that can be used to count the number of nouns in a corpus of documents
"""
def count_nouns(self, text):
list_of_noun_tags = ["NN", "NNP", "NNPS", "NNS"]
noun_count = 0
for word, tag in pos_tag(tokenize(text)):
if tag in list_of_noun_tags:
noun_count += 1
return noun_count
def fit(self, X, y=None):
return self
def transform(self, X):
text_transformed = pd.Series(X).apply(self.count_nouns)
return pd.DataFrame(text_transformed)
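# Hypothetical usage of the transformer above (assumes the module-level tokenize()
# helper referenced in count_nouns):
#   NounCount().fit_transform(["Storm damaged the bridge in the city"])
# returns a one-column DataFrame with the noun count per document; FeatureUnion is
# imported above, suggesting it is combined with TF-IDF features in the model pipeline.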
def load_data(database_filepath):
"""
    Inputs: The file path of the SQLite database.
Outputs: The data split into train and test data for the model.
"""
engine = create_engine(f'sqlite:///{database_filepath}')
df = | pd.read_sql_table("DisasterResponse", engine) | pandas.read_sql_table |