prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
import os
import sys
import pickle
import pandas as pd
import numpy as np
import word2vec as wv
from tqdm import tqdm
def get_new_dataframe_names(df_path):
file_name = df_path.rsplit('/')[-1]
return 'sentence_' + file_name
def create_sentence_df(df):
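"""Collapse an annotation dataframe to one row per unique narration.
For each unique value of df.narration, the first matching row's index and its
action/verb/noun class labels, verb and nouns are recorded; afterwards one
representative row is added for every action class not yet covered. The
collected dictionaries are then assembled into sentence_df.
"""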
sentences = {}
indices = {}
classes = {}
verb_class = {}
noun_class = {}
verbs = {}
nouns = {}
unique_sents = df.narration.unique()
for i, sent in tqdm(enumerate(unique_sents), total=len(unique_sents)):
sentences[i] = sent
subset_df = df[df.narration.apply(lambda x: x == sent)]
indices[i] = subset_df.index[0]
classes[i] = subset_df.iloc[0].action_class
verb_class[i] = subset_df.iloc[0].verb_class
noun_class[i] = subset_df.iloc[0].all_noun_classes
verbs[i] = subset_df.iloc[0].verb
nouns[i] = subset_df.iloc[0].all_nouns
missing_classes = set(df.action_class.unique()) - set(classes.values())
i = len(indices)
for class_ in missing_classes:
subset_df = df[df.action_class.apply(lambda x: x == class_)]
sentences[i] = subset_df.iloc[0].narration
indices[i] = subset_df.index[0]
classes[i] = subset_df.iloc[0].action_class
verb_class[i] = subset_df.iloc[0].verb_class
noun_class[i] = subset_df.iloc[0].all_noun_classes
verbs[i] = subset_df.iloc[0].verb
nouns[i] = subset_df.iloc[0].all_nouns
i += 1
sentence_df = pd.DataFrame([sentences]).T
sentence_df.columns = ['sentence']
sentence_df['action_class'] = pd.Series(classes)
sentence_df['index'] = pd.Series(indices)
sentence_df['verb_class'] = pd.Series(verb_class)
sentence_df['noun_class'] = pd.Series(noun_class)
import detailed_table
import pandas as pd
from locale import atof
import json
# KAP 19
COL_PROFIT_STOCKS = 'Aktien G/V'
COL_DIVIDENDS_STOCKS = 'Aktien Dividende'
COL_PROFIT_CFDS = 'CFD G/V'
COL_FEES_CFDS = 'CFD Gebühren'
# KAP 20
COL_PROFIT_ON_SALE_STOCKS = 'Aktien - Enthaltene Gewinne aus Aktienveräußerungen'
COL_PROFIT_ON_SALE_CFDS = 'CFD - Enthaltene Gewinne aus Aktienveräußerungen'
# KAP 22
COL_LOSS_ON_SALE_CFDS = 'CFD G/W - darin enthaltene Verluste aus Kapitalerträgen ohne Aktienveräußerung'
COL_FEES_ON_SALE_CFDS = 'CFD Gebühren - darin enthaltene Verluste aus Kapitalerträgen ohne Aktienveräußerung'
# KAP 23
COL_LOSS_ON_SALE_STOCKS = 'Aktien G/W - Enthaltene Verluste aus Aktienveräußerungen'
def roundTo2Decimals(dict):
result = {}
for key in dict:
result[key] = round(dict[key], 2)
return result
def calcKapSummary(detailedTable):
resultColumns = [COL_PROFIT_STOCKS, COL_DIVIDENDS_STOCKS, COL_PROFIT_CFDS, COL_FEES_CFDS,
COL_PROFIT_ON_SALE_STOCKS, COL_LOSS_ON_SALE_CFDS, COL_FEES_ON_SALE_CFDS,
COL_LOSS_ON_SALE_STOCKS]
resultTable = pd.DataFrame(columns=resultColumns)
import pandas as pd
import random
from src.func import tweet_utils
from src.func import regex
from src.func import labmtgen
from src.scripts.process_tweets import *
from labMTsimple.storyLab import *
def get_tweets_timestamp(park_user_tweets):
"""
Takes a mapping of users to their tweets and assigns a random control tweet. Returns a list of tweets.
Each item in the returned list has the in-park tweet text, control tweet text, park name, and timestamp.
Args:
park_user_tweets (dict): Tweets grouped by user.
Returns:
list: List of park tweets.
"""
park_tweet_list = []
for user, tweet_list in park_user_tweets.items():
tweet_count = len(tweet_list)
tweet_list_parks = [tweet for tweet in tweet_list
if not pd.isnull(tweet['ParkID'])]
park_tweet_count = len(tweet_list_parks)
not_park_count = tweet_count-park_tweet_count
if park_tweet_count > 0 and not_park_count > 0:
tweet_choice = random.choice(tweet_list_parks)
park_text = tweet_choice['pure_text']
control_text = tweet_choice['control_text']
tweet_time = tweet_choice['tweet_created_at']
park_name = tweet_choice['Park_Name']
park_tweet_list.append({'park_text': park_text,
'control_text': control_text,
'park_name': park_name,
'tweet_created_at': tweet_time})
return park_tweet_list
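# Example of the per-user structure these functions expect (a sketch: only the
# field names are taken from the lookups above; the key and values are made up).
_example_park_user_tweets = {
    'user_123': [
        {'ParkID': 42, 'Park_Name': 'Example Park', 'pure_text': 'in-park tweet',
         'control_text': 'control tweet', 'tweet_created_at': '2020-06-01 12:00:00'},
        {'ParkID': float('nan'), 'Park_Name': None, 'pure_text': 'non-park tweet',
         'control_text': None, 'tweet_created_at': '2020-06-02 09:30:00'},
    ],
}
# get_tweets_timestamp(_example_park_user_tweets) would return one entry for
# 'user_123', since that user has both an in-park and a non-park tweet.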
def get_time_control_text_park(park_user_tweets):
"""
Takes a mapping of users to their tweets and assigns a random control tweet based on timestamp. Returns a list of tweets.
Each item in the returned list has the park ID, park name, in-park tweet text, and control tweet text.
Args:
park_user_tweets (dict): Tweets grouped by user.
Returns:
list: List of park tweets.
"""
park_tweet_list = []
for user, tweet_list in park_user_tweets.items():
tweet_count = len(tweet_list)
tweet_list_parks = [tweet for tweet in tweet_list
if not pd.isnull(tweet['ParkID'])]
park_tweet_count = len(tweet_list_parks)
not_park_count = tweet_count-park_tweet_count
if park_tweet_count > 0 and not_park_count > 0:
tweet_choice = random.choice(tweet_list_parks)
park_text = tweet_choice['pure_text']
control_text = tweet_choice['control_text']
park_name = tweet_choice['Park_Name']
park_id = tweet_choice['ParkID']
park_tweet_list.append({'park_id': park_id,
'park_name': park_name,
'park_text': park_text,
'control_text': control_text})
return park_tweet_list
def get_both_control_text(park_user_tweets):
"""
Takes a mapping of users to their tweets and assigns both a random control tweet based on timestamp and a random control tweet from the same user. Returns a list of tweets.
Each item in the returned list has the in-park tweet text, control tweet text, park name, and timestamp.
Args:
park_user_tweets (dict): Tweets grouped by user.
Returns:
list: List of park tweets.
"""
park_tweet_text = []
user_tweet_text = []
time_tweet_text = []
park_list = []
for user, tweet_list in park_user_tweets.items():
tweet_list_parks = [tweet for tweet in tweet_list
if not pd.isnull(tweet['ParkID'])]
"""Class for creating a Parallel Pipeline."""
from iguanas.exceptions.exceptions import DataFrameSizeError, NoRulesError
from iguanas.pipeline._base_pipeline import _BasePipeline
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
from iguanas.utils.types import PandasDataFrame, PandasSeries, Dictionary
import iguanas.utils.utils as utils
from iguanas.rules import Rules
from iguanas.warnings import NoRulesWarning
from copy import deepcopy
from typing import List, Tuple, Union
import pandas as pd
import warnings
class ParallelPipeline(_BasePipeline):
"""
Generates a parallel pipeline, which is a set of steps which run
independently - their outputs are then concatenated and returned. Each step
should be an instantiated class with both `fit` and `transform` methods.
Parameters
----------
steps : List[Tuple[str, object]]
The steps to be applied as part of the pipeline. Each element of the
list corresponds to a single step. Each step should be a tuple of two
elements - the first element should be a string which refers to the
step; the second element should be the instantiated class which is run
as part of the step.
verbose : int, optional
Controls the verbosity - the higher, the more messages. >0 : gives
the overall progress of the training of the pipeline; >1 : shows the
current step being trained.
Attributes
----------
steps_ : List[Tuple[str, object]]
The steps corresponding to the fitted pipeline.
rule_names : List[str]
The names of the rules in the concatenated output.
rules : Rules
The Rules object containing the rules produced from fitting the
pipeline.
Examples
--------
>>> from iguanas.pipeline import ParallelPipeline
>>> from iguanas.rbs import RBSOptimiser, RBSPipeline
>>> from iguanas.rule_generation import RuleGeneratorDT, RuleGeneratorOpt
>>> from iguanas.metrics import FScore
>>> from sklearn.ensemble import RandomForestClassifier
>>> import pandas as pd
>>> X = pd.DataFrame({
... 'A': [1, 0, 1, 0],
... 'B': [1, 1, 1, 0]
... })
>>> y = pd.Series([
... 1, 0, 1, 0
... ])
>>> f1 = FScore(beta=1)
>>> rg_dt = RuleGeneratorDT(
... metric=f1.fit,
... n_total_conditions=2,
... tree_ensemble=RandomForestClassifier(random_state=0),
... rule_name_prefix='RuleGenDT'
... )
>>> rg_opt = RuleGeneratorOpt(
... metric=f1.fit,
... n_total_conditions=2,
... num_rules_keep=10,
... rule_name_prefix='RuleGenOpt'
... )
>>> pp = ParallelPipeline(
... steps=[
... ('rg_dt', rg_dt),
... ('rg_opt', rg_opt)
... ]
... )
>>> X_rules = pp.fit_transform(X=X, y=y)
>>> print(X_rules)
RuleGenDT_0 RuleGenDT_1 RuleGenDT_2 RuleGenOpt_0
0 1 1 1 1
1 0 0 1 0
2 1 1 1 1
3 0 0 0 0
>>> X_rules = pp.transform(X=X)
>>> print(X_rules)
RuleGenDT_0 RuleGenDT_1 RuleGenDT_2 RuleGenOpt_0
0 1 1 1 1
1 0 0 1 0
2 1 1 1 1
3 0 0 0 0
"""
def __init__(self,
steps: List[Tuple[str, object]],
verbose=0) -> None:
_BasePipeline.__init__(self, steps=steps, verbose=verbose)
self.rules = Rules()
def fit_transform(self,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight=None) -> PandasDataFrameType:
"""
Independently runs the `fit_transform` method of each step in the
pipeline, then concatenates the output of each step column-wise.
Parameters
----------
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
utils.check_allowed_types(X, 'X', [PandasDataFrame, Dictionary])
utils.check_allowed_types(y, 'y', [PandasSeries, Dictionary])
if sample_weight is not None:
utils.check_allowed_types(
sample_weight, 'sample_weight', [PandasSeries, Dictionary])
self.steps_ = deepcopy(self.steps)
X_rules_list = []
rules_list = []
steps_ = utils.return_progress_ready_range(
verbose=self.verbose == 1, range=self.steps_
)
for step_tag, step in steps_:
if self.verbose > 1:
print(
f'--- Applying `fit_transform` method for step `{step_tag}` ---'
)
# Try applying fit_transform for `step`
try:
X_rules_list.append(
self._pipeline_fit_transform(
step_tag, step, X, y, sample_weight
)
)
rules_list.append(step.rules)
# If no rules generated/remain, raise warning and skip `step`
except (DataFrameSizeError, NoRulesError) as e:
warnings.warn(
message=f'No rules remain in step `{step_tag}` as it raised the following error: "{e}"',
category=NoRulesWarning
)
X_rules_list.append(pd.DataFrame())
rules_list.append(Rules())
X_rules = pd.concat(X_rules_list, axis=1)
self.rules = sum(rules_list)
self.rule_names = X_rules.columns.tolist()
return X_rules
def transform(self,
X: Union[PandasDataFrameType, dict]) -> PandasDataFrameType:
"""
Independently runs the `transform` method of each step in the pipeline,
then concatenates the output of each step column-wise. Note that before
using this method, you should first run the `fit_transform` method.
Parameters
----------
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
utils.check_allowed_types(X, 'X', [PandasDataFrame, Dictionary])
X_rules_list = []
for step_tag, step in self.steps_:
# Try applying transform for `step`
try:
X_rules_list.append(
self._pipeline_transform(
step_tag, step, X
)
)
# If no rules present, raise warning and skip `step`; else raise
# exception
except Exception as e:
if str(e) == '`rule_dicts` must be given' or str(e) == '`X` has been reduced to zero columns after the `sf` step in the pipeline.':
warnings.warn(
message=f'No rules present in step `{step_tag}` - `transform` method cannot be applied for this step.',
category=NoRulesWarning
)
X_rules_list.append(pd.DataFrame())
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
policy_new = contact_matrix_4prev(t, prev_schools, prev_work, prev_rest,
school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
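# `ramp_fun` (imported above from covid19model) blends the pre- and post-measure
# contact matrices during the compliance ramp of length `l` days, which starts
# `tau` days after a policy change. The library's own implementation is not
# shown here; a minimal linear blend with the same call signature could look
# like the sketch below (an assumption, for illustration only).
def _linear_ramp_sketch(policy_old, policy_new, t, tau_days, l, t_start):
    frac = (t - t_start - tau_days) / pd.Timedelta(l, unit='D')  # fraction of ramp elapsed
    frac = min(max(frac, 0.0), 1.0)                              # clip to [0, 1]
    return policy_old + frac * (policy_new - policy_old)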
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
# Loop over dimension draws
for n in range(data.shape[0]):
binomial_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = binomial_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim])
import pandas as pd
import numpy as np
import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import KFold, RepeatedKFold
from scipy import sparse
# Display all columns
pd.set_option('display.max_columns', None)
import pandas as pd
import os
import itertools
from collections import defaultdict
"""
Description: This script performs automatic filtering/aggregation of Qualys scans intended for analysis. Reads in a csv file and outputs a csv file.
"""
def main():
root = os.path.dirname(os.path.abspath(__file__))
in_file = "supp.csv"
out_file = "testsupp_results_v2.csv"
# Get first and last valid lines for entries in the CSV (automatically detect header/footer and skip)
def get_line_number(phrase, file_path):
with open(file_path, encoding="utf8") as f:
for i, line in enumerate(f, 1):
if phrase in line:
return i
top_line_number = get_line_number("host scanned", root + os.path.sep + in_file)
bottom_line_number = get_line_number("hosts not scanned", root + os.path.sep + in_file)
total_line_number = 0
# Get total # of lines to calculate how many to skip
with open(root + os.path.sep + in_file, encoding="utf8") as f:
total_line_number = sum(1 for _ in f)
skip_top_lines = top_line_number - 2 # 1 for header, 1 for blank line
skip_bottom_lines = total_line_number - bottom_line_number + 1
# DEBUG
# print(top_line_number, bottom_line_number, total_line_number)
# print(skip_top_lines, skip_bottom_lines)
"""
read_cols = ["IP", "DNS", "NetBIOS", "OS", "IP Status", "QID", "Title", "Type", "Severity", "Port", "Protocol", "FQDN", "SSL", "CVE ID", "Vendor Reference", "Bugtraq ID", "CVSS Base", \
"CVSS Temporal", "CVSS3 Base", "CVSS3 Temporal", "Threat", "Impact", "Solution", "Exploitability", "Associated Malware", "Results", "PCI Vuln", "Instance", "Category"]
"""
read_cols = ["IP", "QID", "Title", "Type", "Severity", "Port", "CVE ID", "Threat"]
req_cols = [i for i in read_cols if i not in ["IP", "Port", "CVE ID"]]
df = pd.read_csv(root + os.path.sep + in_file, index_col=False, skiprows=skip_top_lines, skipfooter=skip_bottom_lines, usecols=read_cols)
# DEBUG
# print(df.head())
# print(df.tail())
IP_DICT = defaultdict(set)
PORT_DICT = defaultdict(set)
CVE_DICT = defaultdict(set)
COUNT_DICT = defaultdict(int)
def add_to_dict(key, value, dict):
# if not pd.isna(value): # exclude NaNs (empty cells)
dict[key].add(value)
def generate_df_from_dict(dict, col_name):
# note: this disallows display of lists of considerable length
data = [[key, str(list(val))[1:-1]] for key, val in dict.items() if len(val) < 1337]
return pd.DataFrame(data, columns=["QID", col_name])
# Iterating over two columns, use `zip`
for x, y in zip(df["QID"], df["IP"]):
if not pd.isna(y): # exclude NaNs (empty cells)
add_to_dict(x, str(y), IP_DICT)
for x, y in zip(df["QID"], df["Port"]):
if not pd.isna(y): # exclude NaNs (empty cells)
add_to_dict(x, str(int(y)), PORT_DICT)
for x, y in zip(df["QID"], df["CVE ID"]):
if not pd.isna(y): # exclude NaNs (empty cells)
# https://github.com/bokeh/bokeh/issues/5701
# https://groups.google.com/a/continuum.io/forum/#!searchin/bokeh/selected/bokeh/ft2U4nX4fVo/srBMki9FAQAJ
import pandas as pd
import sys
import io
import os.path as op
import MetaVisLauncherConfig as config
from bokeh.io import show, output_file, save
from bokeh.embed import file_html
from bokeh.resources import CDN
from LongformReader import _generateWideform
from bokeh.util.browser import view
from jinja2 import Environment, FileSystemLoader, select_autoescape
from metaVis import *
import numpy as np
import logging
logger = logging.getLogger('debug_logger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('spam2.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
def error_check(data, ptid_md, measures_md):
data_colnames = list(data.columns.values)
data_rownames = list(data.index)
ptid_names = list(ptid_md.index)
measures_names = list(measures_md.index)
if (data.shape[1] != measures_md.shape[0]):
error = "<p>Error: Number of measurements in base dataset does not match the number of measurements in the measurement metadata.</br>"
error += " Base Data: " + str(data.shape[1]) + "</br>"
error += " Measures Metadata: " + str(measures_md.shape[0])
return error
if (data.shape[0] != ptid_md.shape[0]):
error = "<p>Error: Number of PtID's in base dataset does not match the number of PtID's in the PtID metadata. </br>"
error += " Base Data: " + str(data.shape[0]) + "</br>"
error += " PtID's Metadata: " + str(ptid_md.shape[0])
error += "</p>"
return error
if (ptid_names != data_rownames):
error = "<p>Error: PtID's in base dataset do not match PtID's in PtID metadata.</p>"
print(ptid_names)
print(data_rownames)
return error
if (measures_names != data_colnames):
error = "<p>Error: Measures in base dataset do not match measures in measurement metadata. </br>"
error += str(list(measures_names)) + "</br>"
error += str(list(data_colnames)) + "</p>"
return error
return None
# Generates the heatmap html at config.tmp_dir/config.output_file
def gen_heatmap_html(data=None, row_md=None, col_md=None, raw_data=None,
metric='euclidean', method='complete', transform='none',
standardize=True, impute=True, params=['', '', '']):
# TODO - Metavis currently does not work without imputing
# if (longform is not None and rx is not None):
# data, row_md, col_md = _generateWideform(longform, rx)
ret_val = {}
ret_val['error'] = error_check(data, row_md, col_md)
if ret_val['error'] is not None:
return ret_val
ptid_md = row_md
measures_md = col_md
if metric[-1] == "'":
metric = metric[2: -1]
method = method[2: -1]
logger.info(type(metric))
logger.info(metric)
# TODO - double check clusterData param handling
data, ptid_md, measures_md, rowDend, colDend = clusterData(data, ptid_md, measures_md,
metric=metric,
method=method,
standardize=standardize,
impute=impute)
sources = initSources(data, ptid_md, measures_md, raw_data, transform=transform, params=params)
cbDict = initCallbacks(sources)
html = generateLayout(sources, cbDict, rowDend, colDend)
with io.open(op.join(config.tmp_dir, config.output_file), mode='w', encoding='utf-8') as f:
f.write(html)
return ret_val
def _errorDisplay(data, row_md, col_md):
data_colnames = list(data.columns.values)
data_rownames = list(data.index)
rowmd_names = list(row_md.index)
colmd_names = list(col_md.index)
env = Environment(
loader=FileSystemLoader('templates'),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template("error.html")
# html = template.render(tables=[data.to_html(), row_md.to_html(), col_md.to_html()], titles=['na', 'Base Data', 'Row Metadata', 'Column Metadata'])
count_err, count_loc, base_count, meta_count = _checkCounts(data, row_md, col_md)
html = ""
has_error = False
if count_err is True:
has_error = True
print(count_err)
message = "Error: There are mismatched counts between your metadata and your base data. "
if count_loc == 'col':
message += "Your base data has " + str(data.shape[1]) + " columns but your Column Metadata has " + str(col_md.shape[0]) + " entries."
data_col_df = pd.DataFrame({"Columns from data": data_colnames})
colmd_df = pd.DataFrame({"Columns from Col Metadata": colmd_names})
try:
from TACT import logger
except ImportError:
pass
from TACT.readers.config import Config
from future.utils import itervalues, iteritems
import pandas as pd
import re
import sys
from string import printable
import numpy as np
class Data(Config):
"""Class to hold data, derivative features, and metadata for TACT analysis
of a single site. Inherits from Config class
Attributes
----------
inputdata : DataFrame
DataFrame with all anemometer, RSD data, and site atmospheric data
timestamps : DateTime array
array of timestamps for each row of inputdata
a : numpy array
Edges of Reference TI bins
lab_a : numpy array
Center values of Reference TI bins
RSD_alphaFlag : bool
whether there exists an additional upper or lower RSD height to
compute wind shear
Ht_1_RSD : int
Lower height to compute wind shear from RSD
Ht_2_RSD : int
Upper height to compute wind shear from RSD
"""
def get_inputdata(self):
"""Ingests and formats data from inputdata and config data
Parameters
----------
None
Uses object attributes
Returns
-------
Silent
Sets inputdata attribute to pandas array
"""
filename = self.input_filename
if str(filename).split(".")[-1] == "csv":
self.inputdata = pd.read_csv(self.input_filename)
elif str(filename).split(".")[-1] == "xlsx":
self.inputdata = pd.read_excel(self.input_filename)
"""
Script for the calculation of A_uv, A_dv & A_g, and the python graph for the xPDFs at initial scale.
The parametrizations were taken from: <NAME>., & <NAME>. (2019). A new simple PDF parametrization: improved description
of the HERA data. The European Physical Journal Plus, 134(10), 531.
"""
import math # for the gamma function
import scipy.special as ss
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
#sns.set_style("white")
# constants
f_s = 0.4
gamma_s = f_s/(1-f_s)
# fitted parameters of the paper (NNLO + NLLx Hell 3.0)
B_uv, C_uv, E_uv, F_uv, G_uv = 0.76, 4.6, 2.6, 0.35, 0.049
B_g, C_g, F_g, G_g = -0.52, 4.5, 0.217, 0.0112
B_dv, C_dv = 0.99, 4.7
A_dbar, B_dbar, C_dbar, D_dbar, F_dbar = 0.14, -0.33, 24, 38, 0.071
A_ubar, B_ubar, C_ubar, D_ubar, F_ubar = A_dbar, B_dbar, 11, 18, F_dbar
"""
# NNLO global minimum parameters
B_uv, C_uv, E_uv, F_uv, G_uv = 0.83, 4.6, 1.9, 0.37, 0.058
B_g, C_g, F_g, G_g = -0.55, 4.5, 0.230, 0.0131
B_dv, C_dv = 0.98, 4.7
A_dbar, B_dbar, C_dbar, D_dbar, F_dbar = 0.13, -0.34, 24, 40, 0.072
A_ubar, B_ubar, C_ubar, D_ubar, F_ubar = A_dbar, B_dbar, 11, 20, F_dbar
"""
# integral equivalents
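# The I_k helpers below are term-by-term integrals over x in (0, 1) of the
# parametrization A*x**(B-1)*(1-x)**C*(1 + D*x + E*x**2 + F*ln(x) + G*ln(x)**2):
#   I1(B, C) = int x**(B-1) * (1-x)**C dx = Gamma(B)*Gamma(C+1)/Gamma(B+C+1)  (Euler Beta function)
#   I2(B, C) = int x**(B+1) * (1-x)**C dx              (the E*x**2 term)
#   I3(B, C) = int x**(B-1) * (1-x)**C * ln(x) dx      (dI1/dB, hence the polygamma terms)
#   I4(B, C) = int x**(B-1) * (1-x)**C * ln(x)**2 dx   (d^2 I1/dB^2)
# The I*_i variants used for the gluon carry an extra factor of x (momentum integrals).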
# for uv and dv
def I1 (B_i, C_i):
return math.gamma(B_i) * math.gamma(C_i+1) / math.gamma(B_i+C_i+1)
def I2 (B_i, C_i):
return I1 (B_i+2, C_i)
def I3 (B_i, C_i):
return I1 (B_i, C_i) * (ss.polygamma(0, B_i) - ss.polygamma(0, B_i+C_i+1))
def I4 (B_i, C_i):
return I1 (B_i, C_i) * ( (ss.polygamma(0, B_i) - ss.polygamma(0, B_i+C_i+1))**2 + (ss.polygamma(1, B_i) - ss.polygamma(1, B_i+C_i+1)) )
# for gl:
def I1_i (B_i, C_i):
return I1(B_i+1, C_i)
def I2_i (B_i, C_i):
return I3 (B_i+1, C_i) #(sympy.harmonic(B_i)-sympy.harmonic(B_i+C_i+1)), where ss.polygamma(0, B_i+1) = sympy.harmonic(B_i) when B_i > 0.
# There was a problem when B_i < 0 (so Wolfram is wrong?), hence the switch to the polygamma function.
def I3_i (B_i, C_i):
return I4(B_i+1, C_i)
def I4_i (B_i, C_i):
return I2 (B_i, C_i)
def I5_i (B_i, C_i):
return I1(B_i+3, C_i)
# A_uv calculation
A_uv = 2 / (I1 (B_uv, C_uv) + E_uv*I2 (B_uv, C_uv) + F_uv*I3 (B_uv, C_uv) + G_uv*I4 (B_uv, C_uv))
print("A_uv: ", A_uv)
# A_dv calculation
A_dv = 1 / (I1(B_dv, C_dv))
print("A_dv: ", A_dv)
# Ag calculation
I1_uv, I1_dv, I1_g, I1_ubar, I1_dbar = I1_i (B_uv, C_uv), I1_i (B_dv, C_dv), I1_i (B_g, C_g), I1_i (B_ubar, C_ubar), I1_i (B_dbar, C_dbar)
I2_uv, I2_ubar, I2_dbar, I2_g = I2_i (B_uv, C_uv), I2_i (B_ubar, C_ubar), I2_i (B_dbar, C_dbar), I2_i (B_g, C_g)
I3_uv, I3_g = I3_i (B_uv, C_uv), I3_i (B_g, C_g)
I4_ubar, I4_dbar = I4_i (B_ubar, C_ubar), I4_i (B_dbar, C_dbar)
I5_uv = I5_i (B_uv, C_uv)
A_g = (1 - (A_uv*(I1_uv + E_uv*I5_uv + F_uv*I2_uv + G_uv*I3_uv) + A_dv*I1_dv + 2*A_ubar*(I1_ubar + D_ubar*I4_ubar + F_ubar*I2_ubar)
+ 2*(1+gamma_s)*A_dbar*(I1_dbar + D_dbar*I4_dbar + F_dbar*I2_dbar)) ) / (I1_g + F_g*I2_g + G_g*I3_g)
print("A_g: ", A_g)
# PDFs graphs at initial scale
x0 = 5.2427e-4 # zero of xgl
x = np.linspace(x0, 1, 10000)
# PDFs parametrization
def x_pdf(x, A, B, C , D, E, F, G):
return A*x**B * (1-x)**C * (1 + D*x + E*x**2 + F*np.log(x) + G*(np.log(x))**2)
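# Quick numerical sanity check (a sketch, not part of the original script): the
# normalizations above should enforce the valence number sum rules, i.e.
# int_0^1 u_v(x) dx = 2 and int_0^1 d_v(x) dx = 1, with q_v(x) = x_pdf(x, ...)/x.
from scipy import integrate as _integrate
_uv_norm, _ = _integrate.quad(lambda z: x_pdf(z, A_uv, B_uv, C_uv, 0, E_uv, F_uv, G_uv) / z, 0, 1)
_dv_norm, _ = _integrate.quad(lambda z: x_pdf(z, A_dv, B_dv, C_dv, 0, 0, 0, 0) / z, 0, 1)
print("int u_v dx ~", _uv_norm, "(expected 2)")
print("int d_v dx ~", _dv_norm, "(expected 1)")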
# xPDF values at the minimum x value (x0)
print("xuv = ", x_pdf(x0, A_uv, B_uv, C_uv, 0, E_uv, F_uv, G_uv))
print("xdv = ", x_pdf(x0, A_dv, B_dv, C_dv , 0, 0, 0, 0))
print("xgl = ", x_pdf(x0, A_g, B_g, C_g , 0, 0, F_g, G_g))
print("xubar = ", x_pdf(x0, A_ubar, B_ubar, C_ubar , 0, 0, F_ubar, 0))
print("xdbar = ", x_pdf(x0, A_dbar, B_dbar, C_dbar , 0, 0, F_dbar, 0))
xg = x_pdf(x, A_g, B_g, C_g , 0, 0, F_g, G_g)
x_uv, x_dv = x_pdf(x, A_uv, B_uv, C_uv , 0, E_uv, F_uv, G_uv), x_pdf(x, A_dv, B_dv, C_dv , 0, 0, 0, 0)
x_ubar, x_dbar = x_pdf(x, A_ubar, B_ubar, C_ubar , D_ubar, 0, F_ubar, 0), x_pdf(x, A_dbar, B_dbar, C_dbar , D_dbar, 0, F_dbar, 0)
x_qv = x_uv + x_dv
x_qs = x_ubar + x_dbar
x_Sigma = x_uv + x_dv + 2*x_ubar + 2*(1+gamma_s)*x_dbar
# Convert to pd.DataFrames for plotting with seaborn and for easy export to .csv
x_q = pd.DataFrame({'x$u_{v}$': x_uv, 'x$d_{v}$': x_dv, 'x$\\overline{u}$': x_ubar, 'x$\\overline{d}$': x_dbar}, index=x)
x_q_gl = pd.DataFrame({'x$u_{v}$': x_uv, 'x$d_{v}$': x_dv, 'x$\\overline{u}$': x_ubar, 'x$\\overline{d}$': x_dbar, 'x$gl$': xg}, index=x)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 16:13:16 2021
@author: nicolasnavarre
"""
import pandas as pd
import math
data = 'data/'
def crop_yield(POM_data, fish_products, meat_products, feed_list, crop_proxie, diet_div_crop, diet_source_crop):
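"""Derive per-country, per-item crop yields (1000 t/ha) from FAOSTAT data.
Fish and meat items are dropped from POM_data; the single-year and 5-year
FAOSTAT yield files are converted to 1000 t/ha and merged; yields of selected
countries can be replaced by proxy countries (crop_proxie); missing yields are
filled with group averages, then global item averages (grass gets a fixed
default). Returns FAO_Crops, FAO_Crop_yield, yield_avg_feed, yield_avg_crop,
global_avg_feed and global_avg_crop.
"""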
POM_crop_data = POM_data[~POM_data.Item.isin(fish_products)]
POM_crop_data = POM_crop_data[~POM_crop_data.group.isin(meat_products)]
FAO_Crop_yield = pd.read_csv(data+"FAOSTAT_crop Yield.csv")
FAO_Crop_yield.loc[FAO_Crop_yield.Area == "Cote d'Ivoire", "Area"] = "Côte d'Ivoire"
FAO_Crop_yield.loc[FAO_Crop_yield.Area == "United Kingdom of Great Britain and Northern Ireland", "Area"] = "United Kingdom"
FAO_Crop_yield["Value"] = FAO_Crop_yield["Value"] / 10000 / 1000 #convert to 1000 tons/ha
FAO_Crop_yield["Unit"] = "1000 tons/ha"
FAO_Crop_yield_5 = pd.read_csv(data+"FAOSTAT_crop Yield_5.csv")
FAO_Crop_yield_5.loc[FAO_Crop_yield_5.Area == "Cote d'Ivoire", "Area"] = "Côte d'Ivoire"
FAO_Crop_yield_5.loc[FAO_Crop_yield_5.Area == "United Kingdom of Great Britain and Northern Ireland", "Area"] = "United Kingdom"
FAO_Crop_yield_5["Value"] = FAO_Crop_yield_5["Value"] / 10000 / 1000 #convert to 1000 tons/ha
FAO_Crop_yield_5["Unit"] = "1000 tons/ha"
FAO_Crop_yield_5 = FAO_Crop_yield_5.groupby(['Area', 'Item']).mean().reset_index()
FAO_Crop_yield = pd.merge(FAO_Crop_yield, FAO_Crop_yield_5[['Area', 'Item', 'Value']], on = ["Area", "Item"], how = 'left')
FAO_Crop_yield['Value'] = FAO_Crop_yield['Value_y']
FAO_Crop_yield = FAO_Crop_yield.drop(columns = ["Value_x", "Value_y"])
if crop_proxie == True:
for i,j in zip (diet_div_crop, diet_source_crop):
fao_fix = FAO_Crop_yield.loc[FAO_Crop_yield.Area == j]
fao_fix['Area'] = i
FAO_Crop_yield = FAO_Crop_yield[FAO_Crop_yield.Area != i]
FAO_Crop_yield = pd.concat([FAO_Crop_yield,fao_fix])
FAO_Crops = pd.merge(FAO_Crop_yield, POM_data, on = ["Area", "Item"], how = "left")
POM_data.loc[POM_data.Area == 'China, mainland', 'REGION'] = 'CHN'
extra_nations = ['Puerto Rico', 'Palestine', 'Greenland', 'Falkland Islands (Malvinas)',\
'New Caledonia', 'China', 'China, Taiwan Province of' ]
FAO_Crops = FAO_Crops[~FAO_Crops['Area'].isin(extra_nations)]
FAO_Crops = FAO_Crops.reset_index()
yield_avg = pd.DataFrame()
yield_avg ["avg yield"] = FAO_Crops.groupby(["Item", "GROUP"]).apply(lambda x: x["Value"].mean())
yield_avg = yield_avg.reset_index(level = ["Item", "GROUP"])
yield_avg_feed = yield_avg[yield_avg.Item.isin(feed_list)]
yield_avg_crop = yield_avg[~yield_avg.Item.isin(feed_list)]
Feed_crops = POM_data[POM_data.Item.isin(feed_list)]
FAO_Crops = pd.concat([FAO_Crops, Feed_crops]).drop_duplicates(subset=['Area', 'Item'], keep='first').reset_index(drop=True)
FAO_Crops['Element'] = 'Yield'
FAO_Crops['Unit_x'] = '1000 tons/ha'
global_avg = yield_avg.groupby(['Item']).apply(lambda x: x['avg yield'].mean())
global_avg = pd.DataFrame(global_avg)
global_avg = global_avg.rename(columns = {0: "avg yield"})
#Fill in missing values with the nation's imagegroup average
for i in FAO_Crops.index:
if math.isnan(FAO_Crops['Value'][i]) == True:
for j in yield_avg.index:
if FAO_Crops['Item'][i] == yield_avg['Item'][j] and FAO_Crops['GROUP'][i] == yield_avg['GROUP'][j]:
FAO_Crops['Value'][i] = yield_avg['avg yield'][j]
for i in FAO_Crops.index:
if math.isnan(FAO_Crops['Value'][i]) == True:
if FAO_Crops['Item'][i] in feed_list:
if FAO_Crops['Item'][i] == 'grass':
FAO_Crops['Value'][i] = (6800/0.17)/1000/1000
else:
FAO_Crops['Value'][i] = global_avg["avg yield"][str(FAO_Crops['Item'][i])]
FAO_Crops = FAO_Crops.dropna(subset = ['for feed EAT'])
FAO_Crops['Value Org'] = FAO_Crops['Value']
global_avg = global_avg.reset_index(level = 'Item')
global_avg_feed = global_avg[global_avg['Item'].isin(feed_list)]
global_avg_crop = global_avg[~global_avg['Item'].isin(feed_list)]
return FAO_Crops, FAO_Crop_yield, yield_avg_feed, yield_avg_crop, global_avg_feed, global_avg_crop
def crop_area(FAO_Crops, yield_avg_feed, change_feed_yields,regional_change_feed,feed_standard,feed_regions,\
feed_countries,global_avg_feed,change_crop_yields,regional_change_crop,yield_avg_crop,crop_standard,\
cr_y_regions,cr_y_countries,global_avg_crop):
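"""Convert POM into harvested crop area, optionally after raising yields.
When change_feed_yields / change_crop_yields are set, yields in the selected
regions and countries are raised (never lowered) to the benchmark group's
average or to the global item average. Areas are then computed as POM divided
by yield for both the baseline ('Org') and EAT-Lancet ('EAT') diets, and
aggregated by country and by EAT food group. Returns FAO_all_crops_area,
Crops_group_area, FAO_all_crops and FAO_all_crops_group.
"""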
if change_feed_yields == True:
if regional_change_feed == True:
yield_avg_feeds = yield_avg_feed.loc[(yield_avg_feed.GROUP == feed_standard)]
for j in yield_avg_feeds.index:
for region_name in feed_regions:
for i in FAO_Crops.loc[(FAO_Crops.GROUP == region_name) & (FAO_Crops.Item == yield_avg_feed.Item[j])].index:
if yield_avg_feed['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = yield_avg_feed['avg yield'][j]
for c_name in feed_countries:
for i in FAO_Crops.loc[(FAO_Crops.Area == c_name) & (FAO_Crops.Item == yield_avg_feed.Item[j])].index:
if yield_avg_feed['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = yield_avg_feed['avg yield'][j]
else:
#global average
for j in global_avg_feed.index:
for region_name in feed_regions:
for i in FAO_Crops.loc[(FAO_Crops.GROUP == region_name) & (FAO_Crops.Item == global_avg_feed.Item[j])].index:
if global_avg_feed['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = global_avg_feed['avg yield'][j]
for c_name in feed_countries:
for i in FAO_Crops.loc[(FAO_Crops.Area == c_name) & (FAO_Crops.Item == global_avg_feed.Item[j])].index:
if global_avg_feed['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = global_avg_feed['avg yield'][j]
if change_crop_yields == True:
if regional_change_crop == True:
#imagegroup based averages
yield_avg_sub = yield_avg_crop.loc[(yield_avg_crop.GROUP == crop_standard)]
for j in yield_avg_sub.index:
for region_name in cr_y_regions:
for i in FAO_Crops.loc[(FAO_Crops.GROUP == region_name) & (FAO_Crops.Item == yield_avg_sub.Item[j])].index:
if yield_avg_sub['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = yield_avg_sub['avg yield'][j]
for c_name in cr_y_countries:
for i in FAO_Crops.loc[(FAO_Crops.Area == c_name) & (FAO_Crops.Item == yield_avg_sub.Item[j])].index:
if yield_avg_sub['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = yield_avg_sub['avg yield'][j]
else:
#global average
for j in global_avg_crop.index:
for region_name in cr_y_regions:
for i in FAO_Crops.loc[(FAO_Crops.GROUP == region_name) & (FAO_Crops.Item == global_avg_crop.Item[j])].index:
if global_avg_crop['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = global_avg_crop['avg yield'][j]
for c_name in cr_y_countries:
for i in FAO_Crops.loc[(FAO_Crops.Area == c_name) & (FAO_Crops.Item == global_avg_crop.Item[j])].index:
if global_avg_crop['avg yield'][j] > FAO_Crops.Value[i]:
FAO_Crops.Value[i] = global_avg_crop['avg yield'][j]
FAO_all_crops = FAO_Crops
FAO_all_crops['POM Org Area'] = FAO_all_crops['POM Org (with waste & feed)']/FAO_all_crops['Value Org']
FAO_all_crops['POM EAT Area'] = FAO_all_crops['POM EAT (with waste & feed)']/FAO_all_crops['Value']
average_yield_nat = FAO_all_crops.groupby(['Area']).apply(lambda x: x['EAT POM'].sum()/x['POM EAT Area'].sum())
average_yield_group_nat = FAO_all_crops.groupby(['Area', 'EAT_group']).apply(lambda x: x['EAT POM'].sum()/x['POM EAT Area'].sum())
Crop_Area_only = FAO_all_crops.groupby(["Area"]).apply(lambda x: x["POM Org Area"].sum())
Crop_Area_only = Crop_Area_only.reset_index(level = 'Area')
Crop_Area_only = Crop_Area_only.rename(columns = {0: "Org"})
Crop_group_only = FAO_all_crops.groupby(["EAT_group"]).apply(lambda x: x["POM Org Area"].sum())
Crop_group_only = Crop_group_only.reset_index(level = 'EAT_group')
Crop_group_only = Crop_group_only.rename(columns = {0: "Org"})
Crop_group_only_EAT = FAO_all_crops.groupby(["EAT_group"]).apply(lambda x: x["POM EAT Area"].sum())
Crop_group_only_EAT = Crop_group_only_EAT.reset_index(level = 'EAT_group')
Crop_group_only_EAT = Crop_group_only_EAT.rename(columns = {0: "EAT"})
FAO_all_crops_group = pd.merge(Crop_group_only, Crop_group_only_EAT, on = 'EAT_group')
Crop_Area_only_EAT = FAO_all_crops.groupby(["Area"]).apply(lambda x: x["POM EAT Area"].sum())
Crop_Area_only_EAT = Crop_Area_only_EAT.reset_index(level = 'Area')
Crop_Area_only_EAT = Crop_Area_only_EAT.rename(columns = {0: "EAT"})
FAO_all_crops_area = pd.merge(Crop_Area_only, Crop_Area_only_EAT, on = 'Area')
FAO_all_crops_area = FAO_all_crops_area.set_index('Area')
FAO_all_crops['feed Area EAT'] = FAO_all_crops['for feed EAT']/FAO_all_crops['Value']
Crops_group_area = FAO_all_crops.groupby(["Area", "EAT_group"]).apply(lambda x: x["POM EAT Area"].sum())
Crops_group_area = Crops_group_area.reset_index(level = 'Area')
Crops_group_area = Crops_group_area.rename(columns = {0:'EAT'})
Crops_group_area_feed = FAO_all_crops.groupby(["Area", "EAT_group"]).apply(lambda x: x["feed Area EAT"].sum())
Crops_group_area_feed = Crops_group_area_feed.reset_index(level = 'Area')
Crops_group_area_feed = Crops_group_area_feed.rename(columns = {0:'feed'})
Crops_group_area['% feed'] = Crops_group_area_feed['feed']/Crops_group_area['EAT']*100
national_area = FAO_all_crops.groupby(["Area"]).apply(lambda x: x['POM EAT Area'].sum())
Feed_crop_area= FAO_all_crops[FAO_all_crops['feed Area EAT'] > 0]
return FAO_all_crops_area, Crops_group_area, FAO_all_crops, FAO_all_crops_group
def feed_crop_area(FAO_Crops, POM_protein_feed, feed_list, FAO_Livestock, POM_data):
"""*** Now figure out crops for feed to break down Int vs Ext meat ***"""
FAO_Crops.drop(FAO_Crops[FAO_Crops['for feed EAT'] == 0].index, inplace = True)
FAO_Crops = FAO_Crops.reset_index(drop = True)
FAO_Crops = FAO_Crops.drop(columns = ["Domain Code", "Domain", "Area Code",\
"Element Code", "Year", "Year Code", "Flag", "Flag Description",
"Item Code_y"])
FAO_Crops = FAO_Crops.rename(columns = {"Item Code_x" : "Item Code"})
FAO_Crops["POM Org Area (Ha)"] = FAO_Crops["POM Org (with waste & feed)"] / FAO_Crops["Value Org"]
FAO_Crops["POM EAT Area (Ha)"] = FAO_Crops["POM EAT (with waste & feed)"] / FAO_Crops["Value"]
FAO_Crops['Yield Crops (1000tons/ha)'] = FAO_Crops ['Value']
FAO_Crops = FAO_Crops.loc[:,~FAO_Crops.columns.duplicated()]
POM_protein_feed ['Maize (Ha)'] = POM_protein_feed ['Maize']
POM_protein_feed ['Soybeans (Ha)'] = POM_protein_feed ['Soybeans']
POM_protein_feed ['Wheat (Ha)'] = POM_protein_feed ['Wheat']
POM_protein_feed ['Rapeseed (Ha)'] = POM_protein_feed ['Rapeseed']
POM_protein_feed ['Oats (Ha)'] = POM_protein_feed ['Oats']
POM_protein_feed ['Peas, dry (Ha)'] = POM_protein_feed ['Peas, dry']
POM_protein_feed ['Barley (Ha)'] = POM_protein_feed ['Barley']
feed_area_list = ['Maize (Ha)', 'Soybeans (Ha)', 'Wheat (Ha)', 'Rapeseed (Ha)', 'Oats (Ha)', 'Peas, dry (Ha)', 'Barley (Ha)']
for i,k in zip(feed_list, feed_area_list):
for j in FAO_Crops.index:
POM_protein_feed.loc[(POM_protein_feed['Area'] == FAO_Crops['Area'][j]) & (FAO_Crops['Item'][j] == i), [k]] /= FAO_Crops['Value'][j]
POM_direct_protein_int = FAO_Livestock[['Area', 'feed group', 'POM EAT Area (Ha) int', '% Protein', 'Item']]
POM_direct_protein_int = POM_direct_protein_int.groupby(['Area', 'feed group']).apply(lambda x: x['POM EAT Area (Ha) int'].sum())
POM_direct_protein_int = POM_direct_protein_int.reset_index(level = ['Area', 'feed group'])
POM_direct_protein_int = POM_direct_protein_int.rename(columns = {0: 'Ha int'})
POM_direct_1000TP_int = FAO_Livestock[['Area', 'feed group', '1000T P EAT int']]
POM_direct_1000TP_int = POM_direct_1000TP_int.groupby(['Area', 'feed group']).apply(lambda x: x['1000T P EAT int'].sum())
POM_direct_1000TP_int = POM_direct_1000TP_int.reset_index(level = ['Area', 'feed group'])
POM_direct_1000TP_int = POM_direct_1000TP_int.rename(columns = {0: '1000T P EAT int'})
POM_direct_protein_ext = FAO_Livestock[['Area', 'feed group', 'POM EAT Area (Ha) ext', '% Protein',]]
POM_direct_protein_ext = POM_direct_protein_ext.groupby(['Area', 'feed group']).apply(lambda x: x['POM EAT Area (Ha) ext'].sum())
POM_direct_protein_ext = POM_direct_protein_ext.reset_index(level = ['Area', 'feed group'])
POM_direct_protein_ext = POM_direct_protein_ext.rename(columns = {0: 'Ha ext'})
POM_direct_1000TP_ext = FAO_Livestock[['Area', 'feed group', '1000T P EAT ext']]
POM_direct_1000TP_ext = POM_direct_1000TP_ext.groupby(['Area', 'feed group']).apply(lambda x: x['1000T P EAT ext'].sum())
POM_direct_1000TP_ext = POM_direct_1000TP_ext.reset_index(level = ['Area', 'feed group'])
POM_direct_1000TP_ext = POM_direct_1000TP_ext.rename(columns = {0: '1000T P EAT ext'})
POM_direct_protein_int = pd.merge(POM_direct_protein_int, POM_direct_1000TP_int, on = ['Area', 'feed group'])
POM_direct_protein_ext = pd.merge(POM_direct_protein_ext, POM_direct_1000TP_ext, on = ['Area', 'feed group'])
POM_direct_protein = pd.merge(POM_direct_protein_int, POM_direct_protein_ext, on = ['Area', 'feed group'])
POM_protein_feed = POM_protein_feed.rename(columns = {'group' : 'feed group'})
POM_protein_feed = pd.merge(POM_protein_feed, POM_direct_protein, on = ["Area", "feed group"], how ='right')
POM_protein_feed = POM_protein_feed.rename(columns = {0 : 'direct Area'})
POM_protein_feed['Total int Ha'] = 0
for i in POM_protein_feed:
if i in feed_area_list or i == 'Ha int':
POM_protein_feed['Total int Ha'] += POM_protein_feed[i]
POM_protein_feed['Total Ha'] = POM_protein_feed['Total int Ha'] + POM_protein_feed['Ha ext']
POM_protein_feed['Total 1000T P'] = POM_protein_feed['1000T P EAT int'] + POM_protein_feed['1000T P EAT ext']
POM_protein_feed['Overall kgP/ha'] = (POM_protein_feed['Total 1000T P']*10**6) / POM_protein_feed['Total Ha']
POM_protein_feed['int kgP/ha'] = (POM_protein_feed['1000T P EAT int']*10**6) / POM_protein_feed['Total int Ha']
POM_protein_feed['ext kgP/ha'] = (POM_protein_feed['1000T P EAT ext']*10**6) / POM_protein_feed['Ha ext']
POM_pop = POM_data[['Area', 'GROUP', 'Population (2016), 1000person']]
POM_pop = POM_pop.drop_duplicates().reset_index(drop=True)
POM_protein_feed ['GROUP'] = POM_protein_feed ['feed group']
POM_protein_feed ['Population (2016), 1000person'] = POM_protein_feed ['feed group']
for i in POM_protein_feed.index:
for j in POM_pop.index:
if POM_protein_feed['Area'][i] == POM_pop['Area'][j]:
POM_protein_feed['GROUP'][i] = POM_pop['GROUP'][j]
POM_protein_feed ['Population (2016), 1000person'][i] = POM_pop['Population (2016), 1000person'][j]
POM_protein_feed = POM_protein_feed.fillna(0)
Weighted_final_area = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['Total Ha'].sum())
Weighted_final_area = Weighted_final_area.reset_index(level = ['feed group', 'Area'])
Weighted_final_area = Weighted_final_area.rename(columns = {0 : 'Total Ha'})
#Area
Weighted_final_area_int = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['Total int Ha'].sum())
Weighted_final_area_int = Weighted_final_area_int.reset_index(level = ['feed group', 'Area'])
Weighted_final_area_int = Weighted_final_area_int.rename(columns = {0 : 'Total int Ha'})
#Area
Weighted_final_area_ext = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['Ha ext'].sum())
Weighted_final_area_ext = Weighted_final_area_ext.reset_index(level = ['feed group', 'Area'])
Weighted_final_area_ext = Weighted_final_area_ext.rename(columns = {0 : 'Ha ext'})
#People
temp_weight_final = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['Population (2016), 1000person'].sum())
temp_weight_final = temp_weight_final.reset_index(level = ['feed group', 'Area'])
temp_weight_final = temp_weight_final.rename(columns = {0: 'pop (1000p)' })
#Protein
protein_weight_final = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['Total 1000T P'].sum())
protein_weight_final = protein_weight_final.reset_index(level = ['feed group', 'Area'])
protein_weight_final = protein_weight_final.rename(columns = {0 : 'Total 1000T P'})
#Protein int
protein_weight_final_int = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['1000T P EAT int'].sum())
protein_weight_final_int = protein_weight_final_int.reset_index(level = ['feed group', 'Area'])
protein_weight_final_int = protein_weight_final_int.rename(columns = {0 : '1000T P int'})
#Protein ext
protein_weight_final_ext = POM_protein_feed.groupby(['feed group', 'Area']).apply(lambda x: x['1000T P EAT ext'].sum())
protein_weight_final_ext = protein_weight_final_ext.reset_index(level = ['feed group', 'Area'])
protein_weight_final_ext = protein_weight_final_ext.rename(columns = {0 : '1000T P ext'})
Weighted_final = pd.merge(Weighted_final_area, temp_weight_final, on = ['feed group', 'Area'])
Weighted_final = pd.merge(Weighted_final, Weighted_final_area_int, on = ['feed group', 'Area'])
Weighted_final = pd.merge(Weighted_final, Weighted_final_area_ext, on = ['feed group', 'Area'])
Weighted_final = pd.merge(Weighted_final, protein_weight_final, on = ['feed group', 'Area'])
Weighted_final = pd.merge(Weighted_final, protein_weight_final_int, on = ['feed group', 'Area'])
Weighted_final = pd.merge(Weighted_final, protein_weight_final_ext, on = ['feed group', 'Area'])
Weighted_final = Weighted_final[Weighted_final['Total 1000T P'] > 0]
Weighted_final ['Total kgP/ha'] = Weighted_final['Total 1000T P']*10**6/Weighted_final['Total Ha']
Weighted_final ['Total kgP/ha/p'] = Weighted_final['Total 1000T P']*10**6/(Weighted_final['pop (1000p)']*10**3)/Weighted_final['Total Ha']
Weighted_final ['Total int kgP/ha/p'] = Weighted_final['1000T P int']*10**6/(Weighted_final['pop (1000p)']*10**3)/Weighted_final['Total int Ha']
Weighted_final ['Total ext kgP/ha/p'] = Weighted_final['1000T P ext']*10**6/(Weighted_final['pop (1000p)']*10**3)/Weighted_final['Ha ext']
Weighted_final ['Total kgP/ha'] = Weighted_final['Total 1000T P']*10**6/Weighted_final['Total Ha']
Weighted_final ['Total int kgP/ha'] = Weighted_final['1000T P int']*10**6/Weighted_final['Total int Ha']
Weighted_final ['Total ext kgP/ha'] = Weighted_final['1000T P ext']*10**6/Weighted_final['Ha ext']
Weighted_final ['1000T P/p'] = Weighted_final['Total 1000T P']*10**6/(Weighted_final['pop (1000p)']*10**3)
Weighted_final = Weighted_final.fillna(0)
#Weighted_final = Weighted_final[Weighted_final['GROUP'] != 'Other']
Weighted_final = pd.merge(Weighted_final, POM_protein_feed[['Area','GROUP']], on = 'Area', how = 'left')
Weighted_final = Weighted_final.drop_duplicates()
Weighted_group_int = Weighted_final.groupby(['GROUP']).apply(lambda x: (x['1000T P int'].sum()*10**6)/x['Total int Ha'].sum())#/(x['pop (1000p)'].sum()*1000))
Weighted_group_int = Weighted_group_int.reset_index(level = 'GROUP')
Weighted_group_int = Weighted_group_int.rename(columns = {0: 'kg/ha int'})
Weighted_group_ext = Weighted_final.groupby(['GROUP']).apply(lambda x: (x['1000T P ext'].sum()*10**6)/x['Ha ext'].sum())#/(x['pop (1000p)'].sum()*1000))
Weighted_group_ext = Weighted_group_ext.reset_index(level = 'GROUP')
Weighted_group_ext = Weighted_group_ext.rename(columns = {0: 'kg/ha ext'})
Weighted_group_tot = Weighted_final.groupby(['GROUP']).apply(lambda x: (x['Total 1000T P'].sum()*10**6)/x['Total Ha'].sum())#/(x['pop (1000p)'].sum()*1000))
Weighted_group_tot = Weighted_group_tot.reset_index(level = 'GROUP')
Weighted_group_tot = Weighted_group_tot.rename(columns = {0: 'kg/ha tot'})
Weighted_group_tot = pd.merge(Weighted_group_tot, Weighted_group_int, on = 'GROUP')
Weighted_group_tot = pd.merge(Weighted_group_tot, Weighted_group_ext, on = 'GROUP')
#Weighted_group['kg/ha ext'] = Weighted_final.groupby(['GROUP']).apply(lambda x: (x['1000T P ext'].sum()*10**6)/x['Ha ext'].sum())
#Weighted_group['kg/ha tot'] = Weighted_final.groupby(['GROUP']).apply(lambda x: (x['Total 1000T P'].sum()*10**6)/x['Total Ha'].sum())
Weighted_national_int = Weighted_final.groupby(['Area']).apply(lambda x: (x['1000T P int'].sum()*10**6)/x['Total int Ha'].sum())#/(x['pop (1000p)'].sum()*1000))
Weighted_national_int = Weighted_national_int.reset_index(level = ['Area'])
Weighted_national_int = Weighted_national_int.rename(columns = {0: 'kg/ha int'})
Weighted_national_ext = Weighted_final.groupby(['Area']).apply(lambda x: (x['1000T P ext'].sum()*10**6)/x['Ha ext'].sum())#/(x['pop (1000p)'].sum()*1000))
Weighted_national_ext = Weighted_national_ext.reset_index(level = ['Area'])
Weighted_national_ext = Weighted_national_ext.rename(columns = {0: 'kg/ha ext'})
Weighted_national_tot = Weighted_final.groupby(['Area']).apply(lambda x: (x['Total 1000T P'].sum()*10**6)/x['Total Ha'].sum())#/(x['pop (1000p)'].sum()*1000))
Weighted_national_tot = Weighted_national_tot.reset_index(level = ['Area'])
Weighted_national_tot = Weighted_national_tot.rename(columns = {0: 'kg/ha tot'})
Weighted_national_tot = pd.merge(Weighted_national_tot, Weighted_national_int, on = 'Area')
Weighted_national_tot = pd.merge(Weighted_national_tot, Weighted_national_ext, on = 'Area')
Weighted_item_int = Weighted_final.groupby(["Area"]).apply(lambda x: x["1000T P int"]/x["1000T P int"].sum())
Weighted_item_int = Weighted_item_int.reset_index(level = ['Area'])
Weighted_item_int = Weighted_item_int.rename(columns = {'1000T P int': '% of int'})
Weighted_item_ext = Weighted_final.groupby(["Area"]).apply(lambda x: x["1000T P ext"]/x["1000T P ext"].sum())
Weighted_item_ext = Weighted_item_ext.reset_index(level = ['Area'])
Weighted_item_ext = Weighted_item_ext.rename(columns = {'1000T P ext': '% of ext'})
Weighted_item_tot = Weighted_final.groupby(["Area"]).apply(lambda x: x["Total 1000T P"]/x["Total 1000T P"].sum())
Weighted_item_tot = Weighted_item_tot.reset_index(level = ['Area'])
Weighted_item_tot = Weighted_item_tot.rename(columns = {'Total 1000T P': '% of Total'})
Weighted_item_tot = pd.merge(Weighted_item_tot, Weighted_item_int['% of int'], left_index = True, right_index = True)
Weighted_item_tot = | pd.merge(Weighted_item_tot, Weighted_item_ext['% of ext'], left_index = True, right_index = True) | pandas.merge |
'''
__author__=<NAME>
MIT License
Copyright (c) 2020 crewml
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import pandas as pd
import itertools
import datetime
from crewml.common import DATA_DIR
import logging
import traceback
class FlightCategorizer:
    '''
    List of Delta Flight Attendant (FA) bases. Generally a base is a
    city which may have more than one airport in it, e.g. the NYC base
    has the JFK and LGA airports.
    Output files written by this class:
    0  2020_feb_classify_b2b.csv,
    1  2020_feb_classify_b2b_missing.csv,
    2  2020_feb_classify_ b2nb_nb2b.csv,
    3  2020_feb_classify_b2nb_nb2b_missing.csv,
    4  2020_feb_classify_nb2nb.csv
    '''
def __init__(self, fa_bases, fa_non_bases, clean_output, classify_files):
self.logger = logging.getLogger(__name__)
self.fa_bases = fa_bases.split(",")
self.fa_non_bases = fa_non_bases
self.clean_output = clean_output
self.classify_files = classify_files
self.flights_df = None
def process(self):
try:
# Read flights data
self.flights_df = pd.read_csv(DATA_DIR+self.clean_output)
self.logger.info("flight data read from:", self.clean_output)
del self.flights_df['CRS_DEP_TIME']
del self.flights_df['CRS_ARR_TIME']
del self.flights_df['ORIGIN_TZ']
del self.flights_df['DEST_TZ']
del self.flights_df['MKT_UNIQUE_CARRIER']
origin_dest = list(
zip(self.flights_df.ORIGIN, self.flights_df.DEST))
# use set to remove duplicate flight pairs
b2b = [x for x in origin_dest if x[0]
in self.fa_bases and x[1] in self.fa_bases]
b2b = list(set(b2b))
nb2nb = [x for x in origin_dest if x[0]
in self.fa_non_bases and x[1] in self.fa_non_bases]
nb2nb = list(set(nb2nb))
b2nb = [x for x in origin_dest if x[0]
in self.fa_bases and x[1] in self.fa_non_bases]
b2nb = list(set(b2nb))
nb2b = [x for x in origin_dest if x[0]
in self.fa_non_bases and x[1] in self.fa_bases]
nb2b = list(set(nb2b))
# departure from nonbase to arrival nonbase flights
nb2nb_df = self.group_flights(nb2nb)
nb2nb_df.to_csv(DATA_DIR+self.classify_files[4])
self.logger.info("len of nb2nb_df=", len(nb2nb_df))
# departure base to arrival base flight pairs
b2b_df = self.group_flights(b2b)
b2b_fl_pair_df, df1 = self.assemble_B2B_flights(b2b_df)
b2b_fl_pair_df = self.calculateDuty(b2b_fl_pair_df)
b2b_fl_pair_df.to_csv(DATA_DIR+self.classify_files[0])
df1.to_csv(DATA_DIR+self.classify_files[1])
self.logger.info("b2b Total=", len(b2b_df))
self.logger.info("len of file %s:%s",
self.classify_files[0], len(b2b_fl_pair_df))
self.logger.info("len of file %s:%s",
self.classify_files[1], len(df1))
self.logger.info("difference between %s and %s=%s",
self.classify_files[0], self.classify_files[1],
len(b2b_df) -
(len(df1)+len(b2b_fl_pair_df)))
# departure base to arrival non-base flight
b2nb_df = self.group_flights(b2nb)
# departure non-base to arrival base flight
nb2b_df = self.group_flights(nb2b)
b2nb_nb2b_pair_df, df2 = self.assemble_B2NB_NB2B_flights(
b2nb_df, nb2b_df)
b2nb_nb2b_pair_df = self.calculateDuty(b2nb_nb2b_pair_df)
# df=self.calculateDuty(df2)
b2nb_nb2b_pair_df.to_csv(DATA_DIR+self.classify_files[2])
df2.to_csv(DATA_DIR+self.classify_files[3])
self.logger.info("len of %s=%s", self.classify_files[2],
len(b2nb_nb2b_pair_df))
self.logger.info("len of %s=%s", self.classify_files[3],
len(df2))
self.logger.info("total=", len(
b2b_fl_pair_df)+len(df1)+len(nb2nb_df) +
len(b2nb_nb2b_pair_df)+len(df2))
except Exception as e:
self.logger.error(traceback.format_exc())
raise
    '''
    fl_pairs contains all combinations of (origin, destination) tuples, e.g.
    ('CVG', 'MSP'), ('EWR', 'DTW'), for B2B flights, and similarly for
    NB2NB flights. This method extracts the B2B, B2NB, NB2B and NB2NB flights
    from the flight list based on fl_pairs. (See the usage sketch after
    this function.)
    '''
def group_flights(self, fl_pairs):
df = pd.DataFrame()
for index, tuple in enumerate(fl_pairs):
org_airport = tuple[0]
dest_airport = tuple[1]
df = df.append(self.flights_df[(self.flights_df['ORIGIN'].isin(
[org_airport])) & (self.flights_df['DEST'].isin([dest_airport]))])
df.drop(df.filter(regex="Unname"), axis=1, inplace=True)
df = df.sort_values('ORIGIN_UTC')
df.reset_index(drop=True, inplace=True)
return df
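
    # A minimal usage sketch (illustrative only; the airport codes and file
    # names below are hypothetical, not taken from real data):
    #   fc = FlightCategorizer("ATL,MSP", ["CVG", "EWR"], "clean_flights.csv",
    #                          ["b2b.csv", "b2b_missing.csv", "b2nb_nb2b.csv",
    #                           "b2nb_nb2b_missing.csv", "nb2nb.csv"])
    #   fc.process()   # reads DATA_DIR + clean_output and writes the csv outputs;
    #                  # internally group_flights() is called with pairs such as
    #                  # [("ATL", "MSP"), ("MSP", "ATL")] to pull out each category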
    '''
    This method creates flight pairs that depart from a base airport
    and arrive at a base airport. The pairs will be combined to create
    a Duty. b2b_df contains all B2B flights.
    '''
def assemble_B2B_flights(self, b2b_df):
b2b_df['ORIGIN_UTC'] = pd.to_datetime(b2b_df['ORIGIN_UTC'])
b2b_df["DEST_UTC"] = pd.to_datetime(b2b_df['DEST_UTC'])
finished_pair = []
aiport_pair = list(itertools.permutations(self.fa_bases, 2))
final_df = pd.DataFrame()
missing_df = pd.DataFrame()
for index, tuple in enumerate(aiport_pair):
org_airport = tuple[0]
dest_airport = tuple[1]
if (org_airport, dest_airport) in finished_pair:
continue
org_df = b2b_df[(b2b_df['ORIGIN'].isin([org_airport])) & (
b2b_df['DEST'].isin([dest_airport]))]
org_df.drop(org_df.filter(regex="Unname"), axis=1, inplace=True)
org_df = org_df.sort_values('ORIGIN_UTC')
org_df.reset_index(drop=True, inplace=True)
dest_df = b2b_df[(b2b_df['ORIGIN'].isin([dest_airport])) & (
b2b_df['DEST'].isin([org_airport]))]
dest_df = dest_df.drop(dest_df.filter(regex="Unname"), axis=1)
dest_df = dest_df.sort_values('ORIGIN_UTC')
dest_df.reset_index(drop=True, inplace=True)
if (len(org_df) != 0) and (len(dest_df) != 0):
                # org_df contains all flights with the same origin (e.g. ATL) and
                # dest_df contains all flights with the same destination (e.g. MSP).
                # assemble_B2NB_NB2B_flights finds the best flight to match the
                # origin and destination legs
d1, d2 = self.assemble_B2NB_NB2B_flights(org_df, dest_df)
final_df = final_df.append(d1)
missing_df = missing_df.append(d2)
else:
missing_df = missing_df.append(org_df)
missing_df = missing_df.append(dest_df)
finished_pair.append((dest_airport, org_airport))
        # final_df contains flight pairs that start and end in the same base
        # missing_df contains flights that start in a base but couldn't be paired
return final_df, missing_df
    '''
    This method creates flight pairs for B2NB and NB2B. These two
    pairs will be combined to create a Duty.
    B2NB - flights that depart from a base airport and arrive at a non-base airport
    NB2B - flights that depart from a non-base airport and arrive at a base airport
    '''
def assemble_B2NB_NB2B_flights(self, df1, df2):
total = len(df1)+len(df2)
print("toal entering df1=", len(df1),
"len(df2)=", len(df2), "total=", total)
final_duty_df = pd.DataFrame()
missing_df = pd.DataFrame()
df1['ORIGIN_UTC'] = pd.to_datetime(df1['ORIGIN_UTC'])
df1["DEST_UTC"] = pd.to_datetime(df1['DEST_UTC'])
df2['ORIGIN_UTC'] = pd.to_datetime(df2['ORIGIN_UTC'])
df2["DEST_UTC"] = pd.to_datetime(df2['DEST_UTC'])
df1 = df1.sort_values('ORIGIN_UTC')
df2 = df2.sort_values('ORIGIN_UTC')
for index, first_fl_pair in df1.iterrows():
print("len of final_duty_df=", len(final_duty_df))
org_airport = first_fl_pair["ORIGIN"]
dest_airport = first_fl_pair["DEST"]
            # Collect all the matching flight pairs for the second pair
matching_fl_pairs = df2[(df2.ORIGIN == dest_airport)
& (df2.DEST == org_airport)]
total = len(matching_fl_pairs)
matching_fl_pairs.reset_index(drop=True, inplace=True)
            # for each first_fl_pair, loop through matching_fl_pairs to find a
            # matching return flight whose ORIGIN_UTC is more than 45 minutes
            # after the first_fl_pair DEST_UTC
i = 0
temp1 = ""
while(i < total):
if matching_fl_pairs.iloc[i].ORIGIN_UTC > first_fl_pair['DEST_UTC'] + \
datetime.timedelta(minutes=+45):
temp1 = matching_fl_pairs.iloc[i]
break
else:
i += 1
continue
if(len(temp1) > 0):
del_id = temp1.FL_ID
final_duty_df = final_duty_df.append(first_fl_pair)
final_duty_df = final_duty_df.append(temp1)
df2 = df2[df2.FL_ID != del_id]
else:
missing_df = missing_df.append(first_fl_pair)
missing_df = missing_df.append(df2)
return final_duty_df, missing_df
    '''
    Duty Report time is 45 minutes before the flight departure time.
    Duty Release time is 45 minutes after the flight arrival time.
    (See the worked example after this function.)
    '''
def calculateDuty(self, df):
if(len(df) == 0):
return
df['DUTY_REP_TM_UTC'] = ''
df['DUTY_REL_TM_UTC'] = ''
dtyRepTmUTC = []
dtyRelTmUTC = []
df['ORIGIN_UTC'] = pd.to_datetime(df['ORIGIN_UTC'])
df["DEST_UTC"] = pd.to_datetime(df['DEST_UTC'])
first = True
for orgUTC, destUTC in zip(df.ORIGIN_UTC, df.DEST_UTC):
if first:
temp1 = orgUTC+datetime.timedelta(minutes=-45)
dtyRepTmUTC.append(temp1)
first = False
else:
dtyRepTmUTC.append(temp1)
dtyRelTmUTC.append(destUTC+datetime.timedelta(minutes=+45))
dtyRelTmUTC.append(destUTC+datetime.timedelta(minutes=+45))
first = True
if len(dtyRepTmUTC) != len(dtyRelTmUTC):
dtyRepTmUTC.pop()
dtyRepTmUTC.append("")
dtyRelTmUTC.append("")
df["DUTY_REP_TM_UTC"] = dtyRepTmUTC
df["DUTY_REL_TM_UTC"] = dtyRelTmUTC
df.reset_index(drop=True, inplace=True)
return df
# return calculate_dutyNo(df)
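
    # Worked example for calculateDuty (hypothetical times, illustration only):
    # for a flight pair departing 2020-02-01 10:00 UTC and arriving back at
    # 2020-02-01 13:30 UTC, the pair gets
    #   DUTY_REP_TM_UTC = 2020-02-01 09:15  (first departure - 45 min)
    #   DUTY_REL_TM_UTC = 2020-02-01 14:15  (last arrival   + 45 min)
    # and both rows of the pair share the same report/release times.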
    '''
    If the total duty time (DUTY_REL_TM_UTC - DUTY_REP_TM_UTC) for the flight
    pair is greater than 6 hours,
    then the two flights in the pair get separate dutyIds.
    Otherwise they share the same dutyId.
    '''
def calculate_dutyNo(self, df):
df.reset_index(drop=True, inplace=True)
df['DTY_REP_TM_UTC'] = | pd.to_datetime(df['DTY_REP_TM_UTC'], utc=True) | pandas.to_datetime |
from flask import Flask, session, jsonify, request
import pandas as pd
import numpy as np
import pickle
import os
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import json
import glob
#################Load config.json and get path variables
with open('config.json','r') as f:
config = json.load(f)
model_path = os.path.join(config['output_model_path'])
test_data_path = os.path.join(config['test_data_path'])
#################Function for model scoring
def score_model():
#this function should take a trained model, load test data, and calculate an F1 score for the model relative to the test data
#it should write the result to the latestscore.txt file
all_files = glob.glob(test_data_path + "/*.csv")
df_from_each_file = (pd.read_csv(f) for f in all_files)
test_df = | pd.concat(df_from_each_file, ignore_index=True) | pandas.concat |
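
#################Completion sketch (assumptions flagged below)
# A minimal, hedged sketch of how the scoring could be finished once test_df is
# built: the model file name 'trainedmodel.pkl', the label column 'exited' and
# writing latestscore.txt into model_path are assumptions, not confirmed above.
def score_model_sketch(test_df):
    with open(os.path.join(model_path, 'trainedmodel.pkl'), 'rb') as modelfile:  # assumed file name
        model = pickle.load(modelfile)
    X = test_df.drop(['exited'], axis=1)  # assumed label column name
    y = test_df['exited']
    preds = model.predict(X)
    f1 = f1_score(y, preds)
    with open(os.path.join(model_path, 'latestscore.txt'), 'w') as scorefile:  # assumed output location
        scorefile.write(str(f1))
    return f1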
from dataExtractor import reviewToList
from dataExtractor200 import dataExtractor200
import numpy as np
import random
"""IMPORTING FILES"""
reviewList_p = dataExtractor200("positive.review")
reviewList_n = dataExtractor200("negative.review")
X_pos_test = reviewList_p[1055:]
X_neg_test = reviewList_n[665:]
"""Y_train and Y_test"""
Y_train = [] # 0 and 1
Y_test = []
for item in range(1720):
if(item<=665):
Y_train.append(0)
else:
Y_train.append(1)
#random.shuffle(Y_train)
print("Y_train shape :" +str(len(Y_train)))
for item in range(431):
if(item<=166):
Y_test.append(0)
else:
Y_test.append(1)
#random.shuffle(Y_test)
print("Y_test shape: "+str(len(Y_test)))
print(Y_test)
"""X_Train and X_test"""
X_train =[]
pos_index = 0
neg_index =0
for item in range(len(Y_train)):
if(Y_train[item]==0):
X_train.append(reviewList_n[item-neg_index])
pos_index +=1
elif(Y_train[item]==1):
X_train.append(reviewList_p[item-pos_index])
neg_index +=1
print("X_train shape: "+str(len(X_train)))
X_test =[]
X_neg_index = 0
X_pos_index =0
for item in range(len(Y_test)):
if(Y_test[item]==0):
X_test.append(X_neg_test[X_neg_index])
X_neg_index +=1
elif(Y_test[item]==1):
X_test.append(X_pos_test[X_pos_index])
X_pos_index +=1
print("Test Y and X--"+str(Y_test[0])+" : "+X_test[0])
print("Train Y and X--"+str(Y_train[0])+" : "+X_train[0])
print("X_test shape: "+str(len(X_test)))
if(len(X_test)!=len(Y_test) or len(X_train)!=len(Y_train)):
    print("X and Y shapes do not match!")
else:
    print("Data preparation done \n --Starting data cleaning.")
#----------------------------------------------------------------------------------
"""DATA CLEANING"""
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.feature_extraction._stop_words import ENGLISH_STOP_WORDS
import string
import re
tokenizer = RegexpTokenizer(r'\w+') # a-zA-Z0-9
en_stopwords = set(stopwords.words('english'))
en_stopwordsv2 = set(en_stopwords).union(set(ENGLISH_STOP_WORDS))
ps = PorterStemmer()
def getCleanedText(text):
words = re.sub(r"[^A-Za-z\-]", " ",text).lower().split()
tokens = [w for w in words if not w in en_stopwordsv2]
lemmatizer = WordNetLemmatizer()
cleaned_text = " ".join([lemmatizer.lemmatize(token) for token in tokens])
return cleaned_text
X_train_clean = [getCleanedText(i) for i in X_train]
print(X_train_clean[0])
X_test_clean = [getCleanedText(i) for i in X_test]
print("Data temizleme tamam \n --Vektörize giriliyor.")
"""VECTORIZATION"""
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(ngram_range=(1,2),
max_df =0.5,
min_df=10)
# The cleaned text is vectorized as n-grams and converted to an array.
X_vec = cv.fit_transform(X_train_clean).toarray() # training data
X_test_vect = cv.transform(X_test_clean).toarray()
print("Data vectorization done \n --Results will be shown.")
print("Running the machine learning algorithm.")
from sklearn.naive_bayes import MultinomialNB
from sklearn import model_selection, svm
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
mn = MultinomialNB()
mn.fit(X_vec,Y_train) # X_vec = vectorized texts, Y_train = list of 0/1 labels
print(X_vec.shape)
print(X_test_vect.shape)
Y_test_pred = mn.predict(X_test_vect)
print(Y_test_pred)
print("Naive bayes accuracy score : ",accuracy_score(Y_test_pred,Y_test)*100)
print(classification_report(Y_test,Y_test_pred))
cnf_matrix = confusion_matrix(Y_test,Y_test_pred)
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
labels = [0, 1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels)
plt.yticks(tick_marks, labels)
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu", fmt='g')
ax.xaxis.set_label_position("bottom")
plt.title('Confusion matrix')
plt.ylabel('Actual Value')
plt.xlabel('Predicted')
plt.show()
print("--------------------------------------------")
print("SVM çalışıyor.")
SVM = svm.SVC(C=0.6995,kernel='linear',gamma='auto')
SVM.fit(X_vec,Y_train)
predict_SVM = SVM.predict(X_test_vect)
print("SVM bitti.")
print(predict_SVM)
print("For linear kernel")
print("SVM accuracy score : ", accuracy_score(predict_SVM,Y_test)*100)
print(classification_report(Y_test,predict_SVM))
cnf_matrix = confusion_matrix(Y_test,predict_SVM)
import seaborn as sns
import pandas as pd
labels = [0, 1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels)
plt.yticks(tick_marks, labels)
sns.heatmap( | pd.DataFrame(cnf_matrix) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from cause.plotter import Plotter
from cause.predictor import ClassificationSet
class Breakdown():
def __init__(self, data, weights, algos, name):
self.__data = data
self.__weights = weights
self.__algos = algos
self.__name = name
# todo validate input:
# data is an np.array with dims (num algos, num weights)
@property
def data(self):
return self.__data
@property
def weights(self):
return self.__weights
@property
def algos(self):
return self.__algos
@property
def name(self):
return self.__name
def save_to_latex(self, outfolder="/tmp", weight=1.):
outfile = "%s/breakdown_%s" % (outfolder, self.name)
index = np.where(self.weights==weight)[0][0] # location for lambda=weight
breakdown_perc = self.data[:,index] * 100. / self.data[:,index].sum()
# write latex table to file
with open(outfile, "w") as f:
for algo in range(self.data.shape[0]):
f.write("&\t%s\t&\t%.2f\\%%\t\t\n" % (
self.data[algo, index], breakdown_perc[algo]))
def plot(self, outfolder="/tmp"):
Plotter.plot_breakdown(self, outfolder)
class Postprocessor():
def __init__(self, dataset):
self.__dataset = dataset
@property
def dataset(self):
return self.__dataset
def breakdown(self):
breakdown = np.empty(shape=(0,0))
for weight in self.dataset.weights:
column = self.dataset.lstats[weight].get_breakdown(self.dataset.algos)
if breakdown.shape[0] == 0:
breakdown = column
else:
breakdown = np.vstack([breakdown, column])
breakdown = np.transpose(breakdown)
return Breakdown(breakdown, self.dataset.weights,
self.dataset.algos, self.dataset.name)
class FeatsPostprocessor(Postprocessor):
def __init__(self, dataset, features):
super().__init__(dataset)
self.__features = features
@property
def features(self):
return self.__features
def save_feature_importances_by_weight(self, outfolder, weight):
lstats = self.dataset.lstats[weight]
clsset = ClassificationSet.sanitize_and_init(
self.features.features, lstats.winners, lstats.costs)
clf = ExtraTreesClassifier()
clf = clf.fit(clsset.X, clsset.y.ravel())
importances = pd.DataFrame(data=clf.feature_importances_.reshape(
(1, len(clf.feature_importances_))
),
columns=self.features.features.columns)
# sort feature names by average importance
sorted_feature_names = [name for _,name in
sorted(zip(importances.mean(axis=0), self.features.features.columns))
][::-1]
importances = importances[sorted_feature_names]
feats = pd.DataFrame(columns=["order", "value", "name"])
feats["order"] = np.arange(len(self.features.features.columns))[::-1]
feats["value"] = np.transpose(importances.values)
feats["name"] = sorted_feature_names
feats.to_csv("%s/feats_%.1f" % (outfolder, weight),
sep="&", index=False, line_terminator="\\\\\n")
def save_feature_importances(self, outfolder):
# compute feature importances for each weight
importances = np.empty(shape=(0,0))
for weight in self.dataset.weights:
lstats = self.dataset.lstats[weight]
clsset = ClassificationSet.sanitize_and_init(
self.features.features, lstats.winners, lstats.costs)
clf = ExtraTreesClassifier()
clf = clf.fit(clsset.X, clsset.y)
if importances.shape[0] == 0:
importances = clf.feature_importances_
else:
importances = np.vstack([importances, clf.feature_importances_])
# sort feature names by average importance
sorted_feature_names = [name for _,name in
sorted(zip(importances.mean(axis=0), self.features.features.columns))
][::-1]
importances = pd.DataFrame(data=importances, columns=self.features.features.columns)
importances = importances[sorted_feature_names]
feats = | pd.DataFrame(columns=["order", "value", "name", "error"]) | pandas.DataFrame |
'''Python script to generate Revenue Analysis given ARR by Customer'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class RevAnalysis:
def __init__(self, json):
print("INIT REV ANALYSIS")
self.arr = pd.DataFrame(json)
self.years = []
self.rev_brackets = {}
self.cust_brackets = {}
def run(self):
self.clean_inputs()
print(self.arr)
self.mrr_by_customer()
self.rev_cohorts()
self.cy_ttm_revenue()
self.revenue_brackets("CY", "TTM")
self.customer_brackets("CY", "TTM")
self.revenue_brackets("ARR", "ARR*")
self.customer_brackets("ARR", "ARR*")
self.clean_outputs()
json = {
"MRR by Customer": self.mrr.to_dict(orient='records'),
"Revenue Cohorts (Monthly)": self.rev_cohorts.to_dict(orient='records'),
"Revenue Calculations": self.cy_ttm_revenue.to_dict(orient='records'),
"Revenue Brackets (CY, TTM)": self.rev_brackets["CY"].to_dict(orient='records'),
"Customer Brackets (CY, TTM)": self.cust_brackets["CY"].to_dict(orient='records'),
"Revenue Brackets (ARR)": self.rev_brackets["ARR"].to_dict(orient='records'),
"Customer Brackets (ARR)": self.cust_brackets["ARR"].to_dict(orient='records')
}
return json
def clean_inputs(self):
self.arr.set_index("Customer", inplace=True)
self.arr.apply(filter_to_dec_list)
def clean_outputs(self):
self.mrr = self.mrr.astype(object)
self.mrr.apply(zero_to_blank_list)
self.mrr.apply(dec_to_dollars_list)
self.mrr.reset_index(inplace=True)
self.rev_cohorts = self.rev_cohorts.astype(object)
self.rev_cohorts.iloc[:, 1:-1] = self.rev_cohorts.iloc[:, 1:-1].apply(zero_to_blank_list)
self.rev_cohorts.iloc[:, 1:-1] = self.rev_cohorts.iloc[:, 1:-1].apply(dec_to_dollars_list)
self.rev_cohorts.reset_index(inplace=True)
cy = [col for col in self.cy_ttm_revenue.columns if "CY" in col and "YOY" not in col]
ttm = [col for col in self.cy_ttm_revenue.columns if "TTM" in col]
yoy = [col for col in self.cy_ttm_revenue.columns if "YOY" in col]
yoy_indices = [i for i in range(self.cy_ttm_revenue.shape[1]) if "YOY" in self.cy_ttm_revenue.columns[i] or "Total ARR" in self.cy_ttm_revenue.columns[i]]
not_yoy_indices = list(set(range(self.cy_ttm_revenue.shape[1])) - set(yoy_indices))
arr = [col for col in self.cy_ttm_revenue.columns if "ARR" in col]
self.cy_ttm_revenue = self.cy_ttm_revenue.astype(object)
self.cy_ttm_revenue.apply(zero_to_blank_list)
self.cy_ttm_revenue.iloc[:, not_yoy_indices] = self.cy_ttm_revenue.iloc[:, not_yoy_indices].apply(dec_to_dollars_list)
self.cy_ttm_revenue.iloc[:, yoy_indices] = self.cy_ttm_revenue.iloc[:, yoy_indices].apply(dec_to_percents_list)
self.cy_ttm_revenue.sort_values(self.cy_ttm_revenue.columns[-1])
self.cy_ttm_revenue = self.cy_ttm_revenue.reindex(cy + ttm + yoy + arr, axis=1)
self.cy_ttm_revenue.reset_index(inplace=True)
self.clean_brackets_outputs("CY", "TTM")
self.clean_brackets_outputs("ARR", "ARR*")
print("MRR BY CUSTOMER")
print(self.mrr)
print("REVENUE COHORTS")
print(self.rev_cohorts)
print("CY TTM ARR")
print(self.cy_ttm_revenue)
print("CY TTM BRACKETS")
print(self.rev_brackets["CY"])
print("REVENUE CUSTOMER BRACKETS")
print(self.cust_brackets["CY"])
print("ARR BRACKETS")
print(self.rev_brackets["ARR"])
print("ARR CUSTOMER BRACKETS")
print(self.cust_brackets["ARR"])
def clean_brackets_outputs(self, type, not_type):
cy_only = [col for col in self.rev_brackets[type].columns if type in col and not_type not in col and "% Rev" not in col]
cy_rev = [col for col in self.rev_brackets[type].columns if "% Rev" in col and not_type not in col]
new_cy = [j for i in zip(cy_only,cy_rev) for j in i]
ttm_all = [col for col in self.rev_brackets[type].columns if not_type in col]
rev_indices = [i for i in range(self.rev_brackets[type].shape[1]) if "% Rev" in self.rev_brackets[type].columns[i]]
not_rev_indices = list(set(range(self.rev_brackets[type].shape[1])) - set(rev_indices))
self.rev_brackets[type] = self.rev_brackets[type].astype(object)
self.rev_brackets[type].iloc[:, not_rev_indices] = self.rev_brackets[type].iloc[:, not_rev_indices].apply(numbers_with_commas_list)
self.rev_brackets[type].iloc[:, rev_indices] = self.rev_brackets[type].iloc[:, rev_indices].apply(dec_to_percents_list)
self.rev_brackets[type] = self.rev_brackets[type].reindex(new_cy + ttm_all, axis=1)
self.rev_brackets[type].index = self.rev_brackets[type].index.map(dec_to_dollars)
self.rev_brackets[type].reset_index(inplace=True)
self.cust_brackets[type] = self.cust_brackets[type].astype(object)
self.cust_brackets[type].apply(numbers_with_commas_list)
self.cust_brackets[type].index = self.cust_brackets[type].index.map(dec_to_dollars)
cust_brackets_index = self.cust_brackets[type].index
index_labels_dict = {cust_brackets_index[i]: str(cust_brackets_index[i])+"-"+str(cust_brackets_index[i+1]) for i in range(len(cust_brackets_index)-1)}
index_labels_dict[cust_brackets_index[-1]] = str(cust_brackets_index[-1])+'+'
self.cust_brackets[type].rename(index=index_labels_dict, inplace=True)
self.cust_brackets[type].reset_index(inplace=True)
def mrr_by_customer(self):
self.mrr = self.arr.copy()/12
self.mrr.loc["Grand Total"] = self.mrr.sum()
self.mrr.loc["ARR"] = (self.mrr.loc["Grand Total"]*12).iloc[0]
# Only keep the last 3 years
self.years = pd.to_datetime(self.mrr.columns).strftime('%Y')
counter = collections.Counter(self.years)
num_trailing_months = counter[max(counter.keys())]
del counter[max(counter.keys())]
last_index = min(36, 12*len(counter.keys())) + num_trailing_months
self.mrr = self.mrr.iloc[:, -last_index:]
self.years = pd.to_datetime(self.mrr.columns).strftime('%Y')
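
        # Illustration of the trailing-window logic above (hypothetical dates):
        # with monthly columns from 2017-01 through 2020-03, the latest year
        # (2020) contributes num_trailing_months = 3, the earlier full years are
        # capped at 36 columns, so only the last 39 monthly columns are kept.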
def rev_cohorts(self):
first_rev = np.argmax(self.mrr.values!=0.0,axis=1)
last_rev = self.mrr.shape[1] - np.argmax(self.mrr.iloc[:, ::-1].values!=0.0,axis=1) - 1
self.rev_cohorts = pd.DataFrame(index=np.arange(self.mrr.shape[0]))
self.rev_cohorts.set_index(self.mrr.index, inplace=True)
self.rev_cohorts['Cohort'] = self.mrr.columns[first_rev]
self.rev_cohorts['Cohort'] = | pd.to_datetime(self.rev_cohorts['Cohort']) | pandas.to_datetime |
import pandas as pd
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
class spatial_mapping():
def __init__(self, data, gps, gps_utc=0):
df=pd.DataFrame(data)
        df[0]=pd.to_datetime(df[0]-693962,unit='D',origin=pd.Timestamp('1900-01-01'),utc=True) # convert MATLAB datenum to UTC timestamps (693962 = datenum of 1900-01-01)
df=df.rename(columns={0:'Time'})
self.data=df
if type(gps)==str:
if gps[-3:].lower()=='csv':
self.gps=pd.read_csv(gps)
if 'time' in self.gps:
self.gps.time=pd.to_datetime(self.gps.time)+timedelta(hours=gps_utc)
def extract_fragments(self, fragments, mean=True, fragment_method='time'):
        # fragments: a csv file that contains the beginning and ending times of recording sessions,
        # or a time interval (seconds) used to separate recording sessions
if type(fragments)==str:
if fragments[-3:].lower()=='csv':
slice_df=pd.read_csv(fragments, sep=',')
if fragment_method=='time':
slice_df['Begin_time']=pd.to_datetime(slice_df['Begin_time'],utc=True)
slice_df['End_time']= | pd.to_datetime(slice_df['End_time'],utc=True) | pandas.to_datetime |
#!/usr/bin/env python
# -- coding: utf-8 --
# Packages required to run the operational (OP) model.
import netCDF4
import pandas as pd
import numpy as np
import datetime as dt
import json
import wmf.wmf as wmf
import hydroeval
import glob
import MySQLdb
#modulo pa correr modelo
import hidrologia
from sklearn.linear_model import LinearRegression
import math
import os
#spatial
import cartopy.crs as crs
import geopandas as gpd
import pyproj
from pyproj import transform
from cartopy.feature import ShapelyFeature
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context('notebook', font_scale=1.13)
#FORMAT
# font
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#avoid warnings
import warnings
warnings.filterwarnings('ignore')
#---------------
#Base functions.
#---------------
def get_rutesList(rutas):
    ''' Opens the text file at the path `rutas` and returns a list with the lines of that file.
        Base function.
        #Arguments
        rutas: string, path to the file.
'''
f = open(rutas,'r')
L = f.readlines()
f.close()
return L
def set_modelsettings(ConfigList):
ruta_modelset = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_modelset')
# model settings Json
with open(ruta_modelset, 'r') as f:
model_set = json.load(f)
# Model set
wmf.models.max_aquifer = wmf.models.max_gravita * 10
wmf.models.retorno = model_set['retorno']
wmf.models.show_storage = model_set['show_storage']
wmf.models.separate_fluxes = model_set['separate_fluxes']
wmf.models.dt = model_set['dt']
def round_time(date = dt.datetime.now(),round_mins=5):
'''
Rounds datetime object to nearest 'round_time' minutes.
If 'dif' is < 'round_time'/2 takes minute behind, else takesminute ahead.
Parameters
----------
date : date to round
round_mins : round to this nearest minutes interval
Returns
----------
datetime object rounded, datetime object
'''
dif = date.minute % round_mins
if dif <= round_mins/2:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins))
else:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) + dt.timedelta(minutes=round_mins)
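
# Example behaviour of round_time (illustrative):
#   round_time(dt.datetime(2020, 1, 1, 10, 7), round_mins=5) -> 2020-01-01 10:05
#   round_time(dt.datetime(2020, 1, 1, 10, 8), round_mins=5) -> 2020-01-01 10:10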
def get_credentials(ruta_credenciales):
credentials = json.load(open(ruta_credenciales))
    # credentials for database queries
mysqlServer = credentials['MySql_Siata']
for key in np.sort(list(credentials['MySql_Siata'].keys()))[::-1]: #1:hal, 2:sal
try:
connection = MySQLdb.connect(host=mysqlServer[key]['host'],
user=mysqlServer[key]['user'],
password=mysqlServer[key]['password'],
db=mysqlServer[key]['db'])
print('SERVER_CON: Succesful connection to %s'%(key))
host=mysqlServer[key]['host']
user=mysqlServer[key]['user']
password=mysqlServer[key]['password']
db=mysqlServer[key]['db']
            break # stop once a connection (ideally to SAL) succeeds.
except:
print('SERVER_CON: No connection to %s'%(key))
pass
#creds para copiar a var
user2copy2var = credentials['cred_2copy2var']['user']; host2copy2var = credentials['cred_2copy2var']['host']
return host,user,password,db,user2copy2var,host2copy2var
def coord2hillID(ruta_nc, df_coordxy):
    # read the simubasin to associate reaches and get the basic topology
cu = wmf.SimuBasin(rute= ruta_nc)
cu.GetGeo_Cell_Basics()
cu.GetGeo_Parameters()
    # get the coordinates of the whole simubasin and the distances between them
coordsX = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[0]
coordsY = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[1]
disty = np.unique(np.diff(np.unique(np.sort(coordsY))))
distx = np.unique(np.diff(np.unique(np.sort(coordsX))))
df_ids = pd.DataFrame(index = df_coordxy.index,columns=['id'])
    # identify the hillslope id where each point falls
for index in df_coordxy.index:
df_ids.loc[index]=cu.hills_own[np.where((coordsY+disty[0]/2>df_coordxy.loc[index].values[1]) & (coordsY-disty[0]/2<df_coordxy.loc[index].values[1]) & (coordsX+distx[0]/2>df_coordxy.loc[index].values[0]) & (coordsX-distx[0]/2<df_coordxy.loc[index].values[0]))[0]].data
return df_ids
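
# A hedged usage sketch of coord2hillID (the .nc path and coordinates below are
# illustrative assumptions):
#   df_xy = pd.DataFrame({'x': [-75.57, -75.60], 'y': [6.25, 6.20]}, index=['st1', 'st2'])
#   df_ids = coord2hillID('basin.nc', df_xy)   # hillslope id of the cell containing each point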
#-----------------------------------
#-----------------------------------
#Functions for reading the config file
#-----------------------------------
#-----------------------------------
def get_ruta(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the path it contains.
        Base function.
        #Arguments
        RutesList: list returned by get_rutesList() in this script.
        key: string, key used to find the line in the list that starts with it.
'''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i.split(' ')[-1][:-1]
else:
return 'Aviso: no existe linea con el key especificado'
def get_line(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the line fields.
        Base function.
        #Arguments
        RutesList: list returned by get_rutesList() in this script.
        key: string, key used to find the line in the list that starts with it.
'''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i[:-1].split(' ')[2:]
else:
return 'Aviso: no existe linea con el key especificado'
def get_modelPlot(RutesList, PlotType = 'Qsim_map'):
    ''' #Returns a dictionary with the information of the Plot table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, the result of reading the configfile with al.get_rutesList.
        - PlotType= string, type of plot. Default= 'Qsim_map'.
'''
for l in RutesList:
key = l.split('|')[1].rstrip().lstrip()
if key[3:] == PlotType:
EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')]
return EjecsList
return key
def get_modelPars(RutesList):
    ''' #Returns a dictionary with the information of the Calib table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, the result of reading the configfile with al.get_rutesList.
'''
DCalib = {}
for l in RutesList:
c = [float(i) for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c})
return DCalib
def get_modelPaths(List):
    ''' #Returns a dictionary with the information of the Paths table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, the result of reading the configfile with al.get_rutesList.
'''
DCalib = {}
for l in List:
c = [i for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c[0]})
return DCalib
def get_modelStore(RutesList):
    ''' #Returns a dictionary with the information of the Store table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, the result of reading the configfile with al.get_rutesList.
'''
DStore = {}
for l in RutesList:
l = l.split('|')
DStore.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'Actualizar': l[3].rstrip().lstrip(),
'Tiempo': float(l[4].rstrip().lstrip()),
'Condition': l[5].rstrip().lstrip(),
'Calib': l[6].rstrip().lstrip(),
'BackSto': l[7].rstrip().lstrip(),
'Slides': l[8].rstrip().lstrip()}})
return DStore
def get_modelStoreLastUpdate(RutesList):
    ''' #Returns a dictionary with the information of the Update table in the configfile.
        #Operational function.
        #Arguments:
        - RutesList= list, the result of reading the configfile with al.get_rutesList.
'''
DStoreUpdate = {}
for l in RutesList:
l = l.split('|')
DStoreUpdate.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'LastUpdate': l[3].rstrip().lstrip()}})
return DStoreUpdate
def get_ConfigLines(RutesList, key, keyTable = None, PlotType = None):
    ''' #Returns a dictionary with the information of the tables in the configfile: Calib, Store, Update, Plot.
        #Operational function.
        #Arguments:
        - RutesList= list, the result of reading the configfile with al.get_rutesList.
        - key= string, keyword of the table to read. Can be: -s, -t.
        - keyTable= string, name of the table to read. Can be: Pars, Paths, Store, Update, Plot.
        - PlotType= string, type of plot. Default= None.
'''
List = []
for i in RutesList:
if i.startswith('|'+key) or i.startswith('| '+key):
List.append(i)
if len(List)>0:
if keyTable == 'Pars':
return get_modelPars(List)
if keyTable == 'Paths':
return get_modelPaths(List)
if keyTable == 'Store':
return get_modelStore(List)
if keyTable == 'Update':
return get_modelStoreLastUpdate(List)
if keyTable == 'Plot':
return get_modelPlot(List, PlotType=PlotType)
return List
else:
return 'Aviso: no se encuentran lineas con el key de inicio especificado.'
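
# A hedged usage sketch of the config readers above (the configfile path and the
# key-to-table pairing are assumptions, not taken from a real configfile):
#   ConfigList = get_rutesList('configfile.md')
#   DStore = get_ConfigLines(ConfigList, '-s', keyTable='Store')   # assumed: '-s' marks the Store table
#   DPars  = get_ConfigLines(ConfigList, '-t', keyTable='Pars')    # assumed: '-t' marks the parameter table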
#-----------------------------------
#-----------------------------------
#Radar rainfall generation functions
#-----------------------------------
#-----------------------------------
def file_format(start,end):
'''
Returns the file format customized for siata for elements containing
starting and ending point
Parameters
----------
start : initial date
end : final date
Returns
----------
file format with datetimes like %Y%m%d%H%M
    Example
    ----------
    file_format('2018-01-01 00:00','2018-01-02 00:00') -> '201801010000-201801020000'
'''
start,end = pd.to_datetime(start),pd.to_datetime(end)
format = '%Y%m%d%H%M'
return '%s-%s'%(start.strftime(format),end.strftime(format))
def hdr_to_series(path):
'''
Reads hdr rain files and converts it into pandas Series
Parameters
----------
path : path to .hdr file
Returns
----------
pandas time Series with mean radar rain
'''
s = pd.read_csv(path,skiprows=5,usecols=[2,3]).set_index(' Fecha ')[' Lluvia']
s.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],s.index)))
return s
def hdr_to_df(path):
'''
Reads hdr rain files and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr file
Returns
----------
pandas DataFrame with mean radar rain
'''
if path.endswith('.hdr') != True:
path = path+'.hdr'
df = pd.read_csv(path,skiprows=5).set_index(' Fecha ')
df.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],df.index)))
df = df.drop('IDfecha',axis=1)
df.columns = ['record','mean_rain']
return df
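
# A hedged usage sketch (the path below is an assumption):
#   s  = hdr_to_series('/ruta/rain_basin.hdr')   # mean radar rain as a time Series
#   df = hdr_to_df('/ruta/rain_basin')           # adds the binary record number per timestep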
def bin_to_df(path,ncells,start=None,end=None,**kwargs):
'''
Reads rain fields (.bin) and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr and .bin file
start : initial date
end : final date
Returns
----------
pandas DataFrame with mean radar rain
Note
----------
path without extension, ejm folder_path/file not folder_path/file.bin,
if start and end is None, the program process all the data
'''
    start,end = pd.to_datetime(start),pd.to_datetime(end)
    df = hdr_to_df(path)                      # read the .hdr index to get the record numbers
    if pd.notnull(start) and pd.notnull(end):
        df = df.loc[start:end]                # keep only the requested period
    records = df['record'].values
rain_field = []
for count,record in enumerate(records):
if record != 1:
rain_field.append(wmf.models.read_int_basin('%s.bin'%path,record,ncells)[0]/1000.0)
count = count+1
# format = (count*100.0/len(records),count,len(records))
else:
rain_field.append(np.zeros(ncells))
return pd.DataFrame(np.matrix(rain_field),index=df.index)
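
# A hedged usage sketch of bin_to_df (paths and dates are assumptions; ncells
# comes from the simubasin the binary was written with):
#   cu = wmf.SimuBasin(rute='basin.nc')
#   fields = bin_to_df('/ruta/rain_basin', cu.ncells,
#                      start='2020-02-01 00:00', end='2020-02-01 06:00')
#   # one row per timestep, one column per basin cell (rain in mm)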
def get_radar_rain(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
mask=None,meanrain_ALL=True,path_masks_csv=None,complete_naninaccum=False,save_bin=False,
save_class = False,path_res=None,umbral=0.005,
verbose=True, zero_fill = None):
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de rutas y la de las fechas a las que corresponde cada ruta.
ListRutas.sort()
ListDatesinNC.sort()#con estas fechas se asignaran los barridos a cada timestep.
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
    # Gets the scan positions that fall within each dt; if there is no scan in a
    # given time step, the immediately preceding scan is reused (accumulated).
    # Builds a list with the scan positions for each timestep and appends it to PosDates.
    # The limit for filling gaps with the previous scan is 10 min, so filling only applies when Dt = 300 s,
    # i.e. at most one scan is repeated.
PosDates = []
pos1 = []
pos_completed = []
lim_completed = 3 #ultimos 3 barridos - 15min
for ind,d1,d2 in zip(np.arange(datesDt[:-1].size),datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
        # if there are no scans in this dt, it is filled with zero (empty list)
        # unless: we are not in the first 3 steps (15 min),
        # gap-filling with the previous scan is enabled,
        # and in the previous lim_completed steps there were at most lim_completed-1 positions with pos_completed=2,
        # so that gap-filling (pos_completed=2) can only be applied once in a row.
if len(pos2) == 0 and ind not in np.arange(lim_completed) and complete_naninaccum == True and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 : #+1 porque coge los ultimos n-1 posiciones.
pos2 = pos1
pos_completed.append(2)
elif len(pos2) == 0:
pos2=[]
pos_completed.append(0)
else:
pos_completed.append(1)
        # if gap-filling is enabled and there are scans in this dt, keep these positions in case the next dt needs to be filled
if complete_naninaccum == True and len(pos2) != 0 and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 :
pos1 = pos2
else:
pos1 = []
PosDates.append(pos2)
    # if zero_fill is set, dates and PosDates entries with zero scans are appended at the end.
    if zero_fill is not None:
        # datesDt is redefined after PosDates has been assigned
        final = (pd.to_datetime(final) + pd.Timedelta('%ss'%(Dt*zero_fill))).strftime('%Y-%m-%d %H:%M')
        datesDt = pd.date_range(inicio,final,freq = textdt+'s')
        # future steps with zero scans are appended to PosDates, and end is shifted accordingly.
        end = end + pd.Timedelta('%ss'%(Dt*zero_fill)) # number of time steps, independent of Dt
        for steps in np.arange(zero_fill): PosDates.append([])
    # convert to local time
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
    # output index in local time
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
    # mask from a shapefile, handled separately from wmf
    if mask is not None:
        # open one scan only to get the grid shape and properties needed for the mask
        g = netCDF4.Dataset(ListRutas[PosDates[0][0]])
        field = g.variables['Rain'][:].T/1000.0  # only the grid shape of this field is used below
        RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
        g.close()
longs=np.array([RadProp[2]+0.5*RadProp[4]+i*RadProp[4] for i in range(RadProp[0])])
lats=np.array([RadProp[3]+0.5*RadProp[5]+i*RadProp[5] for i in range(RadProp[1])])
x,y = np.meshgrid(longs,lats)
#mask as a shp
if type(mask) == str:
#boundaries
shp = gpd.read_file(mask)
poly = shp.geometry.unary_union
shp_mask = np.zeros([len(lats),len(longs)])
for i in range(len(lats)):
for j in range(len(longs)):
if (poly.contains(Point(longs[j],lats[i])))==True:
shp_mask[i,j] = 1# Rain_mask es la mascara
l = x[shp_mask==1].min()
r = x[shp_mask==1].max()
d = y[shp_mask==1].min()
a = y[shp_mask==1].max()
#mask as a list with coordinates whithin the radar extent
elif type(mask) == list:
l = mask[0] ; r = mask[1] ; d = mask[2] ; a = mask[3]
        x,y = x.T,y.T # note: still unsure whether the cropping keeps the same position as lats/longs in the nc.
#boundaries position
x_wh,y_wh = np.where((x>l)&(x<r)&(y>d)&(y<a))
        # field is redefined with the corresponding (cropped) size
field = field[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
if save_bin and len(codigos)==1 and path_res is not None:
#open nc file
f = netCDF4.Dataset(path_res,'w', format='NETCDF4') #'w' stands for write
tempgrp = f.createGroup('rad_data') # as folder for saving files
lon = longs[np.unique(x_wh)[0]:np.unique(x_wh)[-1]]
lat = lats[np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
            #set name and length of dimensions
tempgrp.createDimension('lon', len(lon))
tempgrp.createDimension('lat', len(lat))
tempgrp.createDimension('time', None)
#building variables
longitude = tempgrp.createVariable('longitude', 'f4', 'lon')
latitude = tempgrp.createVariable('latitude', 'f4', 'lat')
rain = tempgrp.createVariable('rain', 'f4', (('time', 'lat', 'lon')))
time = tempgrp.createVariable('time', 'i4', 'time')
            #adding global attributes
f.description = "Radar rainfall dataset containing one group"
f.history = "Created " + dt.datetime.now().strftime("%d/%m/%y")
#Add local attributes to variable instances
longitude.units = 'degrees east - wgs4'
latitude.units = 'degrees north - wgs4'
time.units = 'minutes since 2020-01-01 00:00'
rain.units = 'mm/h'
#passing data into variables
# use proper indexing when passing values into the variables - just like you would a numpy array.
longitude[:] = lon #The "[:]" at the end of the variable instance is necessary
latitude[:] = lat
else:
        # accumulate within the basin.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
#accumulated in basin
if accum:
if mask is not None:
rvec_accum = np.zeros(field.shape)
dfaccum = pd.DataFrame(index = rng) #este producto no da con mask.
else:
rvec_accum = np.zeros(cu.ncells)
# rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
    # ITERATE OVER THE SCANS IN THE PERIOD AND DERIVE THE PRODUCTS
# print ListRutas
for ind,dates,pos in zip(np.arange(len(datesDt[1:])),datesDt[1:],PosDates):
        # choose how to define the size of rvec
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells, dtype = int)
rStra = np.zeros(cu.ncells, dtype = int)
try:
            # read and accumulate the rainfall from the nc files in this interval.
for c,p in enumerate(pos):
                # list the file being read
if verbose:
print (ListRutas[p])
                # read the radar image for that date
g = netCDF4.Dataset(ListRutas[p])
rainfield = g.variables['Rain'][:].T/(((len(pos)*3600)/Dt)*1000.0)
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#if all extent
if all_radextent:
radmatrix += rainfield
#if mask
if mask is not None and type(mask) == str:
rvec += (rainfield*shp_mask)[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
elif mask is not None and type(mask) == list:
rvec += rainfield[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
# on WMF.
else:
                    # add the rainfall for this interval
rvec += cu.Transform_Map2Basin(rainfield,RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
                    rConv[rvec == 0] = 0
                    rStra[rvec == 0] = 0
                #close the netCDF file
g.close()
            # placeholder flag indicating that a field was found
path = 'bla'
except:
print ('error - no field found ')
path = ''
if accum:
if mask is not None:
rvec += np.zeros(shape = field.shape)
rvec = np.zeros(shape = field.shape)
else:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
        # accumulate inside the loop over dates
if accum:
rvec_accum += rvec
if mask is None: #esto para mask no sirve
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
        # if mean radar rainfall is wanted over several basins defined in 'codigos'
#subbasins defined for WMF
if meanrain_ALL and mask is None:
mean = []
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec*df_posmasks['%s'%codigo])/float(df_posmasks['%s'%codigo][df_posmasks['%s'%codigo]==1].size))
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
        # save binary and df; when saving binaries step by step rvec_accum is not needed
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
mean = []
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#si guarda nc de ese timestep guarda clasificados
if dentro == 0:
hagalo = True
else:
hagalo = False
#mira si guarda o no los clasificados
if save_class:
#Escribe el binario convectivo
aa = cuConv.rain_radar2basin_from_array(vec = rConv,
ruta_out = path_res+'_conv',
fecha = dates,
dt = Dt,
doit = hagalo)
#Escribe el binario estratiforme
aa = cuStra.rain_radar2basin_from_array(vec = rStra,
ruta_out = path_res+'_stra',
fecha = dates,
dt = Dt,
doit = hagalo)
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
elif mask is None and save_bin == True and len(codigos)==1 and path_res is None: #si es una cuenca pero no se quiere guardar binarios.
mean = []
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
#guardar .nc con info de recorte de radar: mask.
if mask is not None and save_bin and len(codigos)==1 and path_res is not None:
mean = []
#https://pyhogs.github.io/intro_netcdf4.html
rain[ind,:,:] = rvec.T
time[ind] = int((dates - pd.to_datetime('2010-01-01 00:00')).total_seconds()/60) #min desde 2010
if ind == np.arange(len(datesDt[1:]))[-1]:
f.close()
print ('.nc saved')
#guarda en df meanrainfall.
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec)/float(shp_mask[shp_mask==1].size))
#save
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
        # close the binary file and write the header
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
pass
    # choose what to return.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True and mask is not None:
return df,rvec_accum
elif accum == True and mask is None:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
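
# A hedged usage sketch of get_radar_rain (paths, dates and station codes are
# illustrative assumptions, not real operational values):
#   df_rain = get_radar_rain('2020-02-01 00:00', '2020-02-01 06:00', Dt=300.,
#                            cuenca='basin.nc', codigos=[99], rutaNC='/radar/',
#                            meanrain_ALL=False, save_bin=True,
#                            path_res='/tmp/rain_basin', umbral=0.005)
#   # writes /tmp/rain_basin.bin/.hdr and returns a 5-min mean-rainfall DataFrame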
def get_radar_rain_OP(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
verbose=True):
'''
    Read .nc files from rutaNC:101Radar_Class within the assigned period and frequency.
    For now it only works with one scan per timestep, operational at 5 min.
    0. It divides by 1000.0 and converts from mm/5min to mm/h.
    1. Get mean radar rainfall in basins assigned in 'codigos' for finding masks, if the masks exist.
    2. Write binary files if set.
    - Cannot do both 1 and 2.
    - To save binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
    len('codigos')=1, nc_path aims to the one with dxp and simubasin props set.
Parameters
----------
start: string, date&time format %Y-%m%-d %H:%M, local time.
end: string, date&time format %Y-%m%-d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be 260 path if whole catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Change returns: df,rvec (accumulated)
path_tif: string, path of tif to write accumlated basin map. Default None.
all_radextent:boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Change returns: df,radmatrix.
meanrain_ALL: boolean, defaul True. True for getting the mean radar rainfall within several basins which mask are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean,default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with path where to write results if save_bin=True, default None.
umbral: float. Minimum umbral for writing rainfall, default = 0.005.
Returns
----------
- df whith meanrainfall of assiged codes in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- save .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de dias y de rutas
ListDatesinNC.sort()
ListRutas.sort()
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
    #Get the positions according to dt for each date; if there is no scan in that timestep,
    #the immediately previous scan is accumulated.
PosDates = []
pos1 = [0]
for d1,d2 in zip(datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
if len(pos2) == 0 and complete_naninaccum == True: # si no hay barridos en el dt de inicio ellena con cero
pos2 = pos1
elif complete_naninaccum == True: #si hay barridos en este dt guarda esta pos para si es necesario completar las pos de dt en el sgte paso
pos1 = pos2
elif len(pos2) == 0:
pos2=[]
PosDates.append(pos2)
paths_inperiod = [[ListRutas[p] for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
pospaths_inperiod = [[p for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
######### LISTA EN ORDEN CON ARCHIVOS OBSERVADOS Y ESCENARIOS#############3
##### buscar el ultimo campo de lluvia observado ######
datessss = []
nc010 = []
for date,l_step,lpos_step in zip(datesDt[1:],paths_inperiod,pospaths_inperiod):
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
#siempre intenta buscar en cada paso de tiempo el observado, solo si no puede, busca escenarios futuros.
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
nc010.append(path)
datessss.append(date)
    ######point from which scenarios start to be used
    #if any observed field exists within the period, len(date)>1; otherwise = 0 (the whole period corresponds to forecast)
    #if none exists, pos_lastradarfield = position of the first timestep so that all files are taken
if len(datessss)>0:
pos_lastradarfield = np.where(datesDt[1:]==datessss[-1])[0][0]
else:
pos_lastradarfield = 0
list_paths= []
# escoge rutas y pos organizados para escenarios, por ahora solo sirve con 1 barrido por timestep.
for ind,date,l_step,lpos_step in zip(np.arange(datesDt[1:].size),datesDt[1:],paths_inperiod,pospaths_inperiod):
# pos_step = []; paths_step = []
if len(l_step) == 0:
list_paths.append('')
else:
# ordenar rutas de ncs
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
# print (ind,path,pospath)
                #if it is an old event
if evs_hist:
                    #first, future scenarios.
if include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios):
list_paths.append(path)
break
                    #then, observed fields.
elif path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
                #if it is right now
else:
                    #first observed fields, and stop there if one is found
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
break
                    #then future scenarios, and only once the observed fields run out
elif include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios) and ind > pos_lastradarfield:
list_paths.append(path)
######### LECTURA DE CUENCA, DATOS Y GUARDADO DE BIN.###########
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#accumulated in basin
if accum:
rvec_accum = np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
    #iterate over the .nc files, open them and store their info
for dates,path in zip(datesDt[1:],list_paths):
if verbose:
print (dates,path)
rvec = np.zeros(cu.ncells)
if path != '': #sino hay archivo pone cero.
try:
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(path)
#if all extent
if all_radextent:
radmatrix += g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0)
#on basins --> wmf.
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0),RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
rConv[rvec == 0] = 0
rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
except:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
else:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
if meanrain_ALL:
mean = []
#para todas
for codigo in codigos:
if '%s.tif'%(codigo) in os.listdir('/media/nicolas/Home/nicolas/01_SIATA/info_operacional_cuencas_nivel/red_nivel/tif_mascaras/'):
mask_path = '/media/nicolas/Home/nicolas/01_SIATA/info_operacional_cuencas_nivel/red_nivel/tif_mascaras/%s.tif'%(codigo)
mask_map = wmf.read_map_raster(mask_path)
mask_vect = cu.Transform_Map2Basin(mask_map[0],mask_map[1])
else:
mask_vect = None
if mask_vect is not None:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
try:
mean.append(np.sum(mask_vect*rvec)/float(mask_vect[mask_vect==1].size))
except: # para las que no hay mascara.
mean.append(np.nan)
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if save_bin == True and len(codigos)==1 and path_res is not None:
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#guarda en df meanrainfall.
mean = []
if path != '':
mean.append(rvec.mean())
else:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if save_bin == True and len(codigos)==1 and path_res is not None:
        #Close the binary file and write the header
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
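
# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how get_radar_rain_OP
# might be called to get mean radar rainfall for a few stage stations. The paths,
# dates and station codes below are hypothetical placeholders.
def _example_get_radar_rain_OP():
    start = '2020-05-01 00:00'
    end = '2020-05-01 06:00'
    Dt = 300.                                  # 5-minute timestep, in seconds
    cuenca = '/path/to/simubasin_260.nc'       # hypothetical simubasin .nc (WMF format)
    codigos = [140, 106, 240]                  # hypothetical stage-station codes
    rutaNC = '/path/to/101Radar_Class/'        # hypothetical radar .nc folder
    # returns a DataFrame of mean rainfall, one column per station code
    df = get_radar_rain_OP(start, end, Dt, cuenca, codigos, rutaNC,
                           meanrain_ALL=True, verbose=False)
    return df
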
def get_radar_rain_OP_newmasks(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
path_masks_csv = None,verbose=True):
'''
    Reads .nc files from rutaNC (101Radar_Class) within the assigned period and frequency.
    For now it only works with one radar scan per timestep, operational at 5 min.
    0. It divides by 1000.0 and converts from mm/5min to mm/h.
    1. Gets mean radar rainfall in the basins assigned in 'codigos', used for finding masks, if the mask exists.
    2. Writes binary files if set.
    - Cannot do both 1 and 2.
    - To save binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
    len('codigos')=1, and nc_path aiming to the .nc with dxp and simubasin properties set.
    Parameters
    ----------
    start: string, date&time format %Y-%m-%d %H:%M, local time.
    end: string, date&time format %Y-%m-%d %H:%M, local time.
    Dt: float, timedelta in seconds. For this function it should be lower than 3600 s (1 h).
    cuenca: string, simubasin .nc path with dxp and format from WMF. It should be the 260 path if whole-catchment analysis is needed, or any other .nc path for saving the binary file.
    codigos: list with codes of stage stations. Needed for finding the mask associated to a basin.
    rutaNC: string, path with .nc files from the radar meteorology group. Default in amazonas: 101Radar_Class
    Optional Parameters
    ----------
    accum: boolean, default False. True for getting the accumulated matrix between start and end.
    Changes returns: df,rvec (accumulated)
    path_tif: string, path of the tif to write the accumulated basin map. Default None.
    all_radextent: boolean, default False. True for getting the accumulated matrix between start and end in the
    whole radar extent. Changes returns: df,radmatrix.
    meanrain_ALL: boolean, default True. True for getting the mean radar rainfall within the basins whose masks are defined in 'codigos'.
    save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall if len('codigos')=1.
    save_class: boolean, default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
    path_res: string with the path where to write results if save_bin=True, default None.
    umbral: float. Minimum rainfall threshold for writing rainfall, default = 0.005.
    include_escenarios: string with the name of the scenarios to use for the future part of the period.
    path_masks_csv: string with the path of the csv with mask positions; positions are relative to the shape of the designated simubasin.
    Returns
    ----------
    - df with mean rainfall of the codes assigned in 'codigos'.
    - df,rvec if accum = True.
    - df,radmatrix if all_radextent = True.
    - saves .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de dias y de rutas
ListDatesinNC.sort()
ListRutas.sort()
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
    #Get the positions according to dt for each date; if there is no scan in that timestep,
    #the immediately previous scan is accumulated.
PosDates = []
pos1 = [0]
for d1,d2 in zip(datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
if len(pos2) == 0 and complete_naninaccum == True: # si no hay barridos en el dt de inicio ellena con cero
pos2 = pos1
elif complete_naninaccum == True: #si hay barridos en este dt guarda esta pos para si es necesario completar las pos de dt en el sgte paso
pos1 = pos2
elif len(pos2) == 0:
pos2=[]
PosDates.append(pos2)
paths_inperiod = [[ListRutas[p] for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
pospaths_inperiod = [[p for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
######### LISTA EN ORDEN CON ARCHIVOS OBSERVADOS Y ESCENARIOS#############3
##### buscar el ultimo campo de lluvia observado ######
datessss = []
nc010 = []
for date,l_step,lpos_step in zip(datesDt[1:],paths_inperiod,pospaths_inperiod):
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
#siempre intenta buscar en cada paso de tiempo el observado, solo si no puede, busca escenarios futuros.
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
nc010.append(path)
datessss.append(date)
    ######point from which scenarios start to be used
    #if any observed field exists within the period, len(date)>1; otherwise = 0 (the whole period corresponds to forecast)
    #if none exists, pos_lastradarfield = position of the first timestep so that all files are taken
if len(datessss)>0:
pos_lastradarfield = np.where(datesDt[1:]==datessss[-1])[0][0]
else:
pos_lastradarfield = 0
list_paths= []
# escoge rutas y pos organizados para escenarios, por ahora solo sirve con 1 barrido por timestep.
for ind,date,l_step,lpos_step in zip(np.arange(datesDt[1:].size),datesDt[1:],paths_inperiod,pospaths_inperiod):
# pos_step = []; paths_step = []
if len(l_step) == 0:
list_paths.append('')
else:
# ordenar rutas de ncs
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
# print (ind,path,pospath)
                #if it is an old event
if evs_hist:
                    #first, future scenarios.
if include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios):
list_paths.append(path)
break
                    #then, observed fields.
elif path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
                #if it is right now
else:
                    #first observed fields, and stop there if one is found
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
break
                    #then future scenarios, and only once the observed fields run out
elif include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios) and ind > pos_lastradarfield:
list_paths.append(path)
######### LECTURA DE CUENCA, DATOS Y GUARDADO DE BIN.###########
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#accumulated in basin
if accum:
rvec_accum = np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
    #iterate over the .nc files, open them and store their info
for dates,path in zip(datesDt[1:],list_paths):
if verbose:
print (dates,path)
rvec = np.zeros(cu.ncells)
if path != '': #sino hay archivo pone cero.
try:
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(path)
#if all extent
if all_radextent:
radmatrix += g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0)
#on basins --> wmf.
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0),RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
rConv[rvec == 0] = 0
rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
except:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
else:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
if meanrain_ALL:
mean = []
#para todas
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
try:
mean.append(np.sum(rvec*df_posmasks[codigo])/float(df_posmasks[codigo][df_posmasks[codigo]==1].size))
except: # para las que no hay mascara.
mean.append(np.nan)
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if save_bin == True and len(codigos)==1 and path_res is not None:
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#guarda en df meanrainfall.
mean = []
if path != '':
mean.append(rvec.mean())
else:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if save_bin == True and len(codigos)==1 and path_res is not None:
        #Close the binary file and write the header
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
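
# Illustrative usage sketch (hypothetical paths and codes): same call pattern as
# get_radar_rain_OP above, but the basin masks are read from a csv of cell
# positions (path_masks_csv) instead of per-station .tif files.
def _example_get_radar_rain_OP_newmasks():
    df = get_radar_rain_OP_newmasks('2020-05-01 00:00', '2020-05-01 06:00', 300.,
                                    '/path/to/simubasin_260.nc', [140, 106, 240],
                                    '/path/to/101Radar_Class/',
                                    path_masks_csv='/path/to/df_posmasks.csv',
                                    meanrain_ALL=True, verbose=False)
    return df
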
def get_rainfall2sim(ConfigList,cu,path_ncbasin,starts_m,end, #se corre el bin mas largo.
Dt= float(wmf.models.dt),include_escenarios=None,
evs_hist= False,
check_file=True,stepback_start = '%ss'%int(wmf.models.dt *1),
complete_naninaccum=True,verbose=False,zero_fill=None):
#generacion o lectura de lluvia
start,end = starts_m[-1],end
start,end = (pd.to_datetime(start)- | pd.Timedelta(stepback_start) | pandas.Timedelta |
import pgpasslib
from sqlalchemy import create_engine
import pandas as pd
from pymedextcore.document import Document
from .med import MedicationAnnotator
def get_engine():
password = pgpasslib.getpass('10.172.28.101', 5432, '<PASSWORD>', '<PASSWORD>')
return create_engine(f'postgresql+psycopg2://coronascientist:{password}@10.172.28.101:5432/coronaomop')
def construct_query(limit, min_date='2020-03-01', view='unstable', note_ids = None):
if limit != -1:
limit = "LIMIT {}".format(limit)
else:
limit = ""
notes = ""
if note_ids is not None:
notes = f"AND note_id IN {tuple(note_ids)}"
query = f"""
SELECT note_id, person_id, note_text, note_date
FROM {view}.note
WHERE note_date > '{min_date}'
{notes}
AND (note_text IS NOT NULL OR note_text not in ('', ' ') )
{limit}
"""
#print(query)
#logger.debug('query set with parameters: view = {}, min_date= {}, limit={}'.format(view, min_date, limit))
return query
def get_note_ids(engine, min_date='2020-03-01', view='unstable'):
query = f"""
SELECT note_id
FROM {view}.note
WHERE note_date > '{min_date}'
AND (note_text IS NOT NULL OR note_text not in ('', ' ') )
"""
with engine.connect() as connection:
ids = pd.read_sql_query(query, engine)
return set(ids.note_id.values)
def rawtext_pg(limit, min_date, engine,chunksize=100):
query = construct_query(limit, min_date)
with engine.connect() as connection:
return pd.read_sql_query(query, connection,chunksize=chunksize)
def get_from_omop_note(engine, limit = -1, min_date='2020-03-01', note_ids= None):
query = construct_query(limit = limit, min_date =min_date, note_ids = note_ids)
with engine.connect() as connection:
query_res = connection.execute(query)
return query_res.fetchall()
def convert_chunk_to_doc(query_res, chunksize):
res = []
for note_id, person_id, raw_text, note_date in query_res.fetchmany(chunksize):
res.append(Document(raw_text=raw_text,
ID = note_id,
attributes = {'person_id':person_id},
documentDate = note_date.strftime("%Y/%m/%d")
))
return res
def convert_notes_to_doc(notes):
res = []
for note_id, person_id, raw_text, note_date in notes:
res.append(Document(raw_text=raw_text,
ID = note_id,
attributes = {'person_id':person_id},
documentDate = note_date.strftime("%Y/%m/%d")
))
return res
def load_processed_ids(filename = 'data/omop_tables/notes_processed_med.txt'):
processed = []
with open(filename, 'r') as f:
for ID in f:
processed.append(int(ID.strip()))
return set(processed)
def chunk_to_omop(annotated_chunk):
omop = [MedicationAnnotator.doc_to_omop(x) for x in annotated_chunk]
omop = [item for sublist in omop for item in sublist]
return | pd.DataFrame.from_records(omop) | pandas.DataFrame.from_records |
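
# Illustrative usage sketch (not part of the original module): pull a small batch
# of notes from the OMOP 'unstable' schema and wrap them as pymedext Documents.
# It assumes database credentials are available through pgpass (see get_engine);
# the limit and minimum date are placeholder values.
def _example_fetch_notes():
    engine = get_engine()
    notes = get_from_omop_note(engine, limit=100, min_date='2020-03-01')
    docs = convert_notes_to_doc(notes)
    return docs
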
import argparse
import six
from tqdm import tqdm
import pandas as pd
import string, re
from nltk.translate.bleu_score import corpus_bleu
from typing import List, Tuple, Dict, Set, Union
def compute_corpus_level_bleu_score(references: List[str], hypotheses: List[str]) -> float:
""" Given decoding results and reference sentences, compute corpus-level BLEU score.
    @param references (List[str]): a list of gold-standard reference target sentences
    @param hypotheses (List[str]): a list of hypothesis sentences, one for each reference
    @returns bleu_score: corpus-level BLEU score
    """
    # corpus_bleu expects tokenized sentences, so split each sentence on whitespace
    bleu_score = corpus_bleu([[ref.split()] for ref in references],
                             [hyp.split() for hyp in hypotheses])
return bleu_score
def standardize_punctuation_and_lowercase(sentences: List[str]) -> List[str]:
'''
This function removes the last punctuation mark and lowers the words.
Input: 'realmente gostei dessa camisa . deveria ter comprado .'
Output: 'realmente gostei dessa camisa . deveria ter comprado'
'''
for i, sentence in enumerate(sentences):
punkt_occurrences = 0
sentence_list = []
matches = re.finditer(r'[.?!]', sentence)
output_generator = [match for match in matches]
if len(output_generator) == 0:
continue
last_match = output_generator[-1]
last_match_ind = last_match.start()
# remove space before punctuation
if sentence[last_match_ind-1:last_match_ind+1].__contains__(' '):
sentences[i] = sentence[:last_match_ind-1].lower()
else:
sentences[i] = sentence[:last_match_ind].lower()
return sentences
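
# Small illustrative check (not in the original script): the trailing period is
# dropped and the sentence is lower-cased, as described in the docstring above.
def _example_standardize():
    sents = ['Realmente gostei dessa camisa . Deveria ter comprado .']
    return standardize_punctuation_and_lowercase(sents)
    # -> ['realmente gostei dessa camisa . deveria ter comprado']
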
def translate_sentences(text_list, output):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
from google.cloud import translate_v2 as translate
    # RUN: export GOOGLE_APPLICATION_CREDENTIALS="/home/arthurtelles/gtranslate-api-290022-4899f0c9d3f7.json"
translate_client = translate.Client()
# text = 'ola eu gosto de framboesa'
result_list = []
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
for line in tqdm(text_list):
if isinstance(line, six.binary_type):
line = line.decode("utf-8")
result = translate_client.translate(line, target_language='en')
result_list.append(result)
# print(u"Text: {}".format(result["input"]))
# print(u"Translation: {}".format(result["translatedText"]))
# print(u"Detected source language: {}".format(result["detectedSourceLanguage"]))
    text_file2 = open(output, "a")  # write translations to the file passed as `output`
for line in result_list:
text_file2.write(f"{line['translatedText']}\n")
text_file2.close()
return result_list
parser = argparse.ArgumentParser()
parser.add_argument('-input_file', required=True)
parser.add_argument('-reference_file', required=True)
parser.add_argument('-bleu_output', required=True)
parser.add_argument('-google_translate_api', action='store_true')
parser.add_argument('-output_translations_csv', type=str)
opt = parser.parse_args()
print(f'Inputs: {opt}\n')
# class InputArgs():
# def __init__(self):
# self.input_file = 'seq2seq_test_translations2.txt'
# self.reference_file = 'data/eng_test.txt' # 'rnn_naive_model_translations.txt' # 'vanilla_transformer.txt'
# self.bleu_output = 'seq2seq_test_bleu2.txt' # 'weights_test' # 'rnn_naive_model' # 'transformer_test'
# self.google_translate_api = False
# self.output_translations_csv = 'seq2seq_test_bleu2.csv'
# opt = InputArgs()
# print(opt.__dict__)
ref_file = open(opt.reference_file, "r+").read()
ref_list = ref_file.split('\n')
# do not run the API call by default, since we already know the result
text_file1 = open(opt.input_file, "r+").read()
translated_list = text_file1.split('\n')
if opt.google_translate_api == True:
    result_list = translate_sentences(translated_list, "eng_gtranslate.txt")
    translated_list = [result['translatedText'] for result in result_list]
ref_list = standardize_punctuation_and_lowercase(ref_list)
translated_list = standardize_punctuation_and_lowercase(translated_list)
bleu_score = compute_corpus_level_bleu_score(ref_list, translated_list)
print(f"Corpus bleu from file {opt.input_file}: {bleu_score}\n")
if opt.output_translations_csv is not None:
data_list = [{'Reference': ref_list[i], 'Hypothesis': translated_list[i]} for i in range(len(ref_list))]
data_df = | pd.DataFrame(data_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 04 18:33:27 2018
@author: Prodipta
"""
import pandas as pd
import datetime as dt
import numpy as np
from pyfolio.utils import extract_rets_pos_txn_from_zipline
from pyfolio.timeseries import perf_stats
from empyrical.stats import cum_returns_final, aggregate_returns
import json
VaR_CUTOFF = 0.05
MAX_TXN = 50000
def convert_zipline_results_to_json(results):
#make the json out of the day-wise dataframe
res = results.drop(['transactions','orders','positions',],1)
s = res.to_json(orient='index', date_format ='iso')
jsonDict = json.loads(s)
jsonArray = []
for index in jsonDict:
jsonDict[index]['start'] = index
jsonArray.append(jsonDict[index])
    # compute drawdowns separately
    value = results['portfolio_value']
    max_dd = np.zeros(len(value))
    for i in range(len(value)):
        max_dd[i] = (value[i]/value[0:i+1].max() - 1)  # running max includes the current value, so the first element is 0
results["max_drawdown"] = max_dd
# algo returns vs benchmark returns for the main plot - this is cumulative returns
returns = results['algorithm_period_return']
benchmark_rets = results['benchmark_period_return']
max_dd = results['max_drawdown']
positions = results['positions']
transactions = results['transactions']
gross_lev = results['gross_leverage']
average_txn = 0
# extract structured info using pyfolio
    if any(results['transactions'].apply(len) > 0):
returns, positions, transactions, _ = extract_rets_pos_txn_from_zipline(results)
average_txn = np.mean(abs(transactions['txn_dollars']))
# for plotting daily transaction, use <transactions>
transactions['id'] = pd.Series([str(i) for i in range(len(transactions))],index=transactions.index)
transactions = transactions.set_index(keys='id')
transactions = transactions.drop(['sid'],1)
# resize transactions if exceeds limit
if len(transactions) > MAX_TXN:
        frames = [transactions.head(MAX_TXN//2), transactions.tail(MAX_TXN//2)]
transactions = | pd.concat(frames) | pandas.concat |
import numpy as np
import pandas as pd
import os
import argparse
import json
import tensorflow.keras as k
def readData(tumorFileName, normalFileName):
x_true = pd.read_csv(tumorFileName, sep='\t', header=0, index_col=0).T
x_false = pd.read_csv(normalFileName, sep='\t', header=0, index_col=0).T
# if this data set has some nulls fill it so it does not crash the program
x = | pd.concat([x_true, x_false]) | pandas.concat |
from surf.script_tab import keytab
from surf.surf_tool import regex2pairs
import os, json, time, re, codecs, glob, shutil
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging.handlers
import pandas as pd
import itertools
import numpy as np
import random
import tensorflow as tf
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
# class for splitting time series data into folds
class PurgedGroupTimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
group_gap : int, default=None
Gap between train and test
max_test_group_size : int, default=Inf
        Maximum group size for a single test set.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate((train_array,
train_array_tmp)),
axis=None), axis=None)
train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate((test_array,
test_array_tmp)),
axis=None), axis=None)
test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in test_array]
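
# Illustrative usage sketch (not part of the original script): splitting a toy
# dataset with PurgedGroupTimeSeriesSplit, using a synthetic "day" id as the group
# so that `group_gap` groups are purged between each train and test block.
def _example_purged_group_split():
    n_samples = 200
    X = np.random.randn(n_samples, 3)
    y = np.random.randn(n_samples)
    groups = np.repeat(np.arange(n_samples // 5), 5)   # 5 rows per "day"
    cv = PurgedGroupTimeSeriesSplit(n_splits=4, group_gap=2,
                                    max_train_group_size=20)
    for train_idx, test_idx in cv.split(X, y, groups=groups):
        pass  # e.g. fit on X[train_idx], evaluate on X[test_idx]
    return cv
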
# class for splitting time series data into folds (stacking variant)
class PurgedGroupTimeSeriesSplitStacking(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
stacking_mode : bool, default=True
Whether to provide an additional set to test a stacking classifier or not.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
max_val_group_size : int, default=Inf
Maximum group size for a single validation set.
max_test_group_size : int, default=Inf
        Maximum group size for a single test set; if stacking_mode = True and None
it defaults to max_val_group_size.
val_group_gap : int, default=None
Gap between train and validation
test_group_gap : int, default=None
Gap between validation and test, if stacking_mode = True and None
it defaults to val_group_gap.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
stacking_mode=True,
max_train_group_size=np.inf,
max_val_group_size=np.inf,
max_test_group_size=np.inf,
val_group_gap=None,
test_group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.max_val_group_size = max_val_group_size
self.max_test_group_size = max_test_group_size
self.val_group_gap = val_group_gap
self.test_group_gap = test_group_gap
self.verbose = verbose
self.stacking_mode = stacking_mode
def split(self, X, y=None, groups=None):
if self.stacking_mode:
return self.split_ensemble(X, y, groups)
else:
return self.split_standard(X, y, groups)
def split_standard(self, X, y=None, groups=None):
"""Generate indices to split data into training and validation set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/validation set.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validation set indices for that split.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_splits = self.n_splits
group_gap = self.val_group_gap
max_val_group_size = self.max_val_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds, n_groups))
group_val_size = min(n_groups // n_folds, max_val_group_size)
group_val_starts = range(n_groups - n_splits * group_val_size, n_groups, group_val_size)
for group_val_start in group_val_starts:
train_array = []
val_array = []
group_st = max(0, group_val_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_val_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(np.concatenate((train_array, train_array_tmp)), axis=None), axis=None)
train_end = train_array.size
for val_group_idx in unique_groups[group_val_start: group_val_start + group_val_size]:
val_array_tmp = group_dict[val_group_idx]
val_array = np.sort(np.unique(np.concatenate((val_array, val_array_tmp)), axis=None), axis=None)
val_array = val_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in val_array]
def split_ensemble(self, X, y=None, groups=None):
"""Generate indices to split data into training, validation and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validation set indices for that split (testing indices for base classifiers).
test : ndarray
The testing set indices for that split (testing indices for final classifier)
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_splits = self.n_splits
val_group_gap = self.val_group_gap
test_group_gap = self.test_group_gap
if test_group_gap is None:
test_group_gap = val_group_gap
max_train_group_size = self.max_train_group_size
max_val_group_size = self.max_val_group_size
max_test_group_size = self.max_test_group_size
if max_test_group_size is None:
max_test_group_size = max_val_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds, n_groups))
group_val_size = min(n_groups // n_folds, max_val_group_size)
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size, n_groups, group_test_size)
train_indices = []
val_indices = []
test_indices = []
for group_test_start in group_test_starts:
train_array = []
val_array = []
test_array = []
val_group_st = max(max_train_group_size + val_group_gap,
group_test_start - test_group_gap - max_val_group_size)
train_group_st = max(0, val_group_st - val_group_gap - max_train_group_size)
for train_group_idx in unique_groups[train_group_st:(val_group_st - val_group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(np.concatenate((train_array, train_array_tmp)), axis=None), axis=None)
train_end = train_array.size
for val_group_idx in unique_groups[val_group_st:(group_test_start - test_group_gap)]:
val_array_tmp = group_dict[val_group_idx]
val_array = np.sort(np.unique(np.concatenate((val_array, val_array_tmp)), axis=None), axis=None)
val_array = val_array[val_group_gap:]
for test_group_idx in unique_groups[group_test_start:(group_test_start + group_test_size)]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(np.concatenate((test_array, test_array_tmp)), axis=None), axis=None)
test_array = test_array[test_group_gap:]
yield [int(i) for i in train_array], [int(i) for i in val_array], [int(i) for i in test_array]
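
# Illustrative sketch: the stacking variant yields three index sets per fold
# (base-model train, base-model test / meta-model train, meta-model test).
# Finite group sizes are used here because the validation start is computed
# from max_train_group_size.
def _example_purged_group_split_stacking():
    X = np.random.randn(300, 3)
    groups = np.repeat(np.arange(60), 5)
    cv = PurgedGroupTimeSeriesSplitStacking(n_splits=4,
                                            max_train_group_size=10,
                                            max_val_group_size=10,
                                            max_test_group_size=10,
                                            val_group_gap=2, test_group_gap=2)
    return [(len(tr), len(va), len(te)) for tr, va, te in cv.split(X, groups=groups)]
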
def sharp_ratio(data, base_ratio=0.0):
num = len(data)
t_return = (data.shift(-1) - data) / data
std = t_return.std()
sharpratio = (t_return.mean() - base_ratio) * (np.sqrt(num)) / std
return sharpratio
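
# Small illustrative check of sharp_ratio (not in the original script): a noisy
# but steadily rising price series gives a positive ratio.
def _example_sharp_ratio():
    rng = np.random.RandomState(0)
    prices = pd.Series(100. * np.cumprod(1 + 0.001 + 0.01 * rng.randn(250)))
    return sharp_ratio(prices)
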
class Pre_data(object):
def __init__(self):
self.funcmap = {
"种子": self.set_all_seeds,
"填充": self.pipe_pad,
# 一个数组栏,一个dataframe
"取列": self.split_columns,
"取行": self.split_rows,
}
def set_all_seeds(self, dataobj, seed):
np.random.seed(seed)
random.seed(seed)
# tf.random.set_seed(seed)
return dataobj
def pipe_pad(self, dataobj, paras={}):
if paras["值"] is None:
if paras["方式"] == "向前":
                # then fill upwards (backfill)
dataobj.fillna(method='bfill', inplace=True)
elif paras["方式"] == "向后":
                # first fill downwards (forward fill)
dataobj.fillna(method='ffill', inplace=True)
else:
raise Exception("paras error {}".format(paras))
else:
dataobj.fillna(value=paras["值"], inplace=True)
return dataobj
def split_columns(self, dataobj, paras):
return dataobj[paras]
def split_rows(self, dataobj, paras):
if isinstance(paras[0], str):
outdata = dataobj.loc[paras[0]:]
elif isinstance(paras[0], int):
outdata = dataobj.iloc[paras[0]:]
else:
raise Exception("type error {}".format(paras))
if isinstance(paras[1], str):
outdata = outdata.loc[:paras[1]]
elif isinstance(paras[1], int):
outdata = outdata.iloc[:paras[1]]
else:
raise Exception("type error {}".format(paras))
return outdata
def __call__(self, infiles, commands):
outdata = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
            # process the commands in order
for command in commands:
tkey = list(command.keys())[0]
pdobj = self.funcmap[tkey](pdobj, command[tkey])
outdata.append(pdobj)
return outdata
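
# Illustrative usage sketch (hypothetical csv path and column names): each command
# is a one-key dict whose key is looked up in funcmap above, so a pipeline of
# seed-setting, padding and row/column selection can be expressed as a list.
def _example_pre_data():
    pre = Pre_data()
    commands = [
        {"种子": 42},                            # set numpy/random seeds
        {"填充": {"值": None, "方式": "向后"}},    # forward-fill missing values
        {"取列": ["open", "close"]},             # keep two columns
        {"取行": [0, 1000]},                     # keep the first 1000 rows
    ]
    return pre(["/path/to/prices.csv"], commands)
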
class Train_split(object):
def __init__(self):
self.funcmap = {
# 一个数组栏,一个dataframe
"拆分": self.split_train_test,
}
def split_train_test(self, dataobj, paras):
outlist = []
if isinstance(paras[0], str):
outlist.append(dataobj.loc[:paras[0]])
if len(paras) > 1:
outlist.append(dataobj.loc[paras[0]:paras[1]])
outlist.append(dataobj.loc[paras[1]:])
else:
outlist.append(dataobj.loc[paras[0]:])
elif isinstance(paras[0], int):
outlist.append(dataobj.iloc[:paras[0]])
if len(paras) > 1:
outlist.append(dataobj.iloc[paras[0]:paras[1]])
outlist.append(dataobj.iloc[paras[1]:])
else:
outlist.append(dataobj.iloc[paras[0]:])
elif isinstance(paras[0], float):
tsplit = len(dataobj)
tsplit1 = int(tsplit * paras[0])
outlist.append(dataobj.iloc[:tsplit1])
if len(paras) > 1:
tsplit2 = int(tsplit * sum(paras))
outlist.append(dataobj.iloc[tsplit1:tsplit2])
outlist.append(dataobj.iloc[tsplit2:])
else:
outlist.append(dataobj.iloc[tsplit1:])
else:
raise Exception("type error {}".format(paras))
return outlist
def __call__(self, infiles, commands):
outdata = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
            # process the commands in order
for command in commands:
tkey = list(command.keys())[0]
pdobj = self.funcmap[tkey](pdobj, command[tkey])
outdata.append(pdobj)
return outdata
class SequenceChara(object):
def __init__(self):
self.funcmap = {
"均值n": self.mean_n,
"标准差n": self.std_n,
"涨幅比n": self.ratio_n,
"回撤n": self.draw_n,
"最涨n": self.maxrise_n,
"夏普n": self.sharp_n,
"label_最大n": self.l_max_n,
"label_最小n": self.l_min_n,
"label_回撤n": self.l_draw_n,
"label_最涨n": self.l_maxrise_n,
}
def mean_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).mean()
return outdata
def std_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).std()
return outdata
def ratio_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).apply(lambda x: x[-1] / x[0])
return outdata
def draw_n(self, dataobj, n):
pricepd = dataobj.iloc[:, 0]
maxfallret = pd.Series(index=pricepd.index)
for i in range(0, len(dataobj) - n):
tmpsec = pricepd[i + 1:i + n + 1]
tmpmax = pricepd[i]
tmpmin = pricepd[i]
tmpdrawdown = [1.0]
for t in range(0, n):
if tmpsec[t] > tmpmax:
tmpmax = tmpsec[t]
tmpdrawdown.append(tmpdrawdown[-1])
elif tmpsec[t] <= tmpmin:
tmpmin = tmpsec[t]
tmpdrawdown.append(tmpmin / tmpmax)
else:
pass
maxfallret[i] = min(tmpdrawdown)
return maxfallret
def maxrise_n(self, dataobj, n):
pricepd = dataobj.iloc[:, 0]
maxraiseret = | pd.Series(index=pricepd.index) | pandas.Series |
import pandas as pd
import numpy as np
from scipy.sparse.linalg import svds
from skbio import OrdinationResults
from skbio.stats.composition import clr
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use("ggplot")
def apca(df):
"""Performs Aitchison PCA on a feature table.
Parameters
----------
df: pd.DataFrame
A numeric DataFrame whose rows are "features" and whose columns are
"samples."
Returns
-------
A 3-tuple (U, p, V) where:
U: pd.DataFrame
Feature loadings.
p: pd.DataFrame
Proportions of variance explained.
V: pd.DataFrame
Sample loadings.
"""
# do A-PCA
U, s, V = svds(clr(df), k=2)
V = V.T
# reverse (see SVDs docs)
U = np.flip(U, axis=1)
V = np.flip(V, axis=1)
s = s[::-1]
# Rename columns; we use "Axis 1", etc. to be consistent with the Qurro
# interface
pcs = min(V.shape)
cols = ["Axis {}".format(pc+1) for pc in range(pcs)]
# Make DataFrames from the feature (U) and sample (V) loadings
U = pd.DataFrame(U[:, :pcs], df.index, cols)
V = pd.DataFrame(V[:, :pcs], df.columns, cols)
# For clarity, rename top-left cell in both loading DataFrames
U.index.name = "FeatureID"
V.index.name = "SampleID"
# get prop. var. explained
p = s**2 / np.sum(s**2)
p = | pd.Series(p.T, index=cols) | pandas.Series |
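
# Illustrative usage sketch (not part of the original module): run the Aitchison
# PCA above on a tiny random feature table (rows = features, columns = samples).
# Counts are strictly positive because clr() cannot handle zeros; per the
# docstring, apca returns feature loadings, proportions explained and sample loadings.
def _example_apca():
    rng = np.random.RandomState(0)
    table = pd.DataFrame(rng.randint(1, 100, size=(20, 8)),
                         index=["F%d" % i for i in range(20)],
                         columns=["S%d" % j for j in range(8)])
    U, p, V = apca(table)
    return U, p, V
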
import numpy as np
import pandas as pd
from collections import namedtuple
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.cluster import KMeans
from fcutils.maths.geometry import calc_distance_from_point
from fcutils.maths.geometry import calc_distance_between_points_in_a_vector_2d, calc_distance_between_points_2d
from fcutils.plotting.colors import salmon, colorMap, goldenrod, desaturate_color
from fcutils.maths.filtering import median_filter_1d
from behaviour.utilities.signals import get_times_signal_high_and_low
from analysis.dbase.tables import Session, Tracking, ProcessedMouse
"""
Bunch of utils to facilitate the loading and processing of locomotion data
"""
# --------------------------------- Fetchers --------------------------------- #
def fetch_tracking_processed(experiment = None, subexperiment = None,
mouse = None, injected = None, just_mouse=False):
if not just_mouse:
q = (Session * ProcessedMouse)
else:
q = (Session)
if experiment is not None:
q = q & f'exp_name="{experiment}"'
if subexperiment is not None:
q = q & f'subname="{subexperiment}"'
if mouse is not None:
q = q & f'mouse_id="{mouse}"'
if injected is not None:
q = q * Session.IPinjection & f'injected="{injected}"'
if len(q) == 1:
df = pd.DataFrame(q.fetch1())
else:
df = pd.DataFrame(q.fetch())
if df.empty:
raise ValueError("Could not fetch tracking data processed...")
else:
return df
def fetch_tracking(for_bp=True,
bp = None, bp1 = None, bp2 = None,
experiment = None, subexperiment = None,
mouse = None, injected = None):
q = (Session * Tracking)
if for_bp:
q = q * Tracking.BodyPartTracking
if bp is not None:
q = q & f"bp='{bp}'"
else:
q = q * Tracking.BodySegmentTracking
if bp1 is not None:
q = q & f"1='{bp1}'"
if bp2 is not None:
q = q & f"bp='{bp2}'"
if experiment is not None:
q = q & f'exp_name="{experiment}"'
if subexperiment is not None:
q = q & f'subname="{subexperiment}"'
if mouse is not None:
q = q & f'mouse_id="{mouse}"'
if injected is not None:
q = q * Session.IPinjection & f'injected="{injected}"'
if len(q) == 1:
return pd.DataFrame(q.fetch1())
else:
return pd.DataFrame(q.fetch())
# --------------------------------- Analysis --------------------------------- #
def get_frames_state(tracking):
if not len(tracking):
return tracking
# Standardise
try:
speed = preprocessing.scale(np.nan_to_num(tracking.speed.values))
angular_velocity = preprocessing.scale(np.nan_to_num(tracking.ang_vel.values))
except Exception as e:
speed = preprocessing.scale(np.nan_to_num(tracking.speed.values[0]))
angular_velocity = preprocessing.scale(np.nan_to_num(tracking.ang_vel.values[0]))
# Fit kmeans
dataset = pd.DataFrame(dict(speed=speed, angular_velocity=angular_velocity))
kmeans = KMeans(n_clusters = 10, init = 'k-means++', random_state = 42)
res = kmeans.fit(dataset)
# Get cluster and state
y_kmeans = kmeans.fit_predict(dataset)
tracking['cluster'] = y_kmeans
dataset['cluster'] = y_kmeans
# Get state from clusters
clusters_means = round(dataset.groupby('cluster').mean(),1)
right_clusters = clusters_means.loc[clusters_means.angular_velocity < -.5]
right_clusters_index = list(right_clusters.index.values)
left_clusters = clusters_means.loc[clusters_means.angular_velocity > .5]
left_clusters_index = list(left_clusters.index.values)
clusters = dict(
left_turn = 'left',
right_turn = 'right',
)
running_clusters = clusters_means.loc[
(clusters_means.angular_velocity > -.5) &
(clusters_means.angular_velocity < .5)].sort_values('speed')
for i, idx in enumerate(running_clusters.index.values):
clusters[f'locomotion_{i}'] = idx
clusters_lookup = {v:k for k,v in clusters.items()}
# Clean up states
y_kmeans = ['left' if k in left_clusters_index else
'right' if k in right_clusters_index else k
for k in y_kmeans]
state = [clusters_lookup[v] for v in y_kmeans]
tracking['state'] = state
return tracking
def get_when_in_center(tracking, center, radius):
xy = np.vstack([tracking.x, tracking.y])
dist = calc_distance_from_point(xy.T, center)
to_exclude = np.where(dist > radius)[0]
in_center = np.ones_like(tracking.x)
in_center[to_exclude] = 0
tracking['in_center'] = in_center
return tracking
def get_center_bouts(datasets):
all_bouts = {}
for dn, (dataset, datas) in enumerate(datasets.items()):
bts = dict(start=[], end=[], speed=[], orientation=[],
ang_vel=[], x=[], y=[],
duration=[], distance=[],
state=[], in_center=[], mouse=[],
abs_ang_displ=[], ang_displ=[])
for mouse,data in datas.items():
in_center = data.in_center.values
onsets, offsets = get_times_signal_high_and_low(in_center, th=.1)
if offsets[0] < onsets[0]:
offsets = offsets[1:]
# Loop over bouts
for onset, offset in zip(onsets, offsets):
onset += 1
if offset < onset: raise ValueError
elif offset - onset < 5: continue # skip bouts that are too short
bts['start'].append(onset)
bts['end'].append(offset)
bts['duration'].append(offset - onset)
bts['speed'].append(data.speed.values[onset:offset])
bts['distance'].append(np.sum(data.speed.values[onset:offset]))
bts['orientation'].append(data.orientation.values[onset:offset])
bts['ang_vel'].append(data.ang_vel.values[onset:offset])
bts['abs_ang_displ'].append(np.sum(np.abs(data.ang_vel.values[onset:offset])))
bts['ang_displ'].append(np.sum(data.ang_vel.values[onset:offset]))
bts['x'].append(data.x.values[onset:offset])
bts['y'].append(data.y.values[onset:offset])
bts['state'].append(data.state.values[onset:offset])
bts['in_center'].append(data.in_center.values[onset:offset])
bts['mouse'].append(mouse)
all_bouts[dataset] = | pd.DataFrame(bts) | pandas.DataFrame |
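
# Illustrative usage sketch (hypothetical experiment, arena and mouse names): the
# input to get_center_bouts is a dict of datasets, each mapping mouse id -> processed
# tracking dataframe that already has the 'state' column (from get_frames_state)
# and the 'in_center' column (from get_when_in_center).
def _example_center_bouts():
    tracking = fetch_tracking_processed(experiment="Circarena",
                                        subexperiment="baseline",
                                        mouse="CA824")
    tracking = get_frames_state(tracking)
    tracking = get_when_in_center(tracking, center=(480, 480), radius=150)
    datasets = {"baseline": {"CA824": tracking}}
    return get_center_bouts(datasets)
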
from typing import Dict
from typing import Union
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.transforms import ResampleWithDistributionTransform
DistributionDict = Dict[str, pd.DataFrame]
@pytest.fixture
def daily_exog_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=3),
"segment": "segment_1",
"regressor_exog": 2,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": | pd.date_range(start="2020-01-05", freq="D", periods=3) | pandas.date_range |
#
# Copyright 2021 Grupo de Sistemas Inteligentes, DIT, Universidad Politecnica de Madrid (UPM)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processing of the PL05 dataset.
URL:
http://www.cs.cornell.edu/people/pabo/movie-review-data/
REF:
<NAME> and <NAME>
Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales
Proceedings of ACL 2005.
"""
import os
import logging
import codecs
import pandas as pd
import numpy as np
from glob import glob
from gsitk.datasets import utils
from gsitk.datasets.datasets import Dataset
from gsitk.preprocess import normalize
logger = logging.getLogger(__name__)
class Pl05(Dataset):
def normalize_data(self):
dataset = | pd.DataFrame(columns=['id', 'text', 'polarity']) | pandas.DataFrame |
### preprocessing
"""
code is taken from
tunguz - Surprise Me 2!
https://www.kaggle.com/tunguz/surprise-me-2/code
"""
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
import matplotlib.pyplot as plt
data = {
'tra': pd.read_csv('../input/air_visit_data.csv'),
'as': pd.read_csv('../input/air_store_info.csv'),
'hs': pd.read_csv('../input/hpg_store_info.csv'),
'ar': pd.read_csv('../input/air_reserve.csv'),
'hr': pd.read_csv('../input/hpg_reserve.csv'),
'id': pd.read_csv('../input/store_id_relation.csv'),
'tes': | pd.read_csv('../input/sample_submission.csv') | pandas.read_csv |
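# --- Hedged sketch (not from the original kernel): with the CSVs loaded into `data`, a
# typical next step is to map HPG reservations onto air store ids via the relation table.
# Column names below follow the competition files; the frames here are tiny stand-ins.
import pandas as pd
_hr = pd.DataFrame({'hpg_store_id': ['hpg_1'], 'visit_datetime': ['2017-01-01 19:00:00'],
                    'reserve_visitors': [4]})
_rel = pd.DataFrame({'hpg_store_id': ['hpg_1'], 'air_store_id': ['air_9']})
_hr_air = pd.merge(_hr, _rel, how='inner', on=['hpg_store_id'])
assert 'air_store_id' in _hr_air.columns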
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).drop_duplicates()
# remove rows with invalid values:
to_remove = pd.concat([df.loc[(df.bedrooms <= 0) & (df.bathrooms <= 0)],
df.loc[(df.sqft_living <= 0) | (df.sqft_lot <= 0) | (df.sqft_above <= 0)
| (df.sqft_basement < 0) | (df.sqft_living15 < 0)
| (df.sqft_lot15 < 0) | (df.price < 0) | (df.price > 5_000_000)
| (df.price.isnull())]]).drop_duplicates()
df.drop(to_remove.index, inplace=True)
# parse the date column:
df['date'] = | pd.to_datetime(df.date, errors='coerce') | pandas.to_datetime |
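# --- Hedged aside (illustration only): errors='coerce' turns unparseable date strings into
# NaT instead of raising, so invalid rows can be filtered out afterwards.
import pandas as pd
_dates = pd.to_datetime(pd.Series(['2014-10-13', 'not-a-date']), errors='coerce')
assert _dates.isna().tolist() == [False, True]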
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
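# Hedged usage note (comment-only illustration, not part of the original test suite): any
# test method that exercises self.panel can be wrapped so that the SparsePanel deprecation
# FutureWarning is tolerated instead of failing the test, e.g.
#
#     class MySparseTests(SafeForSparse):
#         @ignore_sparse_panel_future_warning
#         def test_double(self):
#             self.assert_panel_equal(self.panel * 2, self.panel + self.panel)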
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on Windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = | Panel({'Item1': df}) | pandas.core.panel.Panel |
# %% import packages
import numpy as np
import pandas as pd
import itertools
import warnings
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
import statsmodels.api as sm
from pandas.api.types import CategoricalDtype
from scipy.stats import boxcox
def timeStampToDays(series: pd.Series) -> pd.Series:
"""
Convert a timedelta series into a float series with the number of days
Args:
series (pd.Series): input pandas timedelta series (e.g. the gap between two datetime columns).
Returns:
result (pd.Series): pandas series of days as floats.
"""
D = series.dt.components['days']
H = series.dt.components['hours']
M = series.dt.components['minutes']
result = D + (H / 24) + (M / (60 * 24))
return result
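# Hedged usage sketch (kept as comments so importing this module stays side-effect free):
# _gaps = pd.Series([pd.Timedelta(days=1, hours=12), pd.Timedelta(minutes=36)])
# timeStampToDays(_gaps).tolist() -> [1.5, 0.025]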
def sampleTimeSeries(series: pd.Series,
sampleInterval: str) -> pd.Series:
"""
Sample a pandas datetime series at a given interval by converting it to period labels
Args:
series (pd.Series): input pandas datetime series.
sampleInterval (str): type of sampling required.
Raises:
ValueError: error in case of invalid sampling parameter.
Returns:
series (pd.Series): output series of period labels.
"""
if sampleInterval not in ['day', 'week', 'month', 'year']:
raise ValueError(f"""sampleInterval parameter: {sampleInterval} not a valid sample interval.
Choose between ['day', 'week', 'month', 'year']""")
if sampleInterval == 'day':
series = series.dt.strftime('%Y-%j')
elif sampleInterval == 'week':
series = series.dt.strftime('%Y-%U')
elif sampleInterval == 'month':
series = series.dt.strftime('%Y-%m')
elif sampleInterval == 'year':
series = series.dt.strftime('%Y')
return series
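# Hedged usage sketch (comments only, so module behaviour is unchanged):
# _ts = pd.Series(pd.to_datetime(['2021-01-03', '2021-02-14']))
# sampleTimeSeries(_ts, 'month').tolist() -> ['2021-01', '2021-02']
# sampleTimeSeries(_ts, 'year').tolist() -> ['2021', '2021']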
def groupPerWeek(df: pd.DataFrame,
timeVariable: str,
groupVariable: str,
groupType: str) -> pd.DataFrame:
"""
Perform a weekly groupby based on a datetime variable, applying a specific type of grouping
Args:
df (pd.DataFrame): input pandas dataframe.
timeVariable (str): column name corresponding to the time variable.
groupVariable (str): column name corresponding to the grouping variable.
groupType (str): type of grouping function.
Raises:
ValueError: error in case of invalid sampling parameter.
Returns:
df (pd.DataFrame): Output grouped DataFrame.
"""
if groupType not in ['count', 'sum']:
raise ValueError(f"""groupType parameter: {groupType} not a valid grouping function.
Choose between ['count', 'sum']""")
# convert to dataframe if a series
if isinstance(df, pd.Series):
df = pd.DataFrame([[df.index.values.T, df.values]],
columns=[timeVariable, groupVariable])
df['DatePeriod'] = pd.to_datetime(df[timeVariable]) - pd.to_timedelta(7, unit='d')
if groupType == 'count':
df = df.groupby([pd.Grouper(key=timeVariable,
freq='W-MON')])[groupVariable].size()
elif groupType == 'sum':
df = df.groupby([pd.Grouper(key=timeVariable,
freq='W-MON')])[groupVariable].sum()
df = df.sort_index()
return df
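# Hedged usage sketch (comments only, illustrative column names):
# _orders = pd.DataFrame({'OrderDate': pd.to_datetime(['2021-01-04', '2021-01-05', '2021-01-12']),
#                         'Quantity': [1, 2, 5]})
# groupPerWeek(_orders, 'OrderDate', 'Quantity', 'sum') -> weekly totals indexed by W-MON buckets
# groupPerWeek(_orders, 'OrderDate', 'Quantity', 'count') -> weekly row counts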
def groupPerMonth(df: pd.DataFrame,
timeVariable: str,
groupVariable: str,
groupType: str) -> pd.DataFrame:
"""
Perform a monthly groupby based on a datetime variable, applying a specific type of grouping
Args:
df (pd.DataFrame): input pandas dataframe.
timeVariable (str): column name corresponding to the time variable.
groupVariable (str): column name corresponding to the grouping variable.
groupType (str): type of grouping function.
Raises:
ValueError: error in case of invalid sampling parameter.
Returns:
df (pd.DataFrame): Output grouped DataFrame.
"""
if groupType not in ['count', 'sum']:
raise ValueError(f"""groupType parameter: {groupType} not a valid grouping function.
Choose between ['count', 'sum']""")
if isinstance(df, pd.Series): # convert to dataframe if a series
df = pd.DataFrame([[df.index.values.T, df.values]],
columns=[timeVariable, groupVariable])
# df['DatePeriod'] = pd.to_datetime(df[timeVariable]) - pd.to_timedelta(7, unit='d')
if groupType == 'count':
df = df.groupby([pd.Grouper(key=timeVariable, freq='M')])[groupVariable].size()
elif groupType == 'sum':
df = df.groupby([pd.Grouper(key=timeVariable, freq='M')])[groupVariable].sum()
df = df.sort_index()
return df
def groupPerWeekday(df: pd.DataFrame,
timeVariable: str,
groupVariable: str) -> pd.DataFrame:
"""
Perform a groupby per weekday based on a datetime variable.
Args:
df (pd.DataFrame): input pandas dataframe.
timeVariable (str): column name corresponding to the time variable.
groupVariable (str): column name corresponding to the grouping variable.
Returns:
D_grouped (pd.DataFrame): Output grouped DataFrame.
"""
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
cat_type = | CategoricalDtype(categories=cats, ordered=True) | pandas.api.types.CategoricalDtype |
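# --- Hedged aside (illustration only): an ordered CategoricalDtype makes weekday labels sort
# Monday..Sunday instead of alphabetically, which is presumably why groupPerWeekday builds one.
import pandas as pd
from pandas.api.types import CategoricalDtype
_cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
_wd = pd.Series(['Sunday', 'Monday', 'Friday']).astype(CategoricalDtype(categories=_cats, ordered=True))
assert list(_wd.sort_values()) == ['Monday', 'Friday', 'Sunday']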
"""
GridFrame -- subclass of wx.Frame. Contains grid and buttons to manipulate it.
GridBuilder -- data methods for GridFrame (add data to frame, save it, etc.)
"""
import wx
import pandas as pd
import numpy as np
from dialogs import drop_down_menus3 as drop_down_menus
from dialogs import pmag_widgets as pw
from dialogs import magic_grid3 as magic_grid
#from pmagpy.controlled_vocabularies3 import vocab
import pmagpy.contribution_builder as cb
class GridFrame(wx.Frame): # class GridFrame(wx.ScrolledWindow):
"""
make_magic
"""
def __init__(self, contribution, WD=None, frame_name="grid frame",
panel_name="grid panel", parent=None, exclude_cols=(),
huge=False, main_frame=None):
self.parent = parent
self.main_frame = main_frame
wx.GetDisplaySize()
title = 'Edit {} data'.format(panel_name)
super(GridFrame, self).__init__(parent=parent, id=wx.ID_ANY,
name=frame_name, title=title)
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
self.remove_cols_mode = False
self.deleteRowButton = None
self.selected_rows = set()
self.contribution = contribution
self.huge = huge
self.df_slice = None
self.exclude_cols = exclude_cols
self.error_frame = None
self.panel = wx.Panel(self, name=panel_name, size=wx.GetDisplaySize())
self.grid_type = str(panel_name)
dm = self.contribution.data_model.dm[self.grid_type]
dm['str_validations'] = dm['validations'].str.join(", ")
# these are the headers that are required no matter what for this datatype
self.reqd_headers = dm[dm['str_validations'].str.contains("required\(\)").fillna(False)].index
self.dm = dm
if self.parent:
self.Bind(wx.EVT_WINDOW_DESTROY, self.parent.Parent.on_close_grid_frame)
if self.grid_type == 'ages':
self.child_type = None
self.parent_type = None
else:
try:
child_ind = self.contribution.ancestry.index(self.grid_type) - 1
if child_ind < 0:
self.child_type = None
self.child_type = self.contribution.ancestry[child_ind]
parent_ind = self.contribution.ancestry.index(self.grid_type) + 1
if parent_ind >= len(self.contribution.ancestry):
self.parent_type = None
else:
self.parent_type = self.contribution.ancestry[parent_ind]
except ValueError:
self.child_type = None
self.parent_type = None
self.WD = WD
self.InitUI()
# remove 'level' column from age grid if present
if self.grid_type == 'ages':
try:
ind = self.grid.col_labels.index('level')
self.remove_col_label(col=ind)
except ValueError:
pass
# if grid is empty except for defaults, reset grid.changes
if self.grid_builder.current_grid_empty():
self.grid.changes = set()
del wait
## Initialization functions
def InitUI(self):
"""
initialize window
"""
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
if self.grid_type in self.contribution.tables:
dataframe = self.contribution.tables[self.grid_type]
else:
dataframe = None
self.grid_builder = GridBuilder(self.contribution, self.grid_type,
self.panel, parent_type=self.parent_type,
reqd_headers=self.reqd_headers,
exclude_cols=self.exclude_cols,
huge=self.huge)
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
## Column management buttons
self.add_cols_button = wx.Button(self.panel, label="Add additional columns",
name='add_cols_btn',
size=(170, 20))
self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button)
self.remove_cols_button = wx.Button(self.panel, label="Remove columns",
name='remove_cols_btn',
size=(170, 20))
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
## Row management buttons
self.remove_row_button = wx.Button(self.panel, label="Remove last row",
name='remove_last_row_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button)
many_rows_box = wx.BoxSizer(wx.HORIZONTAL)
self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)",
name='add_many_rows_btn')
self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1,
name='rows_spin_ctrl')
many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE)
many_rows_box.Add(self.rows_spin_ctrl)
self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button)
self.deleteRowButton = wx.Button(self.panel, id=-1,
label='Delete selected row(s)',
name='delete_row_btn')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton)
self.deleteRowButton.Disable()
# measurements table should not be able to add new rows
# that should be done elsewhere
if self.huge:
self.add_many_rows_button.Disable()
self.rows_spin_ctrl.Disable()
self.remove_row_button.Disable()
# can't remove cols (seg fault), but can add them
#self.add_cols_button.Disable()
self.remove_cols_button.Disable()
## Data management buttons
self.importButton = wx.Button(self.panel, id=-1,
label='Import MagIC-format file',
name='import_btn')
self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton)
self.exitButton = wx.Button(self.panel, id=-1,
label='Save and close grid',
name='save_and_quit_btn')
self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton)
self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel',
name='cancel_btn')
self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton)
self.Bind(wx.EVT_CLOSE, self.onCancelButton)
## Input/output buttons
self.copyButton = wx.Button(self.panel, id=-1,
label="Start copy mode",
name="copy_mode_btn")
self.Bind(wx.EVT_BUTTON, self.onCopyMode, self.copyButton)
self.selectAllButton = wx.Button(self.panel, id=-1,
label="Copy all cells",
name="select_all_btn")
self.Bind(wx.EVT_BUTTON, self.onSelectAll, self.selectAllButton)
self.copySelectionButton = wx.Button(self.panel, id=-1,
label="Copy selected cells",
name="copy_selection_btn")
self.Bind(wx.EVT_BUTTON, self.onCopySelection, self.copySelectionButton)
self.copySelectionButton.Disable()
## Help message and button
# button
self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help",
name='toggle_help_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn)
# message
self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL)
if self.grid_type == 'measurements':
self.default_msg_text = "Edit measurements here.\nIn general, measurements should be imported directly into Pmag GUI,\nwhich has protocols for converting many lab formats into the MagIC format.\nIf we are missing your particular lab format, please let us know: https://github.com/PmagPy/PmagPy/issues.\nThis grid is just meant for looking at your measurements and doing small edits.\nCurrently, you can't add/remove rows here. You can add columns and edit cell values."
else:
self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nSuggested vocabularies are indicated by ^^, and also have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".'.format(self.grid_type)
txt = ''
if self.grid_type == 'locations':
txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/longitudes.\nThese values will be written to your upload file.'
if self.grid_type == 'samples':
txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically."
if self.grid_type == 'specimens':
txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically."
if self.grid_type == 'ages':
txt = "\n\nNote: only ages for which you provide data will be written to your upload file."
self.default_msg_text += txt
self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text,
style=wx.TE_CENTER, name='msg text')
self.help_msg_boxsizer.Add(self.msg_text)
self.help_msg_boxsizer.ShowItems(False)
## Code message and button
# button
self.toggle_codes_btn = wx.Button(self.panel, id=-1,
label="Show method codes",
name='toggle_codes_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn)
# message
self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, self.contribution.vocab)
self.code_msg_boxsizer.ShowItems(False)
## Add content to sizers
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns',
name='manage columns'), wx.VERTICAL)
row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows',
name='manage rows'), wx.VERTICAL)
self.main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data',
name='manage data'), wx.VERTICAL)
input_output_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='In/Out',
name='manage in out'), wx.VERTICAL)
col_btn_vbox.Add(self.add_cols_button, flag=wx.ALL, border=5)
col_btn_vbox.Add(self.remove_cols_button, flag=wx.ALL, border=5)
row_btn_vbox.Add(many_rows_box, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.remove_row_button, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.deleteRowButton, flag=wx.ALL, border=5)
self.main_btn_vbox.Add(self.importButton, flag=wx.ALL, border=5)
self.main_btn_vbox.Add(self.exitButton, flag=wx.ALL, border=5)
self.main_btn_vbox.Add(self.cancelButton, flag=wx.ALL, border=5)
input_output_vbox.Add(self.copyButton, flag=wx.ALL, border=5)
input_output_vbox.Add(self.selectAllButton, flag=wx.ALL, border=5)
input_output_vbox.Add(self.copySelectionButton, flag=wx.ALL, border=5)
self.hbox.Add(col_btn_vbox)
self.hbox.Add(row_btn_vbox)
self.hbox.Add(self.main_btn_vbox)
self.hbox.Add(input_output_vbox)
#self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
#
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit)
# add actual data!
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
# fill in some default values
self.grid_builder.fill_defaults()
# set scrollbars
self.grid.set_scrollbars()
## this would be a way to prevent editing
## some cells in age grid.
## with multiple types of ages, though,
## this doesn't make much sense
#if self.grid_type == 'ages':
# attr = wx.grid.GridCellAttr()
# attr.SetReadOnly(True)
# self.grid.SetColAttr(1, attr)
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self.contribution, self.grid)
self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL)
self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5)
# final layout, set size
self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER,#|wx.SHAPED,
border=20)
self.main_sizer.Add(self.toggle_help_btn, 0,
flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED,
border=5)
self.main_sizer.Add(self.help_msg_boxsizer, 0,
flag=wx.BOTTOM|wx.ALIGN_CENTRE,
border=10)
self.main_sizer.Add(self.toggle_codes_btn, 0,
flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED,
border=5)
self.main_sizer.Add(self.code_msg_boxsizer, 0,
flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED,
border=5)
self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.ALIGN_CENTER|wx.EXPAND, border=10)
self.panel.SetSizer(self.main_sizer)
panel_sizer = wx.BoxSizer(wx.VERTICAL)
panel_sizer.Add(self.panel, 1, wx.EXPAND)
self.SetSizer(panel_sizer)
panel_sizer.Fit(self)
## this keeps sizing correct if the user resizes the window manually
#self.Bind(wx.EVT_SIZE, self.do_fit)
# self.Centre()
self.Show()
def on_key_down(self, event):
"""
If the user presses Cmd+V (paste),
re-size the window in case pasting has changed the content size.
"""
keycode = event.GetKeyCode()
meta_down = event.MetaDown() or event.GetCmdDown()
if keycode == 86 and meta_down:
# treat it as if it were a wx.EVT_TEXT_SIZE
self.do_fit(event)
def do_fit(self, event, min_size=None):
"""
Re-fit the window to the size of the content.
"""
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.GetSize()
# if there isn't enough room to display new content
# resize the frame
if disp_size[1] - 75 < actual_size[1]:
self.SetSize((actual_size[0], disp_size[1] * .95))
# make sure you adhere to a minimum size
if min_size:
actual_size = self.GetSize()
larger_width = max([actual_size[0], min_size[0]])
larger_height = max([actual_size[1], min_size[1]])
if larger_width > actual_size[0] or larger_height > actual_size[1]:
self.SetSize((larger_width, larger_height))
self.Centre()
# this forces a resize which works
s = self.GetSize()
self.SetSize((0,0))
self.SetSize(s)
def toggle_help(self, event, mode=None):
"""
Show/hide help message on help button click.
"""
# if mode == 'open', show no matter what.
# if mode == 'close', close. otherwise, change state
btn = self.toggle_help_btn
shown = self.help_msg_boxsizer.GetStaticBox().IsShown()
# if mode is specified, do that mode
if mode == 'open':
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
elif mode == 'close':
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
# otherwise, simply toggle states
else:
if shown:
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
else:
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
self.do_fit(None)
def toggle_codes(self, event):
"""
Show/hide method code explanation widget on button click
"""
btn = event.GetEventObject()
if btn.Label == 'Show method codes':
self.code_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide method codes')
else:
self.code_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show method codes')
self.do_fit(None)
def show_errors(self, event):
from dialogs import thellier_gui_dialogs
import os
error_file = os.path.join(self.WD, self.grid_type + "_errors.txt")
if not os.path.exists(error_file):
pw.simple_warning("No error file for this grid")
return
frame = thellier_gui_dialogs.MyForm(0, error_file)
frame.Show()
self.error_frame = frame
# frame should be destroyed when grid frame is
## Grid event methods
def remove_col_label(self, event=None, col=None):
"""
check to see if column is required
if it is not, delete it from grid
"""
if event:
col = event.GetCol()
if not col:
return
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
elif '^^' in label:
label = label.strip('^^')
if label in self.reqd_headers:
pw.simple_warning("That header is required, and cannot be removed")
return False
else:
print('That header is not required:', label)
# remove column from wxPython grid
self.grid.remove_col(col)
# remove column from DataFrame if present
if self.grid_type in self.contribution.tables:
if label in self.contribution.tables[self.grid_type].df.columns:
del self.contribution.tables[self.grid_type].df[label]
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self)
def on_add_cols(self, event):
"""
Show simple dialog that allows user to add a new column name
"""
col_labels = self.grid.col_labels
dia = pw.ChooseOne(self, yes="Add single columns", no="Add groups")
result1 = dia.ShowModal()
if result1 == wx.ID_CANCEL:
return
elif result1 == wx.ID_YES:
items = sorted([col_name for col_name in self.dm.index if col_name not in col_labels])
dia = pw.HeaderDialog(self, 'columns to add',
items1=list(items), groups=[])
dia.Centre()
result2 = dia.ShowModal()
else:
groups = self.dm['group'].unique()
dia = pw.HeaderDialog(self, 'groups to add',
items1=list(groups), groups=True)
dia.Centre()
result2 = dia.ShowModal()
new_headers = []
if result2 == 5100:
new_headers = dia.text_list
# if there is nothing to add, quit
if not new_headers:
return
if result1 == wx.ID_YES:
# add individual headers
errors = self.add_new_grid_headers(new_headers)
else:
# add header groups
errors = self.add_new_header_groups(new_headers)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy()
def add_new_header_groups(self, groups):
"""
compile list of all headers belonging to all specified groups
eliminate all headers that are already included
add any req'd drop-down menus
return errors
"""
already_present = []
for group in groups:
col_names = self.dm[self.dm['group'] == group].index
for col in col_names:
if col not in self.grid.col_labels:
col_number = self.grid.add_col(col)
# add to appropriate headers list
# add drop down menus for user-added column
if col in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, col)
elif col in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, col)
elif col in ['specimen', 'sample', 'site', 'location',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, col)
elif col == 'experiments':
self.drop_down_menu.add_drop_down(col_number, col)
if col == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, col)
else:
already_present.append(col)
return already_present
def add_new_grid_headers(self, new_headers):
"""
Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too.
"""
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
# add drop down menus for user-added column
if name in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in ['specimen', 'sample', 'site',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, name)
elif name == 'experiments':
self.drop_down_menu.add_drop_down(col_number, name)
if name == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present
def on_remove_cols(self, event):
"""
enter 'remove columns' mode
"""
# open the help message
self.toggle_help(event=None, mode='open')
# first unselect any selected cols/cells
self.remove_cols_mode = True
self.grid.ClearSelection()
self.remove_cols_button.SetLabel("end delete column mode")
# change button to exit the delete columns mode
self.Unbind(wx.EVT_BUTTON, self.remove_cols_button)
self.Bind(wx.EVT_BUTTON, self.exit_col_remove_mode, self.remove_cols_button)
# then disable all other buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Disable()
# then make some visual changes
self.msg_text.SetLabel("Remove grid columns: click on a column header to delete it.\nRequired headers for {} may not be deleted.".format(self.grid_type))
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DOUBLE_BORDER)
self.grid.Refresh()
self.main_sizer.Fit(self) # might not need this one
self.grid.changes = set(range(self.grid.GetNumberRows()))
def on_add_rows(self, event):
"""
add rows to grid
"""
num_rows = self.rows_spin_ctrl.GetValue()
#last_row = self.grid.GetNumberRows()
for row in range(num_rows):
self.grid.add_row()
#if not self.grid.changes:
# self.grid.changes = set([])
#self.grid.changes.add(last_row)
#last_row += 1
self.main_sizer.Fit(self)
def on_remove_row(self, event, row_num=-1):
"""
Remove specified grid row.
If no row number is given, remove the last row.
"""
text = "Are you sure? If you select delete you won't be able to retrieve these rows..."
dia = pw.ChooseOne(self, "Yes, delete rows", "Leave rows for now", text)
dia.Centre()
result = dia.ShowModal()
if result == wx.ID_NO:
return
default = (255, 255, 255, 255)
if row_num == -1:
# unhighlight any selected rows:
for row in self.selected_rows:
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
row_num = self.grid.GetNumberRows() - 1
self.deleteRowButton.Disable()
self.selected_rows = {row_num}
# remove row(s) from the contribution
df = self.contribution.tables[self.grid_type].df
row_nums = list(range(len(df)))
df = df.iloc[[i for i in row_nums if i not in self.selected_rows]]
self.contribution.tables[self.grid_type].df = df
# now remove row(s) from grid
# delete rows, adjusting the row # appropriately as you delete
for num, row in enumerate(self.selected_rows):
row -= num
if row < 0:
row = 0
self.grid.remove_row(row)
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
# reset the grid
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self)
def exit_col_remove_mode(self, event):
"""
go back from 'remove cols' mode to normal
"""
        # close help message
self.toggle_help(event=None, mode='close')
# update mode
self.remove_cols_mode = False
# re-enable all buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Enable()
# unbind grid click for deletion
self.grid.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
# undo visual cues
self.grid.SetWindowStyle(wx.DEFAULT)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT)
self.msg_text.SetLabel(self.default_msg_text)
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
# re-bind self.remove_cols_button
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
self.remove_cols_button.SetLabel("Remove columns")
def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh()
def onLeftClickLabel(self, event):
"""
When user clicks on a grid label,
determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values,
or highlight a row for deletion).
"""
if event.Col == -1 and event.Row == -1:
pass
if event.Row < 0:
if self.remove_cols_mode:
self.remove_col_label(event)
else:
self.drop_down_menu.on_label_click(event)
else:
if event.Col < 0 and self.grid_type != 'age':
self.onSelectRow(event)
## Meta buttons -- cancel & save functions
def onImport(self, event):
"""
Import a MagIC-format file
"""
if self.grid.changes:
print("-W- Your changes will be overwritten...")
wind = pw.ChooseOne(self, "Import file anyway", "Save grid first",
"-W- Your grid has unsaved changes which will be overwritten if you import a file now...")
wind.Centre()
res = wind.ShowModal()
# save grid first:
if res == wx.ID_NO:
self.onSave(None, alert=True, destroy=False)
# reset self.changes
self.grid.changes = set()
openFileDialog = wx.FileDialog(self, "Open MagIC-format file", self.WD, "",
"MagIC file|*.*", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
result = openFileDialog.ShowModal()
if result == wx.ID_OK:
# get filename
filename = openFileDialog.GetPath()
# make sure the dtype is correct
f = open(filename)
line = f.readline()
if line.startswith("tab"):
delim, dtype = line.split("\t")
else:
delim, dtype = line.split("")
f.close()
dtype = dtype.strip()
if (dtype != self.grid_type) and (dtype + "s" != self.grid_type):
text = "You are currently editing the {} grid, but you are trying to import a {} file.\nPlease open the {} grid and then re-try this import.".format(self.grid_type, dtype, dtype)
pw.simple_warning(text)
return
# grab old data for concatenation
if self.grid_type in self.contribution.tables:
old_df_container = self.contribution.tables[self.grid_type]
else:
old_df_container = None
old_col_names = self.grid.col_labels
# read in new file and update contribution
df_container = cb.MagicDataFrame(filename, dmodel=self.dm,
columns=old_col_names)
# concatenate if possible
if not isinstance(old_df_container, type(None)):
df_container.df = pd.concat([old_df_container.df, df_container.df],
axis=0, sort=True)
self.contribution.tables[df_container.dtype] = df_container
self.grid_builder = GridBuilder(self.contribution, self.grid_type,
self.panel, parent_type=self.parent_type,
reqd_headers=self.reqd_headers)
# delete old grid
self.grid_box.Hide(0)
self.grid_box.Remove(0)
# create new, updated grid
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
# add data to new grid
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
# add new grid to sizer and fit everything
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
self.Centre()
# add any needed drop-down-menus
self.drop_down_menu = drop_down_menus.Menus(self.grid_type,
self.contribution,
self.grid)
# done!
return
def onCancelButton(self, event):
"""
Quit grid with warning if unsaved changes present
"""
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
self.main_frame.Raise()
def onSave(self, event, alert=False, destroy=True):
"""
Save grid data
"""
# tidy up drop_down menu
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# then save actual data
self.grid_builder.save_grid_data()
if not event and not alert:
return
# then alert user
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
if destroy:
self.Destroy()
### Custom copy/paste functionality
def onCopyMode(self, event):
# first save all grid data
self.grid_builder.save_grid_data()
self.drop_down_menu.clean_up()
# enable and un-grey the exit copy mode button
self.copyButton.SetLabel('End copy mode')
self.Bind(wx.EVT_BUTTON, self.onEndCopyMode, self.copyButton)
# disable and grey out other buttons
btn_list = [self.add_cols_button, self.remove_cols_button,
self.remove_row_button, self.add_many_rows_button,
self.importButton, self.cancelButton, self.exitButton]
for btn in btn_list:
btn.Disable()
self.copySelectionButton.Enable()
# next, undo useless bindings (mostly for drop-down-menus)
# this one works:
self.drop_down_menu.EndUI()
# these ones don't work: (it doesn't matter which one you bind to)
#self.grid.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
#self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
#self.panel.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
# this works hack-like:
self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.do_nothing)
# this one is irrelevant (it just deals with resizing)
#self.Unbind(wx.EVT_KEY_DOWN)
#
# make grid uneditable
self.grid.EnableEditing(False)
self.Refresh()
# change and show help message
copy_text = """You are now in 'copy' mode. To return to 'editing' mode, click 'End copy mode'.
To copy the entire grid, click the 'Copy all cells' button.
To copy a selection of the grid, you must first make a selection by either clicking and dragging, or using Shift click.
Once you have your selection, click the 'Copy selected cells' button, or hit 'Ctrl c'.
You may then paste into a text document or spreadsheet!
"""
self.toggle_help_btn.SetLabel('Hide help')
self.msg_text.SetLabel(copy_text)
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
self.help_msg_boxsizer.ShowItems(True)
self.do_fit(None)
# then bind for selecting cells in multiple columns
self.grid.Bind(wx.grid.EVT_GRID_RANGE_SELECT, self.onDragSelection)
# bind Cmd c for copy
self.grid.Bind(wx.EVT_KEY_DOWN, self.onKey)
# done!
def onDragSelection(self, event):
"""
Set self.df_slice based on user's selection
"""
if self.grid.GetSelectionBlockTopLeft():
#top_left = self.grid.GetSelectionBlockTopLeft()
#bottom_right = self.grid.GetSelectionBlockBottomRight()
# awkward hack to fix wxPhoenix memory problem, (Github issue #221)
bottom_right = eval(repr(self.grid.GetSelectionBlockBottomRight()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
top_left = eval(repr(self.grid.GetSelectionBlockTopLeft()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
#
top_left = top_left[0]
bottom_right = bottom_right[0]
else:
return
# GetSelectionBlock returns (row, col)
min_col = top_left[1]
max_col = bottom_right[1]
min_row = top_left[0]
max_row = bottom_right[0]
self.df_slice = self.contribution.tables[self.grid_type].df.iloc[min_row:max_row+1, min_col:max_col+1]
def do_nothing(self, event):
"""
Dummy method to prevent default header-click behavior
while in copy mode
"""
pass
def onKey(self, event):
"""
Copy selection if control down and 'c'
"""
if event.CmdDown() or event.ControlDown():
if event.GetKeyCode() == 67:
self.onCopySelection(None)
def onSelectAll(self, event):
"""
Selects full grid and copies it to the Clipboard
"""
# do clean up here!!!
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all grid data
self.grid_builder.save_grid_data()
df = self.contribution.tables[self.grid_type].df
# write df to clipboard for pasting
# header arg determines whether columns are taken
# index arg determines whether index is taken
pd.DataFrame.to_clipboard(df, header=False, index=False)
print('-I- You have copied all cells! You may paste them into a text document or spreadsheet using Command v.')
# done!
def onCopySelection(self, event):
"""
Copies self.df_slice to the Clipboard if slice exists
"""
if self.df_slice is not None:
| pd.DataFrame.to_clipboard(self.df_slice, header=False, index=False) | pandas.DataFrame.to_clipboard |
# coding=utf-8
"""
instrument.py - part of the PAT project (created 6/15/18).
Defines the Instrument class, which fetches live stock quotes through the IEX API
and stores them in Google BigQuery.
"""
import pandas
import datetime
import pytz
import logging
from requests import exceptions
from price_fetcher.bigquery import GoogleQuery
from price_fetcher.stock_api import Iex
logger = logging
logging.basicConfig(filename='ERRORS.log', level=logging.ERROR)
class Instrument:
"""
base class for Stocks
"""
def __init__(self, name):
self.ticker = name
self._stats = None
self._financials = None
self._news = None
self._price = None
self._score = None
self._iex = Iex('iexfinance', self.ticker)
self._price_history = None
self.gquery = GoogleQuery(ticker=name, dataset_id='my_dataset', table_id='live_' + name)
self.latest_info = {'Time': None, 'Ticker': name, 'Price': None, 'Volume': None}
self.changed = True
self._timezone = 'America/Chicago'
@property
def timezone(self):
"""
the time zone of the user
:return: str time zone
"""
return self._timezone
@timezone.setter
def timezone(self, value):
self._timezone = value
@staticmethod
def _append(df, time, price, volume):
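        # wrap the newest price/volume reading in a single-row DataFrame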
new_row = [{'Time': time, 'Price': price, 'Volume': volume}]
new_df = | pandas.DataFrame(new_row) | pandas.DataFrame |
from typing import List
import pandas as pd
import plotly.graph_objs as go
from dash import Dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output
GOOGLE_SHEETS_URL = "https://docs.google.com/spreadsheets/d/e/{}&single=true&output=csv"
SCHEDULE_URL = "2PACX-1vQys0oYM-7DvOA2QwVzeyjmYLX_jBv39gahspHoiWvAlqbms3U-yezehPPHFUD_kg/pub?gid=1056981259"
POINTS_URL = "2PACX-1vQys0oYM-7DvOA2QwVzeyjmYLX_jBv39gahspHoiWvAlqbms3U-yezehPPHFUD_kg/pub?gid=131097540"
WEEK_COL = "Week"
PLAYER_COL = "Player"
AGAINST_COL = "Against"
POINTS_COL = "Points"
RANK_COL = "Rank"
COL_JOIN = "{} {}"
CLOSE_MATCH_DIFF = 10
COLORS = [
"#EC7063",
"#AF7AC5",
"#5DADE2",
"#48C9B0",
"#F9E79F",
"#E59866",
"#F06292",
"#58D68D",
"#AED6F1",
"#F8BBD0",
]
def determine_points_against(
points_data: pd.DataFrame,
schedule: pd.DataFrame,
id_col: str,
player_col: str,
against_col: str,
points_col: str,
) -> pd.DataFrame:
"""Determine points against.
Args:
        points_data: DataFrame, weekly points data
schedule: DataFrame, schedule information
id_col: str, name of identifier column
player_col: str, player column name
against_col: str, vs column name
points_col: str, points column name
Returns:
points_against: DataFrame, weekly points against
"""
against = | pd.merge(points_data, schedule, on=[id_col, player_col], how="left") | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 23:17:22 2017
@author: roshi
"""
import pandas as pd
import matplotlib.pyplot as plt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from app import app
data = | pd.read_csv('./data/youth_tobacco_analysis.csv') | pandas.read_csv |
import pandas as pd
import pyomo.environ as pe
import os
import shutil
class invsys:
def __init__(self,inp_folder='',dshed_cost=1000000,rshed_cost=500,vmin=0.8,vmax=1.2,sbase=100,ref_bus=0):
"""Initialise the investment problem.
:param str inp_folder: The input directory for the data. It expects to find several CSV files detailing the system input data (Default current folder)
:param float dshed_cost: Demand Shedding Price (Default 1000000)
:param float rshed_cost: Renewable Shedding Price (Default 500)
:param float vmin: Minimum node voltage (Default 0.8)
:param float vmax: Maximum node voltage (Default 1.2)
:param float sbase: Base Apparent Power (default 100 MVA)
:param int ref_bus: Reference node (Default 0)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.invsys("3bus_inv")
"""
self.cgen = pd.read_csv(inp_folder+os.sep+'cgen_dist.csv')
self.egen = pd.read_csv(inp_folder+os.sep+'egen_dist.csv')
self.cren = pd.read_csv(inp_folder+os.sep+'cren_dist.csv')
self.eren = pd.read_csv(inp_folder+os.sep+'eren_dist.csv')
self.clin = pd.read_csv(inp_folder+os.sep+'elin_dist.csv')
self.elin = pd.read_csv(inp_folder+os.sep+'elin_dist.csv')
self.pdem = pd.read_csv(inp_folder+os.sep+'pdem_dist.csv')
self.qdem = pd.read_csv(inp_folder+os.sep+'qdem_dist.csv')
self.pren = pd.read_csv(inp_folder+os.sep+'pren_dist.csv')
self.qren = pd.read_csv(inp_folder+os.sep+'qren_dist.csv')
self.ncg = len(self.cgen)
self.neg = len(self.egen)
self.ncr = len(self.cren)
self.ner = len(self.eren)
self.ncl = len(self.clin)
self.nel = len(self.elin)
self.nbb = self.pdem.shape[1]
self.ntt = self.pdem.shape[0]
self.cds = dshed_cost
self.crs = rshed_cost
self.sb = sbase
self.vmin = vmin
self.vmax = vmax
self.inp_folder = inp_folder
def solve(self,solver='cbc',network=True,commit=True):
"""Solve the investment problem.
:param str solver: the solver to be used (Default is 'cbc')
:param bool network: Include the network constraints
        :param bool commit: Model generator commitment with binary on/off variables (Default True)
        :returns: Nothing; the optimisation results are written as CSV files to a ``results`` folder inside the input directory
        :rtype: None
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.invsys("3bus_inv")
        >>> sys_inv.solve()
"""
#Define the Model
m = pe.ConcreteModel()
#Define the Sets
m.cg = pe.Set(initialize=list(range(self.ncg)),ordered=True)
m.eg = pe.Set(initialize=list(range(self.neg)),ordered=True)
m.cr = pe.Set(initialize=list(range(self.ncr)),ordered=True)
m.er = pe.Set(initialize=list(range(self.ner)),ordered=True)
m.cl = pe.Set(initialize=list(range(self.ncl)),ordered=True)
m.el = pe.Set(initialize=list(range(self.nel)),ordered=True)
m.bb = pe.Set(initialize=list(range(self.nbb)),ordered=True)
m.tt = pe.Set(initialize=list(range(self.ntt)),ordered=True)
#Define Variables
#Objective Function
m.z = pe.Var()
#Active and Reactive Power Generations (Conventional)
m.pcg = pe.Var(m.cg,m.tt,within=pe.NonNegativeReals)
m.peg = pe.Var(m.eg,m.tt,within=pe.NonNegativeReals)
m.qcg = pe.Var(m.cg,m.tt,within=pe.NonNegativeReals)
m.qeg = pe.Var(m.eg,m.tt,within=pe.NonNegativeReals)
#Active and Reactive Power Generations (Renewable)
m.pcr = pe.Var(m.cr,m.tt,within=pe.NonNegativeReals)
m.per = pe.Var(m.er,m.tt,within=pe.NonNegativeReals)
m.qcr = pe.Var(m.cr,m.tt,within=pe.Reals)
m.qer = pe.Var(m.er,m.tt,within=pe.Reals)
#Demand and Renewable Shedding
m.pds = pe.Var(m.bb,m.tt,within=pe.Binary)
m.prs = pe.Var(m.bb,m.tt,within=pe.NonNegativeReals)
#Voltage Magnitude
m.vol = pe.Var(m.bb,m.tt,within=pe.Reals,bounds=(self.vmin,self.vmax))
#Active and Reactive Line Flows
m.pcl = pe.Var(m.cl,m.tt,within=pe.Reals) #Active Power
m.pel = pe.Var(m.el,m.tt,within=pe.Reals) #Active Power
m.qcl = pe.Var(m.cl,m.tt,within=pe.Reals) #Reactive Power
m.qel = pe.Var(m.el,m.tt,within=pe.Reals) #Reactive Power
#Commitment Status
if commit:
m.cu = pe.Var(m.cg,m.tt,within=pe.Binary)
m.eu = pe.Var(m.eg,m.tt,within=pe.Binary)
else:
m.cu = pe.Var(m.cg,m.tt,within=pe.NonNegativeReals,bounds=(0,1))
m.eu = pe.Var(m.eg,m.tt,within=pe.NonNegativeReals,bounds=(0,1))
#Investment Status (Conventional)
m.xg = pe.Var(m.cg,within=pe.Binary)
#Investment Status (Renewable)
m.xr = pe.Var(m.cr,within=pe.Binary)
#Investment Status (Line)
        m.xl = pe.Var(m.cl,within=pe.Binary)
#Objective Function
def obj_rule(m):
return m.z
m.obj = pe.Objective(rule=obj_rule)
#Definition Cost
def cost_def_rule(m):
if commit:
return m.z == sum(self.cgen['icost'][cg]*m.xg[cg] for cg in m.cg) + \
sum(self.cren['icost'][cr]*m.xr[cr] for cr in m.cr) + \
sum(self.clin['icost'][cl]*m.xl[cl] for cl in m.cl) + \
sum(self.cgen['scost'][cg]*m.cu[cg,tt] for cg in m.cg for tt in m.tt) + \
                   sum(self.egen['scost'][eg]*m.eu[eg,tt] for eg in m.eg for tt in m.tt) - \
sum(self.cgen['scost'][cg]*m.cu[cg,tt-1] for cg in m.cg for tt in m.tt if tt>1) - \
sum(self.egen['scost'][eg]*m.eu[eg,tt-1] for eg in m.eg for tt in m.tt if tt>1) + \
self.sb*(sum(self.cgen['ocost'][cg]*m.pcg[cg,tt] for cg in m.cg for tt in m.tt) + \
sum(self.egen['ocost'][eg]*m.peg[eg,tt] for eg in m.eg for tt in m.tt) + \
sum(self.cren['ocost'][cr]*m.pcr[cr,tt] for cr in m.cr for tt in m.tt) + \
sum(self.eren['ocost'][er]*m.per[er,tt] for er in m.er for tt in m.tt) + \
sum(self.cds*self.pdem.iloc[tt,bb]*m.pds[bb,tt] for bb in m.bb for tt in m.tt) + \
sum(self.crs*m.prs[bb,tt] for bb in m.bb for tt in m.tt))
else:
return m.z == sum(self.cgen['icost'][cg]*m.xg[cg] for cg in m.cg) + \
sum(self.cren['icost'][cr]*m.xr[cr] for cr in m.cr) + \
sum(self.clin['icost'][cl]*m.xl[cl] for cl in m.cl) + \
self.sb*(sum(self.cgen['ocost'][cg]*m.pcg[cg,tt] for cg in m.cg for tt in m.tt) + \
sum(self.egen['ocost'][eg]*m.peg[eg,tt] for eg in m.eg for tt in m.tt) + \
sum(self.cren['ocost'][cr]*m.pcr[cr,tt] for cr in m.cr for tt in m.tt) + \
sum(self.eren['ocost'][er]*m.per[er,tt] for er in m.er for tt in m.tt) + \
sum(self.cds*self.pdem.iloc[tt,bb]*m.pds[bb,tt] for bb in m.bb for tt in m.tt) + \
sum(self.crs*m.prs[bb,tt] for bb in m.bb for tt in m.tt))
m.cost_def = pe.Constraint(rule=cost_def_rule)
#Active Energy Balance
def act_bal_rule(m,bb,tt):
return sum(m.pcg[cg,tt] for cg in m.cg if self.cgen['bus'][cg] == bb) + \
sum(m.peg[eg,tt] for eg in m.eg if self.egen['bus'][eg] == bb) + \
sum(m.pcr[cr,tt] for cr in m.cr if self.cren['bus'][cr] == bb) + \
sum(m.per[er,tt] for er in m.er if self.eren['bus'][er] == bb) + \
sum(m.pcl[cl,tt] for cl in m.cl if self.clin['to'][cl] == bb) + \
sum(m.pel[el,tt] for el in m.el if self.elin['to'][el] == bb) == \
sum(m.pcl[cl,tt] for cl in m.cl if self.clin['from'][cl] == bb) + \
sum(m.pel[el,tt] for el in m.el if self.elin['from'][el] == bb) + \
self.pdem.iloc[tt,bb]*(1 - m.pds[bb,tt])
m.act_bal = pe.Constraint(m.bb, m.tt, rule=act_bal_rule)
#Reactive Energy Balance
def rea_bal_rule(m,bb,tt):
return sum(m.qcg[cg,tt] for cg in m.cg if self.cgen['bus'][cg] == bb) + \
sum(m.qeg[eg,tt] for eg in m.eg if self.egen['bus'][eg] == bb) + \
sum(m.qcr[cr,tt] for cr in m.cr if self.cren['bus'][cr] == bb) + \
sum(m.qer[er,tt] for er in m.er if self.eren['bus'][er] == bb) + \
sum(m.qcl[cl,tt] for cl in m.cl if self.clin['to'][cl] == bb) + \
sum(m.qel[el,tt] for el in m.el if self.elin['to'][el] == bb) == \
sum(m.qcl[cl,tt] for cl in m.cl if self.clin['from'][cl] == bb) + \
sum(m.qel[el,tt] for el in m.el if self.elin['from'][el] == bb) + \
self.qdem.iloc[tt,bb]*(1 - m.pds[bb,tt])
m.rea_bal = pe.Constraint(m.bb, m.tt, rule=rea_bal_rule)
#Minimum Active Generation (Conventional)
def min_act_cgen_rule(m,cg,tt):
return m.pcg[cg,tt] >= m.cu[cg,tt]*self.cgen['pmin'][cg]
m.min_act_cgen = pe.Constraint(m.cg, m.tt, rule=min_act_cgen_rule)
def min_act_egen_rule(m,eg,tt):
return m.peg[eg,tt] >= m.eu[eg,tt]*self.egen['pmin'][eg]
m.min_act_egen = pe.Constraint(m.eg, m.tt, rule=min_act_egen_rule)
#Minimum Active Generation (Renewable)
def min_act_cren_rule(m,cr,tt):
return m.pcr[cr,tt] >= self.cren['pmin'][cr]
        m.min_act_cren = pe.Constraint(m.cr, m.tt, rule=min_act_cren_rule)
def min_act_eren_rule(m,er,tt):
return m.per[er,tt] >= self.eren['pmin'][er]
        m.min_act_eren = pe.Constraint(m.er, m.tt, rule=min_act_eren_rule)
#Maximum Active Generation (Conventional)
def max_act_cgen_rule(m,cg,tt):
return m.pcg[cg,tt] <= m.cu[cg,tt]*self.cgen['pmax'][cg]
m.max_act_cgen = pe.Constraint(m.cg, m.tt, rule=max_act_cgen_rule)
def max_act_egen_rule(m,eg,tt):
return m.peg[eg,tt] <= m.eu[eg,tt]*self.egen['pmax'][eg]
m.max_act_egen = pe.Constraint(m.eg, m.tt, rule=max_act_egen_rule)
#Maximum Active Generation (Renewable)
def max_act_cren_rule(m,cr,tt):
return m.pcr[cr,tt] <= m.xr[cr]*self.cren['pmax'][cr]*sum(self.pren.iloc[tt,bb] for bb in m.bb if self.cren['bus'][cr] == bb)
m.max_act_cren = pe.Constraint(m.cr, m.tt, rule=max_act_cren_rule)
def max_act_eren_rule(m,er,tt):
return m.per[er,tt] <= self.eren['pmax'][er]*sum(self.pren.iloc[tt,bb] for bb in m.bb if self.eren['bus'][er] == bb)
m.max_act_eren = pe.Constraint(m.er, m.tt, rule=max_act_eren_rule)
#Minimum Reactive Generation (Conventional)
def min_rea_cgen_rule(m,cg,tt):
return m.qcg[cg,tt] >= m.cu[cg,tt]*self.cgen['qmin'][cg]
m.min_rea_cgen = pe.Constraint(m.cg, m.tt, rule=min_rea_cgen_rule)
def min_rea_egen_rule(m,eg,tt):
return m.qeg[eg,tt] >= m.eu[eg,tt]*self.egen['qmin'][eg]
m.min_rea_egen = pe.Constraint(m.eg, m.tt, rule=min_rea_egen_rule)
#Minimum Reactive Generation (Renewable)
def min_rea_cren_rule(m,cr,tt):
return m.qcr[cr,tt] >= m.xr[cr]*self.cren['qmin'][cr]
m.min_rea_cren = pe.Constraint(m.cr, m.tt, rule=min_rea_cren_rule)
def min_rea_eren_rule(m,er,tt):
return m.qer[er,tt] >= self.eren['qmin'][er]
m.min_rea_eren = pe.Constraint(m.er, m.tt, rule=min_rea_eren_rule)
#Maximum Reactive Generation (Conventional)
def max_rea_cgen_rule(m,cg,tt):
return m.qcg[cg,tt] <= m.cu[cg,tt]*self.cgen['qmax'][cg]
m.max_rea_cgen = pe.Constraint(m.cg, m.tt, rule=max_rea_cgen_rule)
def max_rea_egen_rule(m,eg,tt):
return m.qeg[eg,tt] <= m.eu[eg,tt]*self.egen['qmax'][eg]
m.max_rea_egen = pe.Constraint(m.eg, m.tt, rule=max_rea_egen_rule)
#Maximum Reactive Generation (Renewable)
def max_rea_cren_rule(m,cr,tt):
return m.qcr[cr,tt] <= m.xr[cr]*self.cren['qmax'][cr]
m.max_rea_cren = pe.Constraint(m.cr, m.tt, rule=max_rea_cren_rule)
def max_rea_eren_rule(m,er,tt):
return m.qer[er,tt] <= self.eren['qmax'][er]
m.max_rea_eren = pe.Constraint(m.er, m.tt, rule=max_rea_eren_rule)
#Maximum Renewable Shedding
def max_shed_rule(m,bb,tt):
return m.prs[bb,tt] <= (sum(m.xr[cr]*self.cren['pmax'][cr]*sum(self.pren.iloc[tt,bb] for bb in m.bb if self.cren['bus'][cr] == bb) for cr in m.cr) + \
sum(self.eren['pmax'][er]*sum(self.pren.iloc[tt,bb] for bb in m.bb if self.eren['bus'][er] == bb) for er in m.er)) - \
(sum(m.pcr[cr,tt] for cr in m.cr if self.cren['bus'][cr] == bb) + \
sum(m.per[er,tt] for er in m.er if self.eren['bus'][er] == bb))
m.max_shed = pe.Constraint(m.bb, m.tt, rule=max_shed_rule)
#Line flow Definition
def flow_rule(m,cl,el,tt):
if network:
if el == cl:
return (m.vol[self.clin['from'][cl],tt] - m.vol[self.clin['to'][cl],tt]) == \
self.clin['res'][cl]*(m.pcl[cl,tt]+m.pel[el,tt]) + \
self.clin['rea'][cl]*(m.qcl[cl,tt]+m.qel[el,tt])
                else:
                    return pe.Constraint.Skip
else:
return pe.Constraint.Skip
m.flow = pe.Constraint(m.cl, m.el, m.tt, rule=flow_rule)
#Max Active Line Flow
def max_act_cflow_rule(m,cl,tt):
if network:
return m.pcl[cl,tt] <= self.clin['pmax'][cl]*m.xl[cl]
else:
return pe.Constraint.Skip
m.max_act_cflow = pe.Constraint(m.cl, m.tt, rule=max_act_cflow_rule)
def max_act_eflow_rule(m,el,tt):
if network:
return m.pel[el,tt] <= self.elin['pmax'][el]*self.elin['ini'][el]
else:
return pe.Constraint.Skip
m.max_act_eflow = pe.Constraint(m.el, m.tt, rule=max_act_eflow_rule)
#Min Active Line Flow
def min_act_cflow_rule(m,cl,tt):
if network:
return m.pcl[cl,tt] >= -self.clin['pmax'][cl]*m.xl[cl]
else:
return pe.Constraint.Skip
m.min_act_cflow = pe.Constraint(m.cl, m.tt, rule=min_act_cflow_rule)
def min_act_eflow_rule(m,el,tt):
if network:
return m.pel[el,tt] >= -self.elin['pmax'][el]*self.elin['ini'][el]
else:
return pe.Constraint.Skip
m.min_act_eflow = pe.Constraint(m.el, m.tt, rule=min_act_eflow_rule)
#Max Reactive Line Flow
def max_rea_cflow_rule(m,cl,tt):
if network:
return m.qcl[cl,tt] <= self.clin['qmax'][cl]*m.xl[cl]
else:
return pe.Constraint.Skip
m.max_rea_cflow = pe.Constraint(m.cl, m.tt, rule=max_rea_cflow_rule)
def max_rea_eflow_rule(m,el,tt):
if network:
return m.qel[el,tt] <= self.elin['qmax'][el]*self.elin['ini'][el]
else:
return pe.Constraint.Skip
m.max_rea_eflow = pe.Constraint(m.el, m.tt, rule=max_rea_eflow_rule)
#Min Reactive Line Flow
def min_rea_cflow_rule(m,cl,tt):
if network:
return m.qcl[cl,tt] >= -self.clin['qmax'][cl]*m.xl[cl]
else:
return pe.Constraint.Skip
m.min_rea_cflow = pe.Constraint(m.cl, m.tt, rule=min_rea_cflow_rule)
def min_rea_eflow_rule(m,el,tt):
if network:
return m.qel[el,tt] >= -self.elin['qmax'][el]*self.elin['ini'][el]
else:
return pe.Constraint.Skip
m.min_rea_eflow = pe.Constraint(m.el, m.tt, rule=min_rea_eflow_rule)
#Voltage Magnitude at Reference Bus
def vol_ref_rule(m,tt):
if network:
return sum(m.vol[bb,tt] for bb in m.bb if bb==0) == 1
else:
return pe.Constraint.Skip
m.vol_ref = pe.Constraint(m.tt, rule=vol_ref_rule)
#Investment Status
def inv_stat_rule(m,cg,tt):
return m.cu[cg,tt] <= m.xg[cg]
m.inv_stat = pe.Constraint(m.cg, m.tt, rule=inv_stat_rule)
#Solve the optimization problem
solver_manager = pe.SolverManagerFactory('neos')
opt = pe.SolverFactory(solver)
opt.options['threads'] = 1
opt.options['mipgap'] = 1e-9
result = solver_manager.solve(m,opt=opt,symbolic_solver_labels=True,tee=True)
print(result['Solver'][0])
print(m.display())
#Save the results
self.output = m
self.xg_output = pyomo2dfinv(m.xg,m.cg).T
self.xr_output = pyomo2dfinv(m.xr,m.cr).T
self.xl_output = pyomo2dfinv(m.xl,m.cl).T
self.cu_output = pyomo2dfopr(m.cu,m.cg,m.tt).T
self.eu_output = pyomo2dfopr(m.eu,m.eg,m.tt).T
self.pcg_output = pyomo2dfopr(m.pcg,m.cg,m.tt).T
self.qcg_output = pyomo2dfopr(m.qcg,m.cg,m.tt).T
self.peg_output = pyomo2dfopr(m.peg,m.eg,m.tt).T
self.qeg_output = pyomo2dfopr(m.qeg,m.eg,m.tt).T
self.pcr_output = pyomo2dfopr(m.pcr,m.cr,m.tt).T
self.qcr_output = pyomo2dfopr(m.qcr,m.cr,m.tt).T
self.per_output = pyomo2dfopr(m.per,m.er,m.tt).T
self.qer_output = pyomo2dfopr(m.qer,m.er,m.tt).T
self.pds_output = pyomo2dfopr(m.pds,m.bb,m.tt).T
self.prs_output = pyomo2dfopr(m.prs,m.bb,m.tt).T
self.vol_output = pyomo2dfopr(m.vol,m.bb,m.tt).T
self.pcl_output = pyomo2dfopr(m.pcl,m.cl,m.tt).T
self.qcl_output = pyomo2dfopr(m.qcl,m.cl,m.tt).T
        self.pel_output = pyomo2dfopr(m.pel,m.el,m.tt).T
        self.qel_output = pyomo2dfopr(m.qel,m.el,m.tt).T
# Setup the results folder
outdir = self.inp_folder + os.sep + 'results'
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
self.xg_output.to_csv(outdir+os.sep+'investment_conventional.csv',index=False)
self.xr_output.to_csv(outdir+os.sep+'investment_renewable.csv',index=False)
        self.xl_output.to_csv(outdir+os.sep+'investment_line.csv',index=False)
self.cu_output.to_csv(outdir+os.sep+'cu.csv',index=False)
self.eu_output.to_csv(outdir+os.sep+'eu.csv',index=False)
self.pcg_output.to_csv(outdir+os.sep+'pcg.csv',index=False)
self.qcg_output.to_csv(outdir+os.sep+'qcg.csv',index=False)
self.peg_output.to_csv(outdir+os.sep+'peg.csv',index=False)
self.qeg_output.to_csv(outdir+os.sep+'qeg.csv',index=False)
self.pcr_output.to_csv(outdir+os.sep+'pcr.csv',index=False)
self.qcr_output.to_csv(outdir+os.sep+'qcr.csv',index=False)
self.per_output.to_csv(outdir+os.sep+'per.csv',index=False)
self.qer_output.to_csv(outdir+os.sep+'qer.csv',index=False)
self.pds_output.to_csv(outdir+os.sep+'pds.csv',index=False)
self.prs_output.to_csv(outdir+os.sep+'prs.csv',index=False)
self.vol_output.to_csv(outdir+os.sep+'vol.csv',index=False)
self.pcl_output.to_csv(outdir+os.sep+'pcl.csv',index=False)
self.qcl_output.to_csv(outdir+os.sep+'qcl.csv',index=False)
self.pel_output.to_csv(outdir+os.sep+'pel.csv',index=False)
self.qel_output.to_csv(outdir+os.sep+'qel.csv',index=False)
def pyomo2dfinv(pyomo_var,index1):
mat = []
for i in index1:
row = []
row.append(pyomo_var[i].value)
mat.append(row)
return | pd.DataFrame(mat) | pandas.DataFrame |
import pytest
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestSeriesRenameAxis:
def test_rename_axis_mapper(self):
# GH 19978
mi = | MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) | pandas.MultiIndex.from_product |
"""
inserting data with pandas
C - CREATE
R - READ
U - UPDATE
D - DELETE
"""
import pandas as pd
BASE_PATH = 'base.csv'
# CREATE
def post(dados: dict):
df_antigo = pd.DataFrame(get())
df_novo = pd.DataFrame(dados, index=[0])
df = df_antigo.append(df_novo)
df.to_csv(BASE_PATH, sep=',', index=False)
# READ
def get(id: int=None):
try:
df = pd.read_csv(BASE_PATH, sep=',')
lista_dados = df.to_dict('records')
if not id:
return df.to_dict('records') # [{"id":1 ...}, {"id": 2 ...}, ...]
for dado in lista_dados:
if dado['id'] == id:
return [dado]
except:
return []
# UPDATE
def put(id: int, dados_alterar):
lista_antiga = get()
lista_dados_novos = []
for dado in lista_antiga:
if dado["id"] == id:
dado = dados_alterar
lista_dados_novos.append(dado)
df = pd.DataFrame(lista_dados_novos)
df.to_csv(BASE_PATH, sep=',', index=False)
return
# DELETE
def delete(id: int):
lista_antiga = get()
lista_dados_novos = []
for dado in lista_antiga:
if not dado["id"] == id:
lista_dados_novos.append(dado)
df = | pd.DataFrame(lista_dados_novos) | pandas.DataFrame |
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
if pd.isnull(row["astrxm1_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM2 <> NULL | Next | Reject
if pd.isnull(row["astrxm2_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM3 <> NULL | Select | Reject
if pd.notnull(row["astrxm3_dat"]):
assert row["ast_group"]
else:
assert not row["ast_group"]
def test_cns_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CNS_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["cns_cov_dat"]):
assert row["cns_group"]
else:
assert not row["cns_group"]
def test_resp_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF AST_GROUP <> NULL | Select | Next
if row["ast_group"]:
assert row["resp_group"]
continue
# IF RESP_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["resp_cov_dat"]):
assert row["resp_group"]
else:
assert not row["resp_group"]
def test_bmi_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_OBESITY_DAT > BMI_DAT | Select | Next
if gt(row["sev_obesity_dat"], row["bmi_dat"]):
assert row["bmi_group"]
continue
# IF BMI_VAL >=40 | Select | Reject
if gte(row["bmi_val"], 40):
assert row["bmi_group"]
else:
assert not row["bmi_group"]
def test_diab_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF DIAB_DAT > DMRES_DAT | Select | Reject
if gt(row["diab_dat"], row["dmres_dat"]):
assert row["diab_group"]
else:
assert not row["diab_group"]
def test_sevment_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_MENTAL_DAT > SMHRES_DAT | Select | Reject
if gt(row["sev_mental_dat"], row["smhres_dat"]):
assert row["sevment_group"]
else:
assert not row["sevment_group"]
def test_atrisk_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMUNOGROUP <> NULL | Select | Next
if row["immuno_group"]:
assert row["atrisk_group"]
continue
# IF CKD_GROUP <> NULL | Select | Next
if row["ckd_group"]:
assert row["atrisk_group"]
continue
# IF RESP_GROUP <> NULL | Select | Next
if row["resp_group"]:
assert row["atrisk_group"]
continue
# IF DIAB_GROUP <> NULL | Select | Next
if row["diab_group"]:
assert row["atrisk_group"]
continue
# IF CLD_DAT <>NULL | Select | Next
if pd.notnull(row["cld_dat"]):
assert row["atrisk_group"]
continue
# IF CNS_GROUP <> NULL | Select | Next
if row["cns_group"]:
assert row["atrisk_group"]
continue
# IF CHD_COV_DAT <> NULL | Select | Next
if pd.notnull(row["chd_cov_dat"]):
assert row["atrisk_group"]
continue
# IF SPLN_COV_DAT <> NULL | Select | Next
if pd.notnull(row["spln_cov_dat"]):
assert row["atrisk_group"]
continue
# IF LEARNDIS_DAT <> NULL | Select | Next
if | pd.notnull(row["learndis_dat"]) | pandas.notnull |
import json
import datetime
import re
import sys
import ipdb
import pandas as pd
def logging(message):
sys.stderr.write('\r')
sys.stderr.write(message)
sys.stderr.flush()
def clean_company_name(name):
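    # strip common corporate suffixes (Inc, Co, Corp, 'Class A', ...) so names match article text more reliably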
company_token = [
'^\"', '\"$', 'Inc\W', 'Inc$', 'Co\W', 'Co$',
'Corp\W', 'Corp$', 'Technology', ',.*$',
'\.com', 'Company', 'ltd\W',
'Class A', 'Class C', 'Corporation', '& Co\.'
]
company_pattern = '|'.join(company_token)
return re.sub(company_pattern, '', name).strip()
def generate_news_label():
snp = pd.read_csv('data/constituents.csv')
for name in snp['Name']:
sys.stdout.write(clean_company_name(name) + '\n')
sys.stdout.flush()
df = pd.DataFrame()
date = datetime.datetime.strptime('20180702', '%Y%m%d')
end_date = datetime.datetime.strptime('20180930', '%Y%m%d')
one_day = datetime.timedelta(days=1)
count = 0
while date <= end_date:
date_str = date.strftime('%Y%m%d')
file_path = 'data/reuters/news_{0:s}.json'.format(date_str)
with open(file_path, 'r') as f:
data = json.load(f)
title_map = {}
total_articles = len(data)
for index, item in enumerate(data):
count += 1
logging('{0:s}: {1:d}/{2:d}'.format(date_str, index + 1, total_articles))
published_time = item['published_time']
title = item['title']
text = item['text']
cleaned_title = re.sub(r'[^\w\s]', '', title)
cleaned_title = re.sub(r' +', ' ', cleaned_title)
if cleaned_title in title_map:
continue
title_map[cleaned_title] = 1
for ticker, name in zip(snp['Symbol'], snp['Name']):
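                # flag the article when the cleaned company name appears in the title or body (ticker matching is left commented out)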
ticker_pattern = '\W{0:s}\W'.format(ticker)
cleaned_name = clean_company_name(name)
name_pattern = '{0:s}\W'.format(cleaned_name)
add_datum = False
'''
if len(ticker) != 1 and ticker != 'CA':
if re.search(ticker_pattern, title) is not None or \
re.search(ticker_pattern, text) is not None:
add_datum = True
'''
if cleaned_name != 'CA':
if '*' in cleaned_name:
name_pattern = re.sub('\\*', '\\\\*', name_pattern)
if '.' in cleaned_name:
name_pattern = re.sub('\\.', '\\\\.', name_pattern)
if re.search(name_pattern, title) is not None or \
                            re.search(name_pattern, text) is not None:
add_datum = True
if (ticker == 'GOOGL' or ticker == 'GOOG') and 'Google' in text:
add_datum = True
if add_datum:
sys.stdout.write('{0:s}: {1:s}, {2:s}, {3:s}\n'.format(date_str, cleaned_title, ticker, cleaned_name))
sys.stdout.write('\t{0:s}\n'.format(text))
sys.stdout.flush()
datum = pd.Series(
data={
'published_time':published_time,
'title':title,
'text':text,
'ticker':ticker,
'name':name
}
)
df = df.append(datum, ignore_index=True)
date = date + one_day
print('\nnews articles: ', count)
return df
def label_webhose_data():
webhose = | pd.read_csv('webhose_data.csv') | pandas.read_csv |
from os import path
import os.path
from datetime import datetime as dt
import datetime
# import plotly.express as px
# from dash.dependencies import Input, Output, State
# import dash_html_components as html
# import dash_core_components as dcc
import json
import pandas as pd
import numpy as np
# from jupyter_dash import JupyterDash
# import plotly.graph_objs as go
# import dash
# import traceback
import sys
import os
import copy
import os
import glob
# import grasia_dash_components as gdc
import json
# import jsonify
# import ciso8601
old_env = os.environ['PATH']
df_return = []
# color (5,98,138)
# 72,145,118 #489176
# # 206,136,87 #ce8857
# 154,80,82 #9a5052
# 160,175,82 #a0af52
# 88,158,157 #589e9d
# 103,120,132 #677884
# 206,182,75 #ceb64b
# 40,72,101 #284865
# 166,135,103 #a68767
from flask import Flask, jsonify,render_template
# from flask_cors import CORS
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# CORS(app, support_credentials=True)
today = dt.today().strftime("%m-%d-%Y")
# csv_today_path = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/' + today + ".csv"
csv_today_path = today + ".csv"
if path.exists(csv_today_path):
df_vaccin_quotidien = pd.read_csv(csv_today_path)
@app.route('/somme/<reg>/<vaccin>')
def filter_data_somme_quotidien(reg,vaccin):
# print(type(reg))
reg=np.int64(reg)
vaccin=np.int64(vaccin)
return df_vaccin_quotidien.query('reg==@reg & vaccin==@vaccin').to_json()
@app.route('/detail/<reg>/<vaccin>')
def filter_data_detail(reg,vaccin):
# print(type(reg))
reg=np.int64(reg)
vaccin=np.int64(vaccin)
# response=df_vaccin_detail.query('reg==@reg & vaccin==@vaccin').to_json()
# response.headers.add("Access-Control-Allow-Origin", "*")
return df_vaccin_detail.query('reg==@reg & vaccin==@vaccin').reset_index().to_json()
@app.route("/")
def helloWorld():
return "Hello, cross-origin-world!"
@app.route('/color')
def choose_color(i):
color_list = ["#489176", "#ce8857", "#9a5052", "#a0af52", " #589e9d", "#677884", "#ceb64b", "#284865", "#a68767"]
if i>=len(color_list):
return color_list[len(color_list)-i]
else:
return color_list[i]
# liste_des_vaccins
df_liste_vaccin = ["Tous","COMIRNATY Pfizer/BioNTech", "Moderna", "AstraZeneka"]
print(df_liste_vaccin)
# Load the list of regions
src = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/stat_pop.csv'
# df_population=pd.read_csv(src,sep=";")///////////////a remettre
# summary of the current data
# The file is updated daily and contains the total per region and per vaccine. However, for some
# regions and vaccines the value may be missing on a given day. We therefore build an up-to-date file combining
# today's data from data-france with the values data-france published several days earlier for the missing entries
date_min=datetime.datetime.strptime("27/12/2020","%d/%m/%Y").timestamp()
date_max=datetime.datetime.timestamp(datetime.datetime.today()-datetime.timedelta(days=1))
print(date_min)
labels = ["A1", "A2", "A3", "A4", "A5", "B1", "B2"]
parents = ["", "", "", "", "", "", ""]
values = ["11", "12", "13", "14", "15", "20", "30"]
# fig = go.Figure(go.Treemap(
# labels = labels,
# values = values,
# parents = parents,
# marker_colors = [ choose_color(i) for i in range(len(labels))]
# ))
@app.route('/req/proutos')
def maj_data_complete():
today = dt.today().strftime("%m-%d-%Y")
csv_today_path = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/' + today + ".csv"
if path.exists(csv_today_path + "r"):
df_vaccin_quotidien = pd.read_csv(csv_today_path)
else:
src = 'https://www.data.gouv.fr/fr/datasets/r/900da9b0-8987-4ba7-b117-7aea0e53f530'
df_vaccin_quotidien = pd.read_csv(src, sep=";")
dernier_jour = df_vaccin_quotidien.tail(1)
dernier_jour = str(dernier_jour['jour'].values[0])
df_vaccin_quotidien = df_vaccin_quotidien.query('jour==@dernier_jour')
df_vaccin_quotidien.query('reg!=6', inplace=True)
df_vaccin_quotidien.query('reg!=8', inplace=True)
# df_vaccin_quotidien.to_csv('C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/01-03-2021.csv')
df_vaccin_quotidien.query('reg!=1', inplace=True)
reg_sans_data = [
x for x in pd.unique(
df_population["id"]) if x not in pd.unique(
df_vaccin_quotidien["reg"])]
        # look for the data in older archived stats
day_diff = 0
# for reg in reg_sans_data:
data_full = False
        # loop over the archived data files
while not data_full:
day_diff += 1
today = dt.today().strftime("%m-%d-%Y")
day_delta = dt.today() + datetime.timedelta(days=-day_diff)
day_delta_str = day_delta.strftime("%m-%d-%Y")
files = glob.glob(
"C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/*-*-*.csv")
df_reg_manquant = pd.read_csv(files[-1])
df_reg_manquant = df_reg_manquant[df_reg_manquant.reg.isin(
reg_sans_data)]
            if len(pd.unique(df_reg_manquant.reg)) == len(reg_sans_data):
data_full = True
if not data_full:
print("data pas complète")
exit()
df_reg_manquant.sort_values(['jour'], inplace=True)
        # take the last row of the missing-data file
dernier_jour = df_reg_manquant.tail(1)
dernier_jour = str(dernier_jour['jour'].values[0])
df_reg_manquant.query('jour==@dernier_jour', inplace=True)
df_vaccin_quotidien = pd.concat([df_vaccin_quotidien, df_reg_manquant])
        # save the data
df_vaccin_quotidien.iloc[:-2, : 7].to_csv(csv_today_path)
return df_vaccin_quotidien.iloc[:-2, : 7].to_json()
# return "e"
# df_vaccin_quotidien=maj_data_complete()
# daily totals, even when incomplete, so that changes since the previous day can be visualised
# df_vaccin_detail may contain missing days
@app.route('/b')
def make_vaccin_detail():
today = dt.today().strftime("%m-%d-%Y")
csv_today_path = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin_detail/' + today + ".csv"
if path.exists(csv_today_path):
return pd.read_csv(csv_today_path)
else:
src = 'https://www.data.gouv.fr/fr/datasets/r/900da9b0-8987-4ba7-b117-7aea0e53f530'
df_vaccin_detail = pd.read_csv(src, sep=";")
df_vaccin_detail['datetime']=pd.to_datetime(df_vaccin_detail['jour'], format='%Y-%m-%d')
df_vaccin_detail['timestamp']=df_vaccin_detail['datetime'].apply(lambda x: datetime.datetime.timestamp(x))
# df_vaccin_detail['somme_jour_vaccin'] = df_vaccin_detail.groupby(['jour', 'vaccin'])['n_cum_dose1'].transform(
# sum)
df_vaccin_detail.to_csv(csv_today_path)
return df_vaccin_detail
# the only thing still missing is the daily cumulative count per vaccine across all regions
df_vaccin_detail=make_vaccin_detail()
# month_options = [{'label': month, 'value': i}
# for (month,i) in enumerate(pd.unique(df_liste_vaccin))]
# somme_jour_vaccin accounts for the fact that there can be several rows for the same day, the same region and the same vaccine
# dataframe=pd.DataFrame([]);
# dataframe['somme_jour_vaccin'] = df_vaccin_detail.groupby(['jour', 'vaccin'])[
# 'n_dose1'].transform(sum)
# dataframe.drop(['reg','n_dose1','n_dose2','n_cum_dose1','n_cum_dose2'],axis=1,inplace=True)
@app.route('/c')
def make_data(df_vaccin_detail=df_vaccin_detail,liste_vaccin=[0,1,2,3],date_range=[]):
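    # build one cumulative first-dose trace per vaccine, optionally restricted to a timestamp range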
if liste_vaccin is None:
liste_vaccin = [0, 1, 2, 3]
dataframe = df_vaccin_detail.copy()
dataframe.drop_duplicates()
dataframe = dataframe.sort_values(by=['jour'])
for vaccin in liste_vaccin:
dataframe['cumsum_' + str(vaccin)] = dataframe.loc[dataframe.vaccin == vaccin, "n_dose1"].cumsum()
dataframe.to_clipboard(sep=',', index=False)
if date_range != []:
date_min = date_range[0]
date_max= date_range[1]
dataframe = dataframe.loc[dataframe['timestamp']>date_min,]
dataframe = dataframe.loc[dataframe['timestamp']<date_max,]
dataframe.drop(['timestamp','datetime'],axis=1,inplace=True)
data = []
for vaccin in liste_vaccin:
dataframe2 = dataframe.query('vaccin==@vaccin')
trace = go.Scatter(x=dataframe2.jour, y=dataframe2['cumsum_' + str(vaccin)], name=df_liste_vaccin[vaccin])
data.append(trace)
return data
# data=make_data(df_vaccin_detail)
# layout
# layout = go.Layout(xaxis={'title': 'Time'},
# yaxis={'title': 'Produced Units'},
# margin={'l': 40, 'b': 40, 't': 50, 'r': 50},
# hovermode='closest')
# figChart = go.Figure(data=data, layout=layout)
gg = 0
df = []
# ratio of vaccinated population
@app.route('/f')
def ratio():
df_vaccin_quotidien['somme_jour'] = df_vaccin_quotidien.groupby(['reg', 'jour'])[
'n_dose1'].transform(sum)
population_totale = 65000000
pop_totale_vacciné = population_totale / df_vaccin_quotidien["n_dose1"].sum()
liste_vaccin = pd.unique(df_vaccin_quotidien["vaccin"])
# df_vaccin_quotidien['reg'] = df_vaccin_quotidien['reg'].apply(lambda x: x.zfill(2))
# df_vaccin_quotidien['reg']=df_vaccin_quotidien['reg'].astype("string")
df_tot = df_population.merge(df_vaccin_quotidien, left_on='id', right_on='reg')
df_tot = df_tot.query('vaccin==0') # vaccin=0 somme des vaccins
df_tot["ratio"] = 100 * (df_tot["n_cum_dose1"] / df_tot["pop"])
df_tot["ratio_dose_2"] = 100 * \
(df_tot["n_cum_dose1"] + df_tot["n_cum_dose2"]) / df_tot["pop"]
return df_tot
# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css','./assets/yah.css']
# external_scripts = ['https://polyfill.io/v3/polyfill.min.js?features=default',
# 'france_geojson.js', 'https://cdn.jsdelivr.net/npm/[email protected]/underscore-min.js',
# 'load_gmap.js',
# 'https://maps.googleapis.com/maps/api/js?key=<KEY>'
# 'MNcHVdkLgHctexkayh5tAMOWjA&callback=initMap&libraries=&v=weekly']
# app = dash.Dash(__name__, external_scripts=external_scripts)
@app.route('/g')
def load_geojson():
with open('C:/Users/Utilisateur/PycharmProjects/montee_en_competence/france_geojson.json') as f:
geojson = json.load(f)
for i, x in enumerate(geojson["features"]):
df.append([x["properties"]["code"], x["properties"]
["nom"], "pink", x["properties"]["code"]])
df = | pd.DataFrame(df, columns=['code', 'nom', 'color', 'custom_data']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
# In[2]:
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# In[3]:
df = pd.read_csv('data.csv')
# ## Grouping Players Dataset by Teams
# In[4]:
df_teams = df
# ### Dropping NaNs in `Club` and `Position` Features
# In[5]:
df_teams = df_teams.drop('Unnamed: 0', axis=1)
# In[6]:
df_teams = df_teams.dropna(subset=['Club', 'Position'], axis=0)
# ### Goal Keeper rows: Replacing NaNs with 0s in `Position` Column
# In[7]:
# Replacing NaNs with 0s for Goal Keeper rows
df_teams.iloc[:,27:53] = df_teams.iloc[:,27:53].fillna(value=0)
# ### Dropping `Joined` and Replacing NaNs in `Release Clause` and `Loaned From`
# In[8]:
# Dropping 'Joined' column
df_teams = df_teams.drop('Joined', axis=1)
# In[9]:
# Replacing NaNs in 'Release Clause' and 'Loaned From' features
df_teams['Release Clause'] = df_teams['Release Clause'].fillna(0)
df_teams['Loaned From'] = df_teams['Loaned From'].fillna('Not Loaned')
# ### Adding `Field Position` Feature
# In[10]:
defense = ['CB', 'RB', 'LB', 'RWB', 'LWB', 'RCB', 'LCB']
midfield = ['RW', 'LW', 'RM', 'LM', 'CM', 'CDM', 'CAM', 'RCM', 'LCM', 'LAM', 'RAM', 'RDM', 'LDM']
attack = ['ST', 'CF', 'RF', 'LF', 'RS', 'LS']
goalkeeper = ['GK']
# In[11]:
# function to create Field Position for each player
def field(row):
if row['Position'] in defense:
val = 'Defense'
elif row['Position'] in midfield:
val = 'Midfield'
elif row['Position'] in attack:
val = 'Attack'
else:
val = 'GK'
return val
# In[12]:
df_teams['Field Position'] = df_teams.apply(field, axis=1)
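# For reference, a vectorized equivalent of field() (hedged sketch; it avoids the
# row-wise apply and must agree with the column just created above):
_field_vectorized = np.select(
    [df_teams['Position'].isin(defense),
     df_teams['Position'].isin(midfield),
     df_teams['Position'].isin(attack)],
    ['Defense', 'Midfield', 'Attack'],
    default='GK')
assert (df_teams['Field Position'] == _field_vectorized).all()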
# ### Editing values in `Value` and `Wage` columns
# In[13]:
def change_value(row):
if (row['Value'][-1]=='K'):
return int(pd.to_numeric(row['Value'][1:-1])*1000)
elif (row['Value'][-1]=='M'):
return int(pd.to_numeric(row['Value'][1:-1])*1000000)
elif (row['Value'][-1]=='0'):
return 0
# In[14]:
df_teams['Value'] = df_teams.apply(change_value, axis=1)
# In[15]:
def change_wage(row):
if (row['Wage'][-1]=='K'):
return (pd.to_numeric(row['Wage'][1:-1]))
elif (row['Wage'][-1]=='0'):
return 0
# In[16]:
df_teams['Wage'] = df_teams.apply(change_wage, axis=1)
# ### Applying Player Overvalue Ratio
# In[17]:
df_teams['Overvalue Ratio'] = df_teams['Wage'] / df_teams['Overall']
df_teams['Overvalue Ratio'] = df_teams['Overvalue Ratio'].apply(lambda x: round(x,2))
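# Worked example with hypothetical numbers (not taken from the dataset): at this
# point Wage is still expressed in thousands, so a player on a 200K weekly wage
# rated 90 overall gets 200 / 90 ≈ 2.22, while a 50K player rated 85 gets
# 50 / 85 ≈ 0.59; the higher the ratio, the more the club pays per rating point.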
# ### Multiplying Wage by 1000
# In[18]:
df_teams['Wage'] = df_teams['Wage']*1000
# ### Adding New Feature: Field Position Num (Numerically Encoded Field Positions)
# In[19]:
le = LabelEncoder()
df_teams['Field Position Num'] = le.fit_transform(df_teams['Field Position'])
# ### Getting Club players
# In[20]:
def get_club_players(club):
if club in df_teams.Club.values:
club_players = []
club = df_teams['Club'] == club
vals = df_teams.loc[club]
names = list(vals['Name'])
overvalue_ratios = list(vals['Overvalue Ratio'])
potential = list(vals['Potential'])
        overall_ratings = list(vals['Overall'])  # overall ratings for the club's players
position = list(vals['Position'])
wage = list(vals['Wage'])
# If club is a user's 'custom' club
elif type(club) == list:
player_indexes = []
for dic in club:
for key, value in dic.items():
if key == "id":
player_indexes.append(value -1)
names = df_teams.iloc[player_indexes]['Name'].values
overvalue_ratios = df_teams.iloc[player_indexes]['Overvalue Ratio'].values
potential = df_teams.iloc[player_indexes]['Potential'].values
overall_ratings = df_teams.iloc[player_indexes]['Overall'].values
position = df_teams.iloc[player_indexes]['Position'].values
wage = df_teams.iloc[player_indexes]['Wage'].values
else:
return 'Your club entry was not located.'
club_list = list(zip(names, position, overvalue_ratios, overall_ratings, potential, wage))
cols = ['Name', 'Position', 'Overvalue Ratio', 'Overall Rating', 'Potential Rating', 'Wage']
df_club_list = pd.DataFrame(data=club_list, columns=cols)
top_2_rated_players = sorted(club_list, key=lambda x: x[2], reverse=True)[:2]
df_top_2_rated_players = | pd.DataFrame(data=top_2_rated_players, columns=cols) | pandas.DataFrame |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(font_scale=2.2)
plt.style.use("seaborn")
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, OneHotEncoder
from sklearn.model_selection import StratifiedKFold, train_test_split, ShuffleSplit
from sklearn.metrics import f1_score
import itertools
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBClassifier
import shap
from tqdm import tqdm
import featuretools as ft
import warnings
warnings.filterwarnings("ignore")
import time
df_train = pd.read_csv("./costa-rican-household-poverty-prediction/train.csv")
df_test = pd.read_csv("./costa-rican-household-poverty-prediction/test.csv")
print("df_train shape:", df_train.shape, " ", "df_test shape: ", df_test.shape)
df_train.head()
df_train.describe()
df_test.head()
# 1.2 Make description df
description = pd.read_csv("./costa-rican-household-poverty-prediction/codebook.csv")
description
# 1.3 Check null data
total = df_train.isnull().sum().sort_values(ascending=False)
percent = 100 * (df_train.isnull().sum() / df_train.isnull().count()).sort_values(ascending=False)
missing_df = pd.concat( [total, percent], axis=1, keys=["Total", "Percent"])
missing_df.head(20)
# 1.4 Fill missing values
# Below cell is from this kernel:
# if education is "yes" and person is head of household, fill with escolari
df_train.loc[(df_train["edjefa"] == "yes") & (df_train["parentesco1"] == 1), "edjefa"] = df_train.loc[ (df_train["edjefa"] == "yes") & (df_train["parentesco1"] == 1), "escolari"]
df_train.loc[(df_train["edjefe"] == "yes") & (df_train["parentesco1"] == 1), "edjefe"] = df_train.loc[ (df_train["edjefe"] == "yes") & (df_train["parentesco1"] == 1), "escolari"]
df_test.loc[(df_test["edjefa"] == "yes") & (df_test["parentesco1"] == 1), "edjefa"] = df_test.loc[ (df_test["edjefa"] == "yes") & (df_test["parentesco1"] == 1), "escolari"]
df_test.loc[(df_test["edjefe"] == "yes") & (df_test["parentesco1"] == 1), "edjefe"] = df_test.loc[ (df_test["edjefe"] == "yes") & (df_test["parentesco1"] == 1), "escolari"]
# This field is supposed to be interaction between gender and escolari, but it isn't clear what "yes" means, let's fill it with 4
df_train.loc[df_train["edjefa"] == "yes", "edjefa"] = 4
df_train.loc[df_train["edjefe"] == "yes", "edjefe"] = 4
df_test.loc[df_test["edjefa"] == "yes", "edjefa"] = 4
df_test.loc[df_test["edjefe"] == "yes", "edjefe"] = 4
# create feature with max education of either head of household
df_train["edjef"] = np.max(df_train[["edjefa", "edjefe"]], axis=1)
df_test["edjef"] = np.max(df_test[["edjefa", "edjefe"]], axis=1)
# fix some inconsistencies in the data - some rows indicate both that the household does and does not have a toilet,
# if there is no water we'll assume they do not
df_train.loc[(df_train.v14a == 1) & (df_train.sanitario1 == 1) & (df_train.abastaguano == 0), "v14a"] = 0
df_train.loc[(df_train.v14a == 1) & (df_train.sanitario1 == 1) & (df_train.abastaguano == 0), "sanitario1"] = 0
df_test.loc[(df_test.v14a == 1 ) & (df_test.sanitario1 == 1) & (df_test.abastaguano == 0), "v14a"] = 0
df_test.loc[(df_test.v14a == 1 ) & (df_test.sanitario1 == 1) & (df_test.abastaguano == 0), "sanitario1"] = 0
# rez_esz, SQBmeaned
# - rez_esc : Years behind in school -> filled with 0
# - SQBmeaned : square of the mean years of education of adults (>=18) in the household agesq, Age squared -> same with rez_esc -> filled with 0
df_train["rez_esc"].fillna(0, inplace=True)
df_test["rez_esc"].fillna(0, inplace=True)
df_train["SQBmeaned"].fillna(0, inplace=True)
df_test["SQBmeaned"].fillna(0, inplace=True)
# meaneduc
# - meaneduc: average years of education for adults (18+) -> filled with 0
df_train["meaneduc"].fillna(0, inplace=True)
df_test["meaneduc"].fillna(0, inplace=True)
# v18q1
# - v18q1: number of tablets household owns -> if v18q (Do you own a tablet?) == 1, there are some values. If not, only NaN values in v18q1. See below 3 cells.
df_train["v18q"].value_counts()
df_train.loc[df_train["v18q"] == 1, "v18q1"].value_counts()
df_train.loc[df_train["v18q"] == 0, "v18q1"].value_counts()
df_train["v18q1"].fillna(0, inplace=True)
df_test["v18q1"].fillna(0, inplace=True)
# - v2a1 : number of tablets household owns -> if tipovivi3 (rented?) == 1, there are some values. If not, there are also some values.
# - NaN value could be replaced by 0.
df_train["tipovivi3"].value_counts()
sns.kdeplot(df_train.loc[df_train["tipovivi3"] == 1, "v2a1"], label = "Monthly rent payment of household (rented=1)")
sns.kdeplot(df_train.loc[df_train["tipovivi3"] == 0, "v2a1"], label = "Monthly rent payment of household (rented=0)")
plt.xscale("log")
plt.show()
df_train["v2a1"].fillna(0, inplace=True)
df_test["v2a1"].fillna(0, inplace=True)
total = df_train.isnull().sum().sort_values(ascending=False)
percent = 100 * (df_train.isnull().sum() / df_train.isnull().count()).sort_values(ascending=False)
missing_df = pd.concat([total, percent], axis=1, keys=["Total", "Percent"])
missing_df.head(20)
total = df_test.isnull().sum().sort_values(ascending=False)
percent = 100 * (df_test.isnull().sum() / df_test.isnull().count()).sort_values(ascending=False)
missing_df = pd.concat([total, percent], axis=1, keys=["Total", "Percent"])
missing_df.head(20)
# For now, there are no NaN values.
# 2. Feature engineering
# 2.1 Object features
features_object = [col for col in df_train.columns if df_train[col].dtype == "object"]
features_object
# dependency
# some dependencies are NA, fill those with the square root of the square
df_train["dependency"] = np.sqrt(df_train["SQBdependency"])
df_test["dependency"] = np.sqrt(df_test["SQBdependency"])
# edjefe
# - edjefe, years of education of male head of household, based on the interaction of escolari (years of education), head of household and gender, yes=1 and no=0
# - replace yes -> 1 and no -> 0
def replace_edjefe(x):
if x == "yes":
return 1
elif x == "no":
return 0
else:
return x
df_train["edjefe"] = df_train["edjefe"].apply(replace_edjefe).astype(float)
df_test["edjefe"] = df_test["edjefe"].apply(replace_edjefe).astype(float)
# edjefa
# - edjefa, years of education of female head of household, based on the interaction of escolari (years of education), head of household and gender, yes=1 and no=0
# - replace yes -> 1 and no -> 0
def replace_edjefa(x):
if x == "yes":
return 1
elif x == "no":
return 0
else:
return x
df_train["edjefa"] = df_train["edjefa"].apply(replace_edjefa).astype(float)
df_test["edjefa"] = df_test["edjefa"].apply(replace_edjefa).astype(float)
# create feature with max education of either head of household
df_train["edjef"] = np.max(df_train[["edjefa", "edjefe"]], axis=1)
df_test["edjef"] = np.max(df_test[["edjefa", "edjefe"]], axis=1)
# roof and electricity
# I refered to https://www.kaggle.com/mineshjethva/exploratory-data-analysis-lightgbm.
# Thanks!
df_train["roof_waste_material"] = np.nan
df_test["roof_waste_material"] = np.nan
df_train["electricity_other"] = np.nan
df_test["electricity_other"] = np.nan
def fill_roof_exception(x):
if (x["techozinc"] == 0) and (x["techoentrepiso"] == 0) and (x["techocane"] == 0) and (x["techootro"] == 0):
return 1
else:
return 0
def fill_no_electricity(x):
if (x["public"] == 0) and (x["planpri"] == 0) and (x["noelec"] == 0) and (x["coopele"] == 0):
return 1
else:
return 0
df_train["roof_waste_material"] = df_train.apply(lambda x : fill_roof_exception(x), axis=1)
df_test["roof_waste_material"] = df_test.apply(lambda x : fill_roof_exception(x), axis=1)
df_train["electricity_other"] = df_train.apply( lambda x: fill_no_electricity(x), axis=1)
df_test["electricity_other"] = df_test.apply( lambda x: fill_no_electricity(x), axis=1)
# 2.2 Extract cat features
# - According to data description, there are many binary category features.
binary_cat_features = [col for col in df_train.columns if df_train[col].value_counts().shape[0] == 2 ]
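# Note: the cardinality check above also picks up numeric columns that happen to
# take exactly two values, and it misses flags for which only one value occurs in
# train. A hedged alternative that tests the actual value set (assumes the flags
# are coded strictly as 0/1); not used downstream, shown for comparison only:
binary_cat_features_alt = [col for col in df_train.columns
                           if col not in ["Id", "idhogar", "Target"]
                           and set(df_train[col].dropna().unique()) <= {0, 1}]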
# 2.3 Make new features using continuous feature
continuous_features = [col for col in df_train.columns if col not in binary_cat_features]
continuous_features = [col for col in continuous_features if col not in features_object]
continuous_features = [col for col in continuous_features if col not in ["Id", "Target", "idhogar"]]
print("There are {} continuous features".format(len(continuous_features)))
for col in continuous_features:
print("{}: {}".format(col, description.loc[description["Variable name"] == col, "Variable description"].values))
# - hhsize : household size
# - tamhog : size of the household
# what is different?
# - As you can see, the two features are meant to mean the same thing, but the exact numbers differ. Are they really different?
# - I don't know. For now, I decided to drop the "tamhog" feature.
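# A quick check to quantify the disagreement before dropping (hedged sketch; both
# columns still exist at this point):
print("hhsize == tamhog for {:.1%} of rows".format(
    (df_train["hhsize"] == df_train["tamhog"]).mean()))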
df_train["edjef"].value_counts()
df_train.drop("tamhog", axis=1, inplace=True)
df_test.drop("tamhog", axis=1, inplace=True)
# Squared features
# - There are many squared features.
# - Actually, tree models like lightgbm don't need them. But at this kernel, I want to use lightgbm as feature filter model and set entity-embedding as classifier.
# So, let's keep them.
# Family features
# - hogar_nin, hogar_adul, hogar_mayor, hogar_total, r4h1, r4h2, r4h3, r4m1, r4m2, r4m3, r4t1, r4t2, r4t3, tmbhog, tamvid, rez_esc, escolari
# - Family size features (substract, ratio)
df_train["adult"] = df_train["hogar_adul"] - df_train["hogar_mayor"]
df_train["dependency_count"] = df_train["hogar_nin"] + df_train["hogar_mayor"]
df_train["dependency"] = df_train["dependency_count"] / df_train["adult"]
df_train["child_percent"] = df_train["hogar_nin"] / df_train["hogar_total"]
df_train["elder_percent"] = df_train["hogar_mayor"] / df_train["hogar_total"]
df_train["adult_percent"] = df_train["hogar_adul"] / df_train["hogar_total"]
df_train["males_younger_12_years_percent"] = df_train["r4h1"] / df_train["hogar_total"]
df_train["males_older_12_years_percent"] = df_train["r4h2"] / df_train["hogar_total"]
df_train["males_percent"] = df_train["r4h3"] / df_train["hogar_total"]
df_train["females_younger_12_years_percent"] = df_train["r4m1"] / df_train["hogar_total"]
df_train["females_older_12_years_percent"] = df_train["r4m2"] / df_train["hogar_total"]
df_train["females_percent"] = df_train["r4m3"] / df_train["hogar_total"]
df_train["persons_younger_12_years_percent"] = df_train["r4t1"] / df_train["hogar_total"]
df_train["persons_older_12_years_percent"] = df_train["r4t2"] / df_train["hogar_total"]
df_train["persons_percent"] = df_train["r4t3"] / df_train["hogar_total"]
df_test["adult"] = df_test["hogar_adul"] - df_test["hogar_mayor"]
df_test["dependency_count"] = df_test["hogar_nin"] + df_test["hogar_mayor"]
df_test["dependency"] = df_test["dependency_count"] / df_test["adult"]
df_test["child_percent"] = df_test["hogar_nin"] / df_test["hogar_total"]
df_test["elder_percent"] = df_test["hogar_mayor"] / df_test["hogar_total"]
df_test["adult_percent"] = df_test["hogar_adul"] / df_test["hogar_total"]
df_test["males_younger_12_years_percent"] = df_test["r4h1"] / df_test["hogar_total"]
df_test["males_older_12_years_percent"] = df_test["r4h2"] / df_test["hogar_total"]
df_test["males_percent"] = df_test["r4h3"] / df_test["hogar_total"]
df_test["females_younger_12_years_percent"] = df_test["r4m1"] / df_test["hogar_total"]
df_test["females_older_12_years_percent"] = df_test["r4m2"] / df_test["hogar_total"]
df_test["females_percent"] = df_test["r4m3"] / df_test["hogar_total"]
df_test["persons_younger_12_years_percent"] = df_test["r4t1"] / df_test["hogar_total"]
df_test["persons_older_12_years_percent"] = df_test["r4t2"] / df_test["hogar_total"]
df_test["persons_percent"] = df_test["r4t3"] / df_test["hogar_total"]
df_train['males_younger_12_years_in_household_size'] = df_train['r4h1'] / df_train['hhsize']
df_train['males_older_12_years_in_household_size'] = df_train['r4h2'] / df_train['hhsize']
df_train['males_in_household_size'] = df_train['r4h3'] / df_train['hhsize']
df_train['females_younger_12_years_in_household_size'] = df_train['r4m1'] / df_train['hhsize']
df_train['females_older_12_years_in_household_size'] = df_train['r4m2'] / df_train['hhsize']
df_train['females_in_household_size'] = df_train['r4m3'] / df_train['hogar_total']
df_train['persons_younger_12_years_in_household_size'] = df_train['r4t1'] / df_train['hhsize']
df_train['persons_older_12_years_in_household_size'] = df_train['r4t2'] / df_train['hhsize']
df_train['persons_in_household_size'] = df_train['r4t3'] / df_train['hhsize']
df_test['males_younger_12_years_in_household_size'] = df_test['r4h1'] / df_test['hhsize']
df_test['males_older_12_years_in_household_size'] = df_test['r4h2'] / df_test['hhsize']
df_test['males_in_household_size'] = df_test['r4h3'] / df_test['hhsize']
df_test['females_younger_12_years_in_household_size'] = df_test['r4m1'] / df_test['hhsize']
df_test['females_older_12_years_in_household_size'] = df_test['r4m2'] / df_test['hhsize']
df_test['females_in_household_size'] = df_test['r4m3'] / df_test['hogar_total']
df_test['persons_younger_12_years_in_household_size'] = df_test['r4t1'] / df_test['hhsize']
df_test['persons_older_12_years_in_household_size'] = df_test['r4t2'] / df_test['hhsize']
df_test['persons_in_household_size'] = df_test['r4t3'] / df_test['hhsize']
df_train["overcrowding_room_and_bedroom"] = (df_train["hacdor"] + df_train["hacapo"]) / 2
df_test["overcrowding_room_and_bedroom"] = (df_test["hacdor"] + df_test["hacapo"]) / 2
df_train['escolari_age'] = df_train['escolari']/df_train['age']
df_test['escolari_age'] = df_test['escolari']/df_test['age']
df_train['age_12_19'] = df_train['hogar_nin'] - df_train['r4t1']
df_test['age_12_19'] = df_test['hogar_nin'] - df_test['r4t1']
df_train['phones-per-capita'] = df_train['qmobilephone'] / df_train['tamviv']
df_train['tablets-per-capita'] = df_train['v18q1'] / df_train['tamviv']
df_train['rooms-per-capita'] = df_train['rooms'] / df_train['tamviv']
df_train['rent-per-capita'] = df_train['v2a1'] / df_train['tamviv']
df_test['phones-per-capita'] = df_test['qmobilephone'] / df_test['tamviv']
df_test['tablets-per-capita'] = df_test['v18q1'] / df_test['tamviv']
df_test['rooms-per-capita'] = df_test['rooms'] / df_test['tamviv']
df_test['rent-per-capita'] = df_test['v2a1'] / df_test['tamviv']
# - You can see that "Total persons in the household" != "# of total individuals in the household".
# - Somewhat weird. But for now I will keep it.
(df_train["hogar_total"] == df_train["r4t3"]).sum()
# Rent per family features
# - I will reduce the number of features using shap, so let's generate many features!!
# Hope to catch some fortunate features :) (see the feature-selection sketch after the aggregation merge below)
family_size_features = ["adult", "hogar_adul", "hogar_mayor", "hogar_nin", "hogar_total",
"r4h1", "r4h2", "r4h3",
"r4m1", "r4m2", "r4m3",
"r4t1", "r4t2", "r4t3", "hhsize"]
new_feats = []
for col in family_size_features:
new_col_name = "new_{}_per_{}".format("v2a1", col)
new_feats.append(new_col_name)
df_train[new_col_name] = df_train["v2a1"] / df_train[col]
df_test[new_col_name] = df_test["v2a1"] / df_test[col]
# Ratio feature can have infinite values. So let them be filled with 0.
for col in new_feats:
df_train[col].replace([np.inf], np.nan, inplace=True)
df_train[col].fillna(0, inplace=True)
df_test[col].replace([np.inf], np.nan, inplace=True)
df_test[col].fillna(0, inplace=True)
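# The same numerator-per-family-size pattern repeats below (rooms, bedrooms,
# v18q1, qmobilephone, rez_esc). A hedged helper that would collapse each block
# into a single call and avoid copy-paste between the train and test frames;
# defined here for reference only, the explicit loops below are kept as-is:
def add_per_family_features(train_df, test_df, numerator, denominators):
    """Create numerator/denominator ratio columns on both frames, 0-filling inf/NaN."""
    created = []
    for denom in denominators:
        name = "new_{}_per_{}".format(numerator, denom)
        created.append(name)
        for frame in (train_df, test_df):
            frame[name] = (frame[numerator] / frame[denom]).replace(
                [np.inf, -np.inf], np.nan).fillna(0)
    return created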
# Room per family features
new_feats = []
for col in family_size_features:
new_col_name = "new_{}_per_{}".format("rooms", col)
new_feats.append(new_col_name)
df_train[new_col_name] = df_train["rooms"] / df_train[col]
df_test[new_col_name] = df_test["rooms"] / df_test[col]
for col in new_feats:
df_train[col].replace([np.inf], np.nan, inplace=True)
df_train[col].fillna(0, inplace=True)
df_test[col].replace([np.inf], np.nan, inplace=True)
df_test[col].fillna(0, inplace=True)
# BedRoom per family features
new_feats = []
for col in family_size_features:
new_col_name = "new_{}_per_{}".format("bedrooms", col)
new_feats.append(new_col_name)
df_train[new_col_name] = df_train["bedrooms"] / df_train[col]
df_test[new_col_name] = df_test["bedrooms"] / df_test[col]
for col in new_feats:
df_train[col].replace(np.inf, np.nan, inplace=True)
df_train[col].fillna(0, inplace=True)
df_test[col].replace(np.inf, np.nan, inplace=True)
df_test[col].fillna(0, inplace=True)
print(df_train.shape, df_test.shape)
new_feats = []
for col in family_size_features:
new_col_name = "new_{}_per_{}".format("v18q1", col)
new_feats.append(new_col_name)
df_train[new_col_name] = df_train["v18q1"] / df_train[col]
df_test[new_col_name] = df_test["v18q1"] / df_test[col]
for col in new_feats:
df_train[col].replace([np.inf], np.nan, inplace=True)
df_train[col].fillna(0, inplace=True)
df_test[col].replace([np.inf], np.nan, inplace=True)
df_test[col].fillna(0, inplace=True)
# Phone per family features
new_feats = []
for col in family_size_features:
new_col_name = "new_{}_per_{}".format("qmobilephone", col)
new_feats.append(new_col_name)
df_train[new_col_name] = df_train["qmobilephone"] / df_train[col]
    df_test[new_col_name] = df_test["qmobilephone"] / df_test[col]
for col in new_feats:
df_train[col].replace([np.inf], np.nan, inplace=True)
df_train[col].fillna(0, inplace=True)
df_test[col].replace([np.inf], np.nan, inplace=True)
df_test[col].fillna(0, inplace=True)
# rez_esc (Years behind in school) per family features
new_feats = []
for col in family_size_features:
new_col_name = "new_{}_per_{}".format("rez_esc", col)
new_feats.append(new_col_name)
df_train[new_col_name] = df_train["rez_esc"] / df_train[col]
    df_test[new_col_name] = df_test["rez_esc"] / df_test[col]
for col in new_feats:
df_train[col].replace([np.inf], np.nan, inplace=True)
df_train[col].fillna(0, inplace=True)
df_test[col].replace([np.inf], np.nan, inplace=True)
df_test[col].fillna(0, inplace=True)
df_train["rez_esc_age"] = df_train["rez_esc"] / df_train["age"]
df_train["rez_esc_escolari"] = df_train["rez_esc"] / df_train["escolari"]
df_test["rez_esc_age"] = df_test["rez_esc"] / df_test["age"]
df_test["rez_esc_escolari"] = df_test["rez_esc"] / df_test["escolari"]
# Rich features
# I think the richer the household, the larger the number of phones and tablets
df_train["tabulet_x_qmobilephone"] = df_train["v18q1"] * df_train["qmobilephone"]
df_test["tabulet_x_qmobilephone"] = df_test["v18q1"] * df_test["qmobilephone"]
# wall, roof and floor may be key factors
# Let's multiply each of them. Because they are binary categorical features, multiplying them generates new categorical features.
# wall and roof
for col1 in ["epared1", "epared2", "epared3"]:
for col2 in ["etecho1", "etecho2", "etecho3"]:
new_col_name = "new_{}_x_{}".format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
for col1 in ["epared1", "epared2", "epared3"]:
for col2 in ["eviv1", "eviv2", "eviv3"]:
new_col_name = "new_{}_x_{}".format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
# roof and floor
for col1 in ["etecho1", "etecho2", "etecho3"]:
for col2 in ["eviv1", "eviv2", "eviv3"]:
new_col_name = "new_{}_x_{}".format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
# combination using three features
for col1 in ["epared1", "epared2", "epared3"]:
for col2 in ["etecho1", "etecho2", "etecho3"]:
for col3 in ["eviv1", "eviv2", "eviv3"]:
new_col_name = "new_{}_x_{}_x_{}".format(col1, col2, col3)
df_train[new_col_name] = df_train[col1] * df_train[col2] * df_train[col3]
df_test[new_col_name] = df_test[col1] * df_test[col2] * df_test[col3]
print(df_train.shape, df_test.shape)
# I want to mix electricity and energy features -> energy features
for col1 in ["public", "planpri", "noelec", "coopele"]:
for col2 in ["energcocinar1", "energcocinar2", "energcocinar3", "energcocinar4"]:
new_col_name = "new_{}_x_{}".format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
# I want to mix toilet and rubbish disposal features -> other_infra features
for col1 in ["sanitario1", "sanitario2", "sanitario3", "sanitario5", "sanitario6"]:
for col2 in ["elimbasu1", "elimbasu2", "elimbasu3", "elimbasu4", "elimbasu5", "elimbasu6"]:
new_col_name = "new_{}_x_{}".format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
# I want to mix toilet and water provision features -> water features
for col1 in ["abastaguadentro", "abastaguafuera", "abastaguano"]:
for col2 in ["sanitario1", "sanitario2", "sanitario3", "sanitario5", "sanitario6"]:
new_col_name = "new_{}_x_{}".format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
print(df_train.shape, df_test.shape)
# I want to mix education and area features -> education_zone_features
for col1 in ['area1', 'area2']:
for col2 in ['instlevel1', 'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9']:
new_col_name = 'new_{}_x_{}'.format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
# Mix region and education
for col1 in ['lugar1', 'lugar2', 'lugar3', 'lugar4', 'lugar5', 'lugar6']:
for col2 in ['instlevel1', 'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9']:
new_col_name = 'new_{}_x_{}'.format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
print(df_train.shape, df_test.shape)
# Multiply television / mobilephone / computer / tabulet / refrigerator -> electronics features
df_train["electronics"] = df_train["computer"] * df_train["mobilephone"] * df_train["television"] * df_train["v18q"] * df_train["refrig"]
df_test["electronics"] = df_test["computer"] * df_test["mobilephone"] * df_test["television"] * df_test["v18q"] * df_test["refrig"]
df_train["no_appliances"] = df_train["refrig"] + df_train["computer"] + df_train["television"] + df_train["mobilephone"]
df_test["no_appliances"] = df_test["refrig"] + df_test["computer"] + df_test["television"] + df_test["mobilephone"]
# Mix wall materials of roof, floor, wall
for col1 in ['paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother']:
for col2 in ['pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera']:
new_col_name = 'new_{}_x_{}'.format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
for col1 in ['pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera']:
    for col2 in ['techozinc', 'techoentrepiso', 'techocane', 'techootro']:
new_col_name = 'new_{}_x_{}'.format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
for col1 in ['paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother']:
for col2 in ['techozinc', 'techoentrepiso', 'techocane', 'techootro']:
new_col_name = 'new_{}_x_{}'.format(col1, col2)
df_train[new_col_name] = df_train[col1] * df_train[col2]
df_test[new_col_name] = df_test[col1] * df_test[col2]
for col1 in ['paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother']:
for col2 in ['pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera']:
for col3 in ['techozinc', 'techoentrepiso', 'techocane', 'techootro']:
new_col_name = 'new_{}_x_{}_x_{}'.format(col1, col2, col3)
df_train[new_col_name] = df_train[col1] * df_train[col2] * df_train[col3]
            df_test[new_col_name] = df_test[col1] * df_test[col2] * df_test[col3]
print(df_train.shape, df_test.shape)
# Wow without any aggregation features, we have 446 features for now.
# Actually, mixing all the wall materials would make thousands of features.
# I don't want to do that because of computational costs!
# Remove features with only one value.
cols_with_only_one_value = []
for col in df_train.columns:
if col == "Target":
continue
if df_train[col].value_counts().shape[0] == 1 or df_test[col].value_counts().shape[0] == 1:
print(col)
cols_with_only_one_value.append(col)
# Let's remove them!
df_train.drop(cols_with_only_one_value, axis=1, inplace=True)
df_test.drop(cols_with_only_one_value, axis=1, inplace=True)
# Check whether both train and test have same features
cols_train = np.array(sorted([col for col in df_train.columns if col != "Target"]))
cols_test = np.array(sorted([col for col in df_test.columns if col != "Target"]))
(cols_train == cols_test).sum() == len(cols_train)
# Good, let's move!
# 2.4 Aggregating features
# In this competition, each sample are member of specific household (idhogar).
# So let's aggregate based on "idhogar" values.
# Aggregation for family features.
def max_min(x):
return x.max() - x.min()
agg_train = pd.DataFrame()
agg_test = pd.DataFrame()
for item in tqdm(family_size_features):
for i, function in enumerate(["mean", "std", "min", "max", "sum", "count", max_min]):
group_train = df_train[item].groupby(df_train["idhogar"]).agg(function)
group_test = df_test[item].groupby(df_test["idhogar"]).agg(function)
if i == 6:
new_col = item + "_new_" + "max_min"
else:
new_col = item + "_new_" + function
agg_train[new_col] = group_train
agg_test[new_col] = group_test
print("new aggregated train set has {} rows and {} features".format(agg_train.shape[0], agg_train.shape[1]))
print("new aggregated test set has {} rows and {} features".format(agg_test.shape[0], agg_test.shape[1]))
aggr_list = ["rez_esc", "dis", "male", "female",
"estadocivil1", "estadocivil2", "estadocivil3", "estadocivil4", "estadocivil5", "estadocivil6", "estadocivil7",
"parentesco2", "parentesco3", "parentesco4", "parentesco5", "parentesco6", "parentesco7", "parentesco8", "parentesco9", "parentesco10", "parentesco11", "parentesco12",
"instlevel1", "instlevel2", "instlevel3", "instlevel4", "instlevel5", "instlevel6", "instlevel7", "instlevel8", "instlevel9",
"epared1", "epared2", "epared3",
"etecho1", "etecho2", "etecho3",
"eviv1", "eviv2", "eviv3",
"refrig", "television", "mobilephone", "area1", "area2", "v18q", "edjef"]
for item in tqdm(aggr_list):
for function in ["count", "sum"]:
group_train = df_train[item].groupby(df_train["idhogar"]).agg(function)
group_test = df_test[item].groupby(df_test["idhogar"]).agg(function)
new_col = item + "_new1_" + function
agg_train[new_col] = group_train
agg_test[new_col] = group_test
print("new aggregated train set has {} rows and {} features".format(agg_train.shape[0], agg_train.shape[1]))
print("new aggregated test set has {} rows and {} features".format(agg_test.shape[0], agg_test.shape[1]))
aggr_list = ['escolari', 'age', 'escolari_age', 'dependency', 'bedrooms', 'overcrowding', 'rooms', 'qmobilephone', 'v18q1']
for item in tqdm(aggr_list):
    for i, function in enumerate(['mean', 'std', 'min', 'max', 'sum', 'count', max_min]):
group_train = df_train[item].groupby(df_train['idhogar']).agg(function)
group_test = df_test[item].groupby(df_test['idhogar']).agg(function)
if i == 6:
new_col = item + '_new2_' + 'max_min'
else:
new_col = item + '_new2_' + function
agg_train[new_col] = group_train
agg_test[new_col] = group_test
print('new aggregate train set has {} rows, and {} features'.format(agg_train.shape[0], agg_train.shape[1]))
print('new aggregate test set has {} rows, and {} features'.format(agg_test.shape[0], agg_test.shape[1]))
agg_train = agg_train.reset_index()
agg_test = agg_test.reset_index()
train_agg = | pd.merge(df_train, agg_train, on="idhogar") | pandas.merge |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
#!/usr/bin/env python
from scipy import interpolate
import numpy as np
from numpy.lib.recfunctions import append_fields
import scipy.signal as sig
import scipy.stats as st
import time, os
import pandas as pd
import math
#import report_ctd
import ctdcal.report_ctd as report_ctd
import warnings
import ctdcal.fit_ctd as fit_ctd
import datetime
from decimal import Decimal
import settings
import sys
sys.path.append('ctdcal/')
import oxy_fitting
import gsw
warnings.filterwarnings("ignore", 'Mean of empty slice.')
def cast_details(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
'''
We determine the cast details using pandas magic.
First find alternating periods of pumps on and pumps off, then select the
pumps on period with the highest pressure. Get values from the row with the
highest pressure, and return all values to be sent to log.
Input:
stacast - integer, the station and cast, as SSSCC format
log_file - file handle or string, log_file
p_col - string, name of the pressure column
time_col - string, name of the time column
b_lat_col - string, name of the latitude column
b_lon_col - string, name of the longitude column
alt_col - string, name of the altimeter column
inMat - pandas dataframe, the dataframe to come in
Output:
start_cast_time - float, unix epoch seconds?, start of cast time, to be reported to log file
end_cast_time - float, unix epoch seconds?, end of cast time, to be reported to log file
bottom_cast_time - float, unix epoch seconds?, bottom of cast time, to be reported to log file
start_pressure - float, pressure at which cast started, to be reported to log file
max_pressure - float, bottom of the cast pressure, to be reported to log file
b_lat - float, latitude at bottom of cast
b_lon - float, longitude at bottom of cast
b_alti - float, altimeter reading at bottom of cast - volts only!
inMat - the dataframe that came in, with soak period trimmed off
don't need end_cast_time, max_pressure
inMat is trimmed to start and end of cast
'''
df_test = pd.DataFrame.from_records(inMat)
dfs = find_pump_on_off_dfs(df_test)
dfs_1 = find_pumps_on_dfs(dfs)
df_cast = find_max_pressure_df(dfs_1)
df_cast1 = find_last_soak_period(df_cast)
df_cast2 = trim_soak_period_from_df(df_cast1)
start_cast_time = float(df_cast2['scan_datetime'].head(1))
start_pressure = float(df_cast2['CTDPRS'].head(1))
end_cast_time = float(df_cast2['scan_datetime'].tail(1))
max_pressure = float(df_cast2['CTDPRS'].max())
bottom_cast_time = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['scan_datetime'])
b_lat = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLAT'])
b_lon = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLON'])
b_alti = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['ALT'])
#last two lines must be in to return the same as old - change to slices of df later
report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time,
bottom_cast_time, start_pressure, max_pressure, b_alti,
b_lat, b_lon)
#reconvert to ndarray - might need to be altered to remove second index
# inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()].to_records(index=False)
inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()]
return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
#Move next four functions to a library or class(?) Clean up module
def find_pump_on_off_dfs(df):
'''Find pump_on patterns of dataframes, and return a list(?) of dataframes to iterate over.
'''
return [g for i,g in df.groupby(df['pump_on'].ne(df['pump_on'].shift()).cumsum())]
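# Hedged usage sketch (added for illustration only; not called by the processing pipeline).
def _example_find_pump_on_off_dfs():
    '''Shows how the run-length groupby above splits a record into pump-on/pump-off segments.
    The toy frame below is an assumption, not real SBE data.'''
    toy = pd.DataFrame({'pump_on': [0, 0, 1, 1, 0, 1], 'CTDPRS': [1.0, 2.0, 3.0, 4.0, 2.0, 5.0]})
    runs = find_pump_on_off_dfs(toy)
    # Expected segments of 'pump_on': [0, 0], [1, 1], [0], [1]
    return [list(r['pump_on']) for r in runs]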
def find_max_pressure_df(dfs):
'''Giving a list of data frames, return a reference to the frame with which contians the highest pressure value
'''
max_pressure_df = dfs[0]
max_pressure = max_pressure_df['CTDPRS'].max() #TODO make into config var
for df in dfs:
if df['CTDPRS'].max() > max_pressure:
max_pressure_df = df
return max_pressure_df
def find_pumps_on_dfs(dfs):
'''given a list of dataframes, remove all the frames with one or more rows containing a "false" pump on flag
'''
return list(filter(lambda df: df['pump_on'].all(), dfs))
def trim_soak_period_from_df(df):
'''Look for minimum pressure in dataframe, then return everything after minimum pressure/top of cast.
'''
test = int(df.iloc[1:int((len(df)/4))]['CTDPRS'].idxmin())
return df.loc[test:]
def find_last_soak_period(df_cast, surface_pressure=2, time_bin=8, downcast_pressure=50):
"""Find the soak period before the downcast starts.
The algorithm is tuned for repeat hydrography work, specifically US GO-SHIP
parameters. This assumes the soak depth will be somewhere between 10 and 30
meters, the package will sit at the soak depth for at least 20 to 30 seconds
before starting ascent to the surface and descent to target depth.
Parameters
----------
df_cast : DataFrame
DataFrame of the entire cast
surface_pressure : integer
Minimum surface pressure threshold required to look for soak depth.
2 dbar was chosen as an average rosette is roughly 1.5 to 2 meters tall.
time_bin : integer
Time, in whole seconds.
downcast_pressure : integer
Minimum pressure threshold required to assume downcast has started.
50 dbar has been chosen as double the deep soak depth of 20-30 dbar.
Returns
-------
df_cast_ret : DataFrame
DataFrame starting within time_bin seconds of the last soak period.
The algorithm is not guaranteed to catch the exact start of the soak period,
but within a minimum period of time_bin seconds(?) from end of the soak if
the soak period assumption is valid. This should be shorter than the total
soak period time, and able to catch the following rise and descent of the
package that signals the start of the cast.
The algorithm has been designed to handle four general cases of casts:
* A routine cast with pumps turning on in water and normal soak
* A cast where the pumps turn on in air/on deck
* A cast where the pumps turn on and off due to rosette coming out of water
* A cast where there are multiple stops on the downcast to the target depth
"""
#Validate user input
if time_bin <= 0:
raise ValueError('Time bin value should be positive whole seconds.')
if downcast_pressure <=0:
raise ValueError('Starting downcast pressure threshold must be positive integers.')
if downcast_pressure < surface_pressure:
raise ValueError(f'Starting downcast pressure threshold must be greater \
than surface pressure threshold.')
# If pumps have not turned on until in water, return DataFrame
if df_cast.iloc[0]['CTDPRS'] > surface_pressure:
return df_cast
#Bin the data by time, and compute the average rate of descent
df_blah = df_cast.loc[:,:]
df_blah['bin'] = pd.cut(df_blah.loc[:,'index'],
range(df_blah.iloc[0]['index'],df_blah.iloc[-1]['index'],time_bin*24),
labels=False, include_lowest=True)
df_blah2 = df_blah.groupby('bin').mean()
#Compute difference of descent rates and label bins
df_blah2['prs_diff'] = df_blah2['CTDPRS'].diff().fillna(0).round(0)
df_blah2['movement'] = pd.cut(df_blah2['prs_diff'], [-1000,-0.5,0.5,1000], labels=['up','stop','down'])
#Find all periods where the rosette is not moving
df_stop = df_blah2.groupby('movement').get_group('stop')
groupby_test = df_blah2.groupby(df_blah2['movement'].ne(df_blah2['movement'].shift()).cumsum())
list_test = [g for i,g in groupby_test]
#Find a dataframe index of the last soak period before starting descent
def poop(list_obj, downcast_pressure):
""" Return dataframe index in the last soak period before starting
descent to target depth.
"""
for i, x in zip(range(len(list_test)),list_test):
if x['CTDPRS'].max() < downcast_pressure:
if x.max()['movement'] == 'stop':
index = i
if x['CTDPRS'].max() > downcast_pressure:
return index
return index
#Truncate dataframe to new starting index : end of dataframe
start_index = np.around(list_test[poop(list_test, downcast_pressure)].head(1)['index'])
df_cast = df_cast.set_index('index')
df_cast = df_cast.loc[int(start_index):,:]
df_cast_ret = df_cast.reset_index()
return df_cast_ret
#End move four functions
# def cast_details_old(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
# """cast_details function
#
# Function takes full NUMPY ndarray with predefined dtype array
# and adjusts ndarray to remove all extraneous surface data.
# Function returns cast start time, end time, bottom time and
# cleaned up matrix.
#
# Args:
# param1 (str): stacast, station cast input
# param2 (str): log_file, log file to write cast data.
# param3 (str): p_col, pressure data column name
# param4 (str): time_col, time data column name
# param5 (ndarray): inMat, numpy ndarray with dtype array
#
# Returns:
# Narray: The return value is ndarray with adjusted time of parameter
# specified.
#
# """
#
#
# if inMat is None:
# print("In cast_details: No data")
# return
# else:
# # Top of cast time, bottom of cast time, end of cast time,
# start_cast_time = 0.0
# bottom_cast_time = 0.0
# end_cast_time = 0.0
# # Test cycle time constant
# fl = 24
# # starting P
# start_pressure = 2.0
# # Max P
# max_pressure = 10000.0
# lm = len(inMat)-1
# rev = np.arange(int(lm/4),0,-1)
#
# # Find starting top of cast
# # Smallest P from reverse array search
# for i in rev:
# if start_pressure < inMat[p_col][i]:
# tmp = i
# elif start_pressure > inMat[p_col][i]:
# start_pressure = inMat[p_col][i]
# tmp = abs(i - 24) #patched to not break through the c(sea)-floor, can be made cleaner
# break
# start_cast_time = inMat[time_col][tmp]
#
# # Remove everything before cast start
# inMat = inMat[tmp:]
#
# # Max P and bottom time
# max_pressure = max(inMat[p_col])
# tmp = np.argmax((inMat[p_col]))
# bottom_cast_time = inMat[time_col][tmp]
# b_lat = inMat[b_lat_col][tmp]
# b_lon = inMat[b_lon_col][tmp]
# b_alti = inMat[alt_col][tmp]
#
# tmp = len(inMat)
# # Find ending top of cast time
# for i in range(int(tmp/2),tmp):
# if start_pressure > inMat[p_col][i]:
# end_cast_time = inMat[time_col][i]
# if i < tmp: tmp = i + 24
# break
#
# # Remove everything after cast end
# inMat = inMat[:tmp]
#
# report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_alti, b_lat, b_lon)
#
# return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
def ctd_align(inMat=None, col=None, time=0.0):
"""ctd_align function
Function takes full NUMPY ndarray with predefined dtype array
and adjusts the time of sensor response and water flow relative to
the time frame of the temperature sensor.
Args:
param1 (ndarray): inMat, numpy ndarray with dtype array
param2 (float): col, column to apply time advance to.
param3 (float): time, advance in seconds to apply to raw data.
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
# Num of frames per second.
fl = 24
if (inMat is not None) & (col is not None) & ( time > 0.0):
# Time to advance
advnc = int(fl * time)
tmp = np.arange(advnc, dtype=np.float)
last = inMat[col][len(inMat)-1]
tmp.fill(float(last))
inMat[col] = np.concatenate((inMat[col][advnc:],tmp))
return inMat
def ctd_quality_codes(column=None, p_range=None, qual_code=None, oxy_fit=False, p_qual_col=None, qual_one=None, inMat=None):
"""ctd_quality_codes function
Function takes full NUMPY ndarray with predefined dtype array
Args:
param1 (ndarray):
param2 (float):
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
#If p_range set apply qual codes to part of array and return
if p_range is not None:
print("Some algorithm for formatting qual codes per pressure range")
return
else:
q_df = pd.DataFrame(index=np.arange(len(inMat)), columns=p_qual_col)
for pq in p_qual_col:
if pq in list(qual_one):
q_df[pq] = q_df[pq].fillna(1)
elif oxy_fit and pq is column:
q_df[pq] = q_df[pq].fillna(2)
else:
q_df[pq] = q_df[pq].fillna(2)
q_nd = q_df.as_matrix(columns=q_df.columns)
return q_nd
def formatTimeEpoc(time_zone='UTC', time_pattern='%Y-%m-%d %H:%M:%S', input_time = None):
"""formatTimeEpoc function
Function takes pattern of time input, relative time zone, and
date time data array and returns array of epoc time.
title and the second row are the units for each column.
Args:
param1 (str): relative time zone for data.
param2 (str): pattern of incoming data.
param3 (ndarray): input_time, numpy 1d ndarray time array
Returns:
1D ndarray: The return array of epoch time
"""
if input_time is None:
print("In formatTimeEpoc: No data entered.")
return
else:
os.environ['TZ'] = 'UTC'
epoch_time = input_time
for i in range(0,len(input_time)):
epoch_time[i] = int(time.mktime(time.strptime(str(input_time[i], "utf-8"), time_pattern)))
return epoch_time
def dataToDataFrame(inFile):
"""dataToDataFrame function
Function takes full file path to csv type data file and returns a
PANDAS dataframe for data treatment with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): Full path to data file.
Returns:
DataFrame: The return value is a full dataframe with header.
.. REF PAGE:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv
"""
#df = pd.read_csv(inFile, header=[0,2])
df = pd.read_csv(inFile)
return df
def dataToNDarray(inFile, dtype=None, names=None, separator=',', skip=None):
"""dataToNDarray function
Function takes full file path to csv type data file and returns NUMPY
ndarray type ndarray for data manipulation with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): inFile, full path to csv file
param2 (arr): dtype list
param3 (str): separator, default comma ','
Returns:
Narray: The return value is a full data ndarray with two row header.
Reference Page:
https://scipy.github.io/old-wiki/pages/Cookbook/InputOutput.html
"""
try:
return pd.read_pickle(inFile).to_records()
except:
if skip is None:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names)
else:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names, skip_header=skip)
return arr
def hysteresis_correction(H1=-0.033, H2=5000, H3=1450, inMat = None):
"""Hysteresis Correction function
Function takes data ndarray and hysteresis coefficients
and returns hysteresis corrected oxygen data.
Args:
param1 (float): H1, hysteresis correction coefficient 1
param2 (float): H2, hysteresis correction coefficient 2
param3 (float): H3, hysteresis correction coefficient 3
param5 (array): inMat, raw ctd data.
Returns:
array: Return dissolved oxygen hysteresis corrected data.
.. REF PAGE:
http://www.seabird.com/document/an64-3-sbe-43-dissolved-oxygen-do-sensor-hysteresis-corrections
"""
Oxnewconc = np.arange(0,len(inMat),1)
Oxnewconc[0] = inMat['o1_mll'][1]
if inMat is None:
print("Hysteresis Correction function: No data")
return
else:
for i in range(1,len(inMat)-1):
D = 1 + H1 * (math.exp(inMat['p_dbar'][i] / H2) - 1)
C = math.exp(-1 * 0.04167/ H3)
Oxnewconc[i] = ((inMat['o1_mll'][i] + (Oxnewconc[i-1] * C * D)) - (inMat['o1_mll'][i-1] * C)) / D
inMat['o1_mll'][:] = Oxnewconc[:]
return inMat
def data_interpolater(inArr):
"""data_interpolater to handle indices and logical indices of NaNs.
Input:
- inArr, 1d numpy array with return True np.isnans()
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
- interpolated array
Example:
>>> # linear interpolation of NaNs
>>> outArray = data_interpolater(inArr)
"""
nans, tmp= np.isnan(inArr), lambda z: z.nonzero()[0]
inArr[nans] = np.interp(tmp(nans), tmp(~nans), inArr[~nans])
return inArr
def o2pl2pkg(p_col, t_col, sal_col, dopl_col, dopkg_col, lat_col, lon_col, inMat):
"""o2pl2pkg convert ml/l dissolved oxygen to umol/kg
Input:
- t_col, temperature column header deg c.
- sal_col, salinity column header psu.
- dopl_col, dissolved column header ml/l.
- dopkg_col, dissolved column header umol/kg
- lat_col, latitude for entire cast deg.
- lon_col, longitude for entire cast deg.
- inMat, dtype ndarray processed ctd time data.
Output:
- Converted Oxygen column umol/kg
Example:
>>> # linear interpolation of NaNs
>>> outArray = o2pl2kg(inArr)
"""
pkg = np.ndarray(shape=len(inMat), dtype=[(dopkg_col, np.float)])
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(inMat[sal_col], inMat[p_col], inMat[lat_col], inMat[lon_col])
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, inMat[t_col], inMat[p_col])
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
# Convert DO ml/l to umol/kg
for i in range(0,len(inMat[dopl_col])):
pkg[i] = inMat[dopl_col][i] * 44660 / (s0[i] + 1000)
return pkg
def oxy_to_umolkg(df_sal, df_pressure, df_lat, df_lon, df_temp, df_oxy):
'''Rewritten from Courtney's method to use array-likes (aka use dataframes and ndarrays).
'''
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(df_sal, df_pressure, df_lat, df_lon)
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, df_temp, df_pressure)
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
series = df_oxy * 44660 / (s0 + 1000)
return series
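# Hedged usage sketch (added for illustration only; values are made up, not cruise data).
def _example_oxy_to_umolkg():
    '''Converts a single toy ml/l oxygen value to umol/kg via the GSW density route above.'''
    sal = pd.Series([34.5]); prs = pd.Series([1000.0])
    lat = pd.Series([-45.0]); lon = pd.Series([-150.0])
    tmp = pd.Series([3.0]); oxy_mll = pd.Series([5.0])
    # Expect a value on the order of a couple hundred umol/kg for this water parcel.
    return oxy_to_umolkg(sal, prs, lat, lon, tmp, oxy_mll)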
def raw_ctd_filter(input_array=None, filter_type='triangle', win_size=24, parameters=None):
"""raw_ctd_filter function
Function takes NUMPY array
of raw ctd data and returns filtered data. This function also needs
one of three filter types (boxcar, gaussian, triangle) as well as
window size.
Args:
param1 (ndarray): Numpy ndarray with predefined header with at
param2 (str): One of three tested filter types
boxcar, gaussian_std, triangle.
default is triangle
param3 (int): A window size for the filter. Default is 24, which
is the number of frames per second from a SBE9+/11 CTD/Deck unit.
param4 (ndarray): parameters the dtype names used in filtering the
analytical inputs.
Returns:
Narray: The return value is a matrix of filtered ctd data with
the above listed header values.
"""
if input_array is None:
print("In raw_ctd_filter: No data array.")
return
else:
return_array = input_array
if parameters is None:
print("In raw_ctd_filter: Empty parameter list.")
else:
for p in parameters:
if filter_type == 'boxcar':
win = sig.boxcar(win_size)
return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/len(win)
elif filter_type == 'gaussian':
sigma = np.std(input_array[str(p)])
win = sig.general_gaussian(win_size, 1.0, sigma)
return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/(len(win))
elif filter_type == 'triangle':
win = sig.triang(win_size)
return_array[p] = 2*sig.convolve(input_array[p], win, mode='same')/len(win)
return return_array
def ondeck_pressure(stacast, p_col, c1_col, c2_col, time_col, inMat=None, conductivity_startup=20.0, log_file=None):
"""ondeck_pressure function
Function takes full NUMPY ndarray with predefined dtype array
of filtered raw CTD data, then stores, analyzes, and removes on-deck
values from the data.
Args:
param1 (str): stacast, station cast info
param1 (str): p_col, pressure data column name
param2 (str): c1_col, cond1 data column name
param3 (str): c2_col, cond2 data column name
param4 (str): time_col, time data column name
param5 (ndarray): numpy ndarray with dtype array
param6 (float): conductivity_startup, threshold value
param7 (str): log_file, log file name
Returns:
Narray: The return ndarray with ondeck data removed.
Also output start/end ondeck pressure.
"""
start_pressure = []
tmpMat = []
outMat = []
tmp = 0
start_p = 0.0
n = 0
ep = []
end_p = 0.0
# Frequency
fl = 24
fl2 = fl*2
# One minute
mt = 60
# Half minute
ms = 30
time_delay = fl*ms
if inMat is None:
print("Ondeck_pressure function: No data.")
return
else:
# Searches first quarter of matrix, uses conductivity
# threshold min to capture startup pressure
for j in range(0,int(len(inMat)/4)):
if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
tmp = j
start_pressure.append(inMat[p_col][j])
# Evaluate starting pressures
if not start_pressure: start_p = "Started in Water"
else:
n = len(start_pressure)
if (n > time_delay): start_p = np.average(start_pressure[fl2:n-(time_delay)])
else: start_p = np.average(start_pressure[fl2:n])
# Remove on-deck startup
inMat = inMat[tmp:]
tmp = len(inMat);
# Searches last half of NDarray for conductivity threshold
if len(inMat) % 2 == 0:
inMat_2 = inMat.copy()
else:
inMat_2 = inMat.iloc[1:].copy()
inMat_half1, inMat_half2 = np.split(inMat_2,2)
ep = inMat_half2[(inMat_half2[c1_col] < conductivity_startup) & (inMat_half2[c2_col] < conductivity_startup)][p_col]
# for j in range(int(len(inMat)*0.5), len(inMat)):
# if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
# ep.append(inMat[p_col][j])
# if (tmp > j): tmp = j
# Evaluate ending pressures
if (len(ep) > (time_delay)): end_p = np.average(ep[(time_delay):])
else: end_p = np.average(ep)
# Remove on-deck ending
outMat = inMat[:tmp]
# Store ending on-deck pressure
report_ctd.report_pressure_details(stacast, log_file, start_p, end_p)
return outMat
def _roll_filter(df, pressure_column="CTDPRS", direction="down"):
#fix/remove try/except once serialization is fixed
try:
if direction == 'down':
monotonic_sequence = df[pressure_column].expanding().max()
elif direction == 'up':
monotonic_sequence = df[pressure_column].expanding().min()
else:
raise ValueError("direction must be one of (up, down)")
except KeyError:
pressure_column = 'CTDPRS'
if direction == 'down':
monotonic_sequence = df[pressure_column].expanding().max()
elif direction == 'up':
monotonic_sequence = df[pressure_column].expanding().min()
else:
raise ValueError("direction must be one of (up, down)")
return df[df[pressure_column] == monotonic_sequence]
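# Hedged usage sketch (added for illustration only; not part of the pipeline).
def _example_roll_filter():
    '''Shows how the expanding-maximum filter above removes scans where the package
    rolled back up during a downcast. Toy pressures are an assumption.'''
    toy = pd.DataFrame({'CTDPRS': [0.0, 2.0, 4.0, 3.5, 5.0, 4.8, 6.0]})
    filtered = _roll_filter(toy, pressure_column='CTDPRS', direction='down')
    # Rows at 3.5 and 4.8 dbar fall below the running maximum and are dropped.
    return list(filtered['CTDPRS'])   # expected [0.0, 2.0, 4.0, 5.0, 6.0]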
def roll_filter(inMat, p_col, up='down', frames_per_sec=24, search_time=15, **kwargs):
"""roll_filter function
Function takes full NUMPY ndarray with predefined dtype array
and subsample arguments to return a roll filtered ndarray.
Args:
param1 (str): stacast, station cast info
param2 (ndarray): inMat, numpy ndarray with dtype array
param3 (str): up, direction to filter cast (up vs down)
param4 (int): frames_per_sec, subsample selection rate
param5 (int): search_time, search time past pressure inversion
Returns:
Narray: The return value ndarray of data with ship roll removed
"""
#When the "pressure sequence" code is fixed, uncomment and use this instead
start = kwargs.get("start", 0)
end = kwargs.get("end", -1)
full_matrix = kwargs.get("full_matrix", inMat)
tmp_df = pd.DataFrame.from_records(full_matrix[start:end])
tmp_df = _roll_filter(tmp_df)
#return tmp_df.to_records(index=False)
return tmp_df
remove = []
frequency = 24 # Hz of package
if (frames_per_sec > 0) & (frames_per_sec <= 24):
sample = int(frequency/frames_per_sec) # establish subsample rate to time ratio
else: sample = frequency
# Adjusted search time with subsample rate
search_time = int(sample*frequency*int(search_time))
if inMat is None:
print("Roll filter function: No input data.")
return
else:
P = inMat[p_col]
dP = np.diff(P,1)
if up == 'down':
index_to_remove = np.where(dP < 0)[0] # Differential filter
subMat = np.delete(inMat, index_to_remove, axis=0)
P = subMat[p_col]
tmp = np.array([])
for i in range(0,len(P)-1):
if P[i] > P[i+1]:
deltaP = P[i+1] + abs(P[i] - P[i+1])
# Remove aliasing
k = np.where(P == min(P[i+1:i+search_time], key=lambda x:abs(x-deltaP)))[0]
tmp = np.arange(i+1,k[0]+1,1)
remove = np.append(remove,tmp)
deltaP = 0
elif up == 'up':
index_to_remove = np.where(dP > 0)[0] # Differential filter
subMat = np.delete(inMat, index_to_remove, axis=0)
P = subMat[p_col]
tmp = np.array([])
for i in range(0,len(P)-1):
if P[i] < P[i+1]:
deltaP = P[i+1] - abs(P[i] - P[i+1])
# Remove aliasing
k = np.where(P == min(P[i+1:i+search_time], key=lambda x:abs(x-deltaP)))[0]
tmp = np.arange(i+1,k[0]+1,1)
remove = np.append(remove,tmp)
deltaP = 0
subMat = np.delete(subMat,remove,axis=0)
return subMat
def pressure_sequence(df, p_col='CTDPRS', intP=2.0, startT=-1.0, startP=0.0, up='down', sample_rate=12, search_time=15):
"""pressure_sequence function
Function takes a dataframe and several arguments to return a pressure
sequenced data ndarray.
Pressure sequencing includes rollfilter.
Necessary inputs are input Matrix (inMat) and pressure interval (intP).
The other inputs have default settings. The program will figure out
specifics for those settings if left blank.
Start time (startT), start pressure (startP) and up are mutually exclusive.
If sensors are not not fully functional when ctd starts down cast
analyst can select a later start time or start pressure but not both.
There is no interpolation to the surface for other sensor values.
'up' indicates direction for pressure sequence. If up is set startT and startP
are void.
Args:
param1 (DataFrame): df, DataFrame containing measurement data
param2 (str): p_col, pressure column name
param3 (float): starting pressure interval
param5 (float): start time (startT) for pressure sequence
param6 (float): start pressure (startP) for pressure sequence
param7 (str): pressure sequence direction (down/up)
param8 (int): sample_rate, sub sample rate for roll_filter. Cleans & speeds processing.
param9 (int): search_time, truncate search index for the aliasing part of ship roll.
param10 (ndarray): inMat, input data ndarray
Returns:
Narray: The return value is a matrix of pressure sequenced data
todo: deep data bin interpolation to manage empty slices
"""
# change to take dataframe with the following properties
# * in water data only (no need to find cast start/end)
# * The full down and up time series (not already split since this method will do it)
# New "algorithm" (TODO spell this right)
# * if direction is "down", use the input as is
# * if direction is "up", invert the row order of the input dataframe
# Use the "roll filter" method to get only the rows to be binned
# * the roll filter will treat the "up" part of the cast as a giant roll to be filtered out
# * the reversed dataframe will ensure we get the "up" or "down" part of the cast
# * there is no need to reverse the dataframe again as the pressure binning process will remove any "order" information (it doesn't care about the order)
# That's basically all I (barna) have so far TODO Binning, etc...
# pandas.cut() to do binning
#lenP, prvPrs not used
# Passed Time-Series, Create Pressure Series
start = 0
# Roll Filter
roll_filter_matrix = roll_filter(df, p_col, up, sample_rate, search_time, start=start)
df_roll_surface = fill_surface_data(roll_filter_matrix, bin_size=2)
#bin_size should be moved into config
binned_df = binning_df(df_roll_surface, bin_size=2)
binned_df = binned_df.reset_index(drop=True)
return binned_df
def binning_df(df, **kwargs):
'''Bins records according to bin_size, then finds the mean of each bin and returns a df.
'''
bin_size = kwargs.get("bin_size", 2)
try:
labels_in = [x for x in range(0,int(np.ceil(df['CTDPRS_DBAR'].max())),2)]
df['bins'] = pd.cut(df['CTDPRS_DBAR'], range(0,int(np.ceil(df['CTDPRS_DBAR'].max()))+bin_size,bin_size), right=False, include_lowest=True, labels=labels_in)
df['CTDPRS_DBAR'] = df['bins'].astype('float64')
df_out = df.groupby('bins').mean()
return df_out
except KeyError:
labels_in = [x for x in range(0,int(np.ceil(df['CTDPRS'].max())),2)]
df['bins'] = pd.cut(df['CTDPRS'], range(0,int(np.ceil(df['CTDPRS'].max()))+bin_size,bin_size), right=False, include_lowest=True, labels=labels_in)
df['CTDPRS'] = df['bins'].astype('float64')
df_out = df.groupby('bins').mean()
return df_out
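# Hedged usage sketch (added for illustration only; toy values are an assumption).
def _example_binning_df():
    '''Bins a short pressure series into 2 dbar bins; the pressure column is replaced
    by the bin label and every other column is averaged within each bin.'''
    toy = pd.DataFrame({'CTDPRS': [0.5, 1.0, 2.5, 3.0, 4.2, 5.1],
                        'CTDTMP1': [10.0, 9.8, 9.0, 8.9, 8.0, 7.9]})
    binned = binning_df(toy, bin_size=2)
    # Expected bins 0, 2, 4 with CTDTMP1 means 9.9, 8.95 and 7.95.
    return binned[['CTDPRS', 'CTDTMP1']]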
def fill_surface_data(df, **kwargs):
'''Copy first scan from top of cast, and propagate up to the surface
'''
surface_values = []
bin_size = kwargs.get("bin_size", 2)
try:
for x in range(1, int(np.floor(df.iloc[0]['CTDPRS_DBAR'])), bin_size):
surface_values.append(x)
df_surface = pd.DataFrame({'CTDPRS_DBAR': surface_values})
df_surface['interp_bol'] = 1
df_merged = pd.merge(df_surface, df, on='CTDPRS_DBAR', how='outer')
except KeyError:
for x in range(1, int(np.floor(df.iloc[0]['CTDPRS'])), bin_size):
surface_values.append(x)
df_surface = pd.DataFrame({'CTDPRS': surface_values})
# Added by KJ to keep track of backfilled values
df_surface['interp_bol'] = 1
if len(df_surface['interp_bol']) == 1:
df_surface['interp_bol'] = 0
df_merged = pd.merge(df_surface.astype('float64'), df, on='CTDPRS', how='outer')
if 'interp_bol' not in df_merged.columns:
df_merged['interp_bol'] = np.NaN
df_merged['interp_bol'].fillna(0,inplace=True)
return df_merged.fillna(method='bfill')
def load_reft_data(reft_file,index_name = 'btl_fire_num'):
""" Loads reft_file to dataframe and reindexes to match bottle data dataframe"""
reft_data = pd.read_csv(reft_file,usecols=['btl_fire_num','T90'])
reft_data.set_index(index_name)
reft_data['SSSCC_TEMP'] = reft_file[-14:-9]
reft_data['REFTMP'] = reft_data['T90']
return reft_data
def load_salt_data(salt_file, index_name= 'SAMPNO'):
salt_data = pd.read_csv(salt_file,usecols = ['SAMPNO','SALNTY','BathTEMP','CRavg'])
salt_data.set_index(index_name)
salt_data['SSSCC_SALT'] = salt_file[-15:-10]
salt_data.rename(columns={'SAMPNO':'SAMPNO_SALT'}, inplace=True)
return salt_data
def load_btl_data(btl_file,cols=None):
"""ex. '/Users/k3jackson/p06e/data/bottle/00201_btl_mean.pkl'"""
btl_data = dataToNDarray(btl_file,float,True,',',0)
btl_data = pd.DataFrame.from_records(btl_data)
if cols != None:
btl_data = btl_data[cols]
ssscc = btl_file[-18:-13]
btl_data['SSSCC'] = ssscc
return btl_data
def load_time_data(time_file):
time_data = dataToNDarray(time_file,float,True,',',1)
time_data = pd.DataFrame.from_records(time_data)
return time_data
def calibrate_param(param,ref_param,press,calib,order,ssscc,btl_num,xRange=None,):
### NOTE: REF VALUES DEEMED QUESTIONABLE ARE STILL BEING USED FOR CALIBRATION
df_good = quality_check(param,ref_param,press,ssscc,btl_num,find='good')
df_ques = quality_check(param,ref_param,press,ssscc,btl_num,find='quest')
df_ques['Parameter'] = param.name
#report questionable data to a csv file
#constrain pressure to within limits of xRange
if xRange != None:
x0 = int(xRange.split(":")[0])
x1 = int(xRange.split(":")[1])
df_good_cons = df_good[(df_good[press.name] >= x0) & (df_good[press.name] <= x1)]
else:
#Take full range of temperature values
x0 = df_good[param.name].min()
x1 = df_good[param.name].max()
df_good_cons = df_good[(df_good[param.name] >= x0) & (df_good[param.name] <= x1)]
if 'P' in calib:
coef = get_param_coef(df_good_cons[press.name],df_good_cons['Diff'],order,calib)
elif 'T' or 'C' in calib:
coef = get_param_coef(df_good_cons[param.name],df_good_cons['Diff'],order,calib)
else:
print('calib argument not valid, use CP TP T or C')
return coef,df_ques
def quality_check(param,param_2,press,ssscc,btl_num,find,thresh=[0.002, 0.005, 0.010, 0.020]):
param = fit_ctd.array_like_to_series(param)
param_2 = fit_ctd.array_like_to_series(param_2)
press = fit_ctd.array_like_to_series(press)
ssscc = fit_ctd.array_like_to_series(ssscc)
btl_num = fit_ctd.array_like_to_series(btl_num)
diff = param_2 - param
df = pd.concat([ssscc,btl_num.rename('Bottle'),param.rename('Param_1'),param_2.rename('Param_2'),press.rename('CTDPRS'),diff.rename('Diff')],axis=1)
if find == 'good':
# Find data values for each sensor that are below the threshold (good)
df['Flag'] = 1
#df_range_comp = df_range[(df_range[diff].abs() < threshold)]# & (df_range[d_2].abs() < threshold) & (df_range[d_12].abs() < threshold)]
df.loc[(df.CTDPRS > 2000) & (df.Diff.abs() < thresh[0]), 'Flag'] = 2
df.loc[(df.CTDPRS <= 2000) & (df.CTDPRS >1000) & (df.Diff.abs() < thresh[1]), 'Flag'] = 2
df.loc[(df.CTDPRS <= 1000) & (df.CTDPRS >500) & (df.Diff.abs() < thresh[2]), 'Flag'] = 2
df.loc[(df.CTDPRS <= 500) & (df.Diff.abs() < thresh[3]), 'Flag'] = 2
#
# Filter out bad values
df = df[df['Flag'] == 2]
# Rename Columns back to what they were
if param.name != None:
df.rename(columns = {'Param_1' : param.name}, inplace=True)
if param_2.name != None:
df.rename(columns = {'Param_2' : param_2.name},inplace=True)
if press.name != None:
df.rename(columns = {'CTDPRS' : press.name}, inplace=True )
elif find == 'quest':
# Find data values for each sensor that are above the threshold (questionable)
df['Flag'] = 1
df.loc[(df.CTDPRS > 2000) & (df.Diff.abs() > thresh[0]), 'Flag'] = 3
df.loc[(df.CTDPRS <= 2000) & (df.CTDPRS >1000) & (df.Diff.abs() > thresh[1]), 'Flag'] = 3
df.loc[(df.CTDPRS <= 1000) & (df.CTDPRS >500) & (df.Diff.abs() > thresh[2]), 'Flag'] = 3
df.loc[(df.CTDPRS <= 500) & (df.Diff.abs() > thresh[3]), 'Flag'] = 3
# Filter out good values
df = df[df['Flag'] == 3]
# Remove unneeded columns
df = df.drop(['Param_1','Param_2'],axis=1)
# Re-Order Columns for better readability
df = df[[ssscc.name,'Bottle',press.name,'Flag','Diff']]
else:
print('Find argument not valid, please enter "good" or "quest" to find good or questionable values')
return df
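# Hedged usage sketch (added for illustration only). Assumes fit_ctd.array_like_to_series
# passes named pandas Series through unchanged; the toy numbers are made up.
def _example_quality_check():
    '''Flags bottle-minus-CTD conductivity differences against the pressure-dependent
    thresholds above (0.002 deeper than 2000 dbar, relaxing to 0.020 shallower than 500 dbar).'''
    ctd = pd.Series([34.000, 34.100], name='CTDCOND1')
    ref = pd.Series([34.001, 34.150], name='BTLCOND')
    prs = pd.Series([2500.0, 300.0], name='CTDPRS')
    stn = pd.Series(['00101', '00101'], name='SSSCC')
    btl = pd.Series([1, 2], name='btl_fire_num')
    good = quality_check(ctd, ref, prs, stn, btl, find='good')    # keeps the deep, small difference
    ques = quality_check(ctd, ref, prs, stn, btl, find='quest')   # flags the shallow, large difference
    return good, ques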
def get_param_coef(calib_param,diff,order,calib):
cf1 = np.polyfit(calib_param, diff, order)
if 'T' in calib:
coef = np.zeros(shape=5)
if order is 0:
coef[4] = cf1[0]
elif (order is 1) and (calib == 'TP'):
coef[1] = cf1[0]
coef[4] = cf1[1]
elif (order is 2) and (calib == 'TP'):
coef[0] = cf1[0]
coef[1] = cf1[1]
coef[4] = cf1[2]
elif (order is 1) and (calib == 'T'):
coef[3] = cf1[0]
coef[4] = cf1[1]
elif (order is 2) and (calib == 'T'):
coef[2] = cf1[0]
coef[3] = cf1[1]
coef[4] = cf1[2]
if 'C' in calib:
coef = np.zeros(shape=7)
if order is 0:
coef[6] = cf1[0]
elif (order is 1) and (calib == 'CP'):
coef[1] = cf1[0]
coef[6] = cf1[1]
elif (order is 2) and (calib == 'CP'):
coef[0] = cf1[0]
coef[1] = cf1[1]
coef[6] = cf1[2]
elif (order is 1) and (calib == 'C'):
coef[5] = cf1[0]
coef[6] = cf1[1]
elif (order is 2) and (calib == 'C'):
coef[4] = cf1[0]
coef[5] = cf1[1]
coef[6] = cf1[2]
return coef
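# Hedged usage sketch (added for illustration only; synthetic differences, not fit results).
def _example_get_param_coef():
    '''A first-order 'T' fit packs the polyfit slope and intercept into positions 3 and 4
    of the 5-element temperature coefficient array (the other entries stay zero).'''
    t = np.array([1.0, 2.0, 3.0, 4.0])
    diff = 0.001 * t + 0.0005
    # Expected roughly [0, 0, 0, 0.001, 0.0005]
    return get_param_coef(t, diff, 1, 'T')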
def combine_quality_flags(df_list):
combined_df = pd.concat(df_list)
combined_df = combined_df.sort_values(['SSSCC','Bottle'])
combined_df = combined_df.round(4)
return combined_df
#Combine these three into a dataframe and write out to a csv
#Sort by sta/cast, bottle number, rev. press
def calibrate_conductivity(df,order,calib_param,sensor,xRange=None,
refc_col='BTLCOND',cond_col_1='CTDCOND1',cond_col_2='CTDCOND2',
p_col='CTDPRS'):#refc_data
### NOTE: REF VALUES DEEMED QUESTIONABLE ARE STILL BEING USED FOR CALIBRATION
if sensor == 1:
postfix = 'c1'
cond_col = 'CTDCOND1'
t_col = 'CTDTMP1'
elif sensor ==2:
postfix = 'c2'
cond_col = 'CTDCOND2'
t_col = 'CTDTMP2'
else:
print('No sensor name supplied, difference column name will be: diff')
if calib_param == 'P':
calib_col = p_col
elif calib_param == 'T':
calib_col = t_col
elif calib_param == 'C':
calib_col = cond_col
else:
print('No calib_param supplied')
diff = 'd_'+postfix #Difference between ref and prim sensor
# Calculate absolute differences between sensors and salt sample data
#df[diff] = refc_data[refc_col] - df[cond_col]
df[diff] = df[refc_col] - df[cond_col]
#df['primary_diff'] = refc_data[refc_col] - df[cond_col_1]
df['primary_diff'] = df[refc_col] - df[cond_col_1]
#df['secondary_diff'] = refc_data[refc_col] - df[cond_col_2]
df['secondary_diff'] = df[refc_col] - df[cond_col_2]
df['P-S'] = df[cond_col_1] - df[cond_col_2]
#Greater than 2000 dBar
lower_lim = 2000
upper_lim = df[p_col].max()
threshold = 0.002
df_deep_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_deep_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_deep_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#Between 2000 and 1000
lower_lim = 1000
upper_lim = 2000
threshold = 0.005
df_lmid_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_lmid_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_lmid_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#Between 1000 and 500
lower_lim = 500
upper_lim = 1000
threshold = 0.010
df_umid_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_umid_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_umid_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#Less than 500
lower_lim = df[p_col].min() - 1
upper_lim = 500
threshold = 0.020
df_shal_good = quality_check(df,diff,lower_lim,upper_lim,threshold)
df_shal_ques = quality_check(df,diff,lower_lim,upper_lim,threshold,find='quest')
df_shal_ref = quality_check(df,diff,lower_lim,upper_lim,threshold,find='ref')
#concat dataframes into two main dfs
df_good = pd.concat([df_deep_good,df_lmid_good,df_umid_good,df_shal_good])
df_ques = pd.concat([df_deep_ques,df_lmid_ques,df_umid_ques,df_shal_ques])
df_ref = pd.concat([df_deep_ref,df_lmid_ref,df_umid_ref,df_shal_ref])
if sensor == 1:
df_ques['Parameter'] = 'C1'
df_ques['Flag'] = 3
df_ref['Parameter'] = 'C'
df_ref['Flag'] = 3
elif sensor == 2:
df_ques['Parameter'] = 'C2'
df_ques['Flag'] = 3
df_ref['Flag'] = 3
if xRange != None:
x0 = int(xRange.split(":")[0])
x1 = int(xRange.split(":")[1])
df_good_cons = df_good[(df_good[calib_col] >= x0) & (df_good[calib_col] <= x1)]
else:
#Take full range of temperature values
# x0 = df_good[t_col].min()
# x1 = df_good[t_col].max()
df_good_cons = df_good#[(df_good[calib_col] >= x0) & (df_good[calib_col] <= x1)]
cf = np.polyfit(df_good_cons[calib_col], df_good_cons[diff], order)
sensor = '_c'+str(sensor)
coef = np.zeros(shape=7)
if order is 0:
coef[6] = cf[0]
elif (order is 1) and (calib_param == 'P'):
coef[1] = cf[0]
coef[6] = cf[1]
elif (order is 2) and (calib_param == 'P'):
coef[0] = cf[0]
coef[1] = cf[1]
coef[6] = cf[2]
elif (order is 1) and (calib_param == 'T'):
coef[3] = cf[0]
coef[6] = cf[1]
elif (order is 2) and (calib_param == 'T'):
coef[2] = cf[0]
coef[3] = cf[1]
coef[6] = cf[2]
elif (order is 1) and (calib_param == 'C'):
coef[5] = cf[0]
coef[6] = cf[1]
elif (order is 2) and (calib_param == 'C'):
coef[4] = cf[0]
coef[5] = cf[1]
coef[6] = cf[2]
return coef,df_ques,df_ref
def prepare_fit_data(df,ref_col):
good_data = df.copy()
good_data = good_data[np.isfinite(good_data[ref_col])]
return good_data
def prepare_conductivity_data(ssscc,df,refc,ssscc_col = 'SSSCC',index_col = 'btl_fire_num'):
btl_concat = pd.DataFrame()
for x in ssscc:
btl_data = df[df[ssscc_col] == x]
refc_data = refc[refc[ssscc_col] == x]
btl_data_clean = prepare_fit_data(btl_data,refc_data,'C')
btl_concat = pd.concat([btl_concat,btl_data_clean])
refc = refc[refc[index_col] != 0]
refc = refc.reset_index(drop=True)
btl_concat = btl_concat.reset_index(drop=True)
return btl_concat, refc
def prepare_all_fit_data(ssscc,df,ref_data,param):
data_concat = pd.DataFrame()
for x in ssscc:
btl_data = df[df['SSSCC']==x]
ref_data_stn= ref_data[ref_data['SSSCC']==x]
btl_data_good = prepare_fit_data(btl_data,ref_data_stn,param)
data_concat = pd.concat([data_concat,btl_data_good])
return data_concat
def get_pressure_offset(start_vals,end_vals):
"""
Finds unique values and calculates the mean for the pressure offset
Parameters
----------
start_vals :array_like
Array containing initial ondeck pressure values
end_vals :array_like
Array containing ending ondeck pressure values
Returns
-------
p_off :float
Average pressure offset
"""
p_start = pd.Series(np.unique(start_vals))
p_end = pd.Series(np.unique(end_vals))
p_start = p_start[p_start.notnull()]
p_end = p_end[p_end.notnull()]
p_off = p_start.mean() - p_end.mean()
# <NAME> THIS METHOD SHOULD BE USED TO KEEP START END PAIRS
# p_df = pd.DataFrame()
# p_df['p_start'] = p_start
# p_df['p_end'] = p_end
# p_df = p_df[p_df['p_end'].notnull()]
# p_df = p_df[p_df['p_start'].notnull()]
# p_off = p_df['p_start'].mean() - p_df['p_end'].mean()
##########################################################
p_off = np.around(p_off,decimals=4)
return p_off
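# Hedged usage sketch (added for illustration only; on-deck pressures below are invented).
def _example_get_pressure_offset():
    '''The offset is mean(unique start pressures) minus mean(unique end pressures).'''
    start = np.array([0.2, 0.2, 0.4])
    end = np.array([-0.1, 0.3])
    # unique starts average 0.3, unique ends average 0.1, so the offset is 0.2
    return get_pressure_offset(start, end)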
def load_pressure_logs(file):
"""
Loads pressure offset file from logs.
Parameters
----------
file : string
Path to ondeck_pressure log
Returns
-------
df : DataFrame
Pandas DataFrame containing ondeck start and end pressure values
"""
df = pd.read_csv(file,names=['SSSCC','ondeck_start_p','ondeck_end_p'])
# Change values in each row by removing non-number parts
df['SSSCC'] = df['SSSCC'].str[-5:]
df['ondeck_start_p'] = df['ondeck_start_p'].str[16:]
df['ondeck_end_p'] = df['ondeck_end_p'].str[14:]
df.loc[df['ondeck_start_p'].str[-5:] == 'Water','ondeck_start_p'] = np.NaN
df['ondeck_start_p'] = df['ondeck_start_p'].astype(float)
df['ondeck_end_p'] = df['ondeck_end_p'].astype(float)
return df
def write_offset_file(df,p_off,write_file='data/logs/poffset_test.csv'):
"""
"""
df_out = pd.DataFrame()
df_out['SSSCC'] = df['SSSCC']
df_out['offset'] = p_off
df_out.to_csv(write_file,index=False)
return
def pressure_calibrate(file):
pressure_log = load_pressure_logs(file)
p_off = get_pressure_offset(pressure_log)
return p_off
def load_hy_file(path_to_hyfile):
df = pd.read_csv(path_to_hyfile, comment='#', skiprows=[0])
df = df[df['EXPOCODE'] != 'END_DATA']
return df
def load_all_ctd_files(ssscc,prefix,postfix,series,cols,reft_prefix='data/reft/',reft_postfix='_reft.csv',
refc_prefix='data/salt/',refc_postfix='_salts.csv',press_file='data/logs/ondeck_pressure.csv', cast_details = 'data/logs/cast_details.csv',
oxy_prefix='data/oxygen/', oxy_postfix='',index_col='btl_fire_num',t_col='CTDTMP1',
p_col='CTDPRS',ssscc_col='SSSCC'):
"""
LOAD ALL CTD FILES was changed (commented out)
Lines 1324-1328,1335,1337, 1338,345
"""
df_data_all = pd.DataFrame()
if series == 'bottle':
for x in ssscc:
print('Loading BTL data for station: ' + x + '...')
btl_file = prefix + x + postfix
btl_data = load_btl_data(btl_file,cols)
reft_file = reft_prefix + x + reft_postfix
try:
reft_data = load_reft_data(reft_file)
except FileNotFoundError:
print('Missing (or misnamed) REFT Data Station: ' + x + '...filling with NaNs')
reft_data = pd.DataFrame()
reft_data[index_col] = pd.Series(btl_data[index_col].values.astype(int))
reft_data['T90'] = pd.Series([np.nan]*len(btl_data))
ref_ssscc = ssscc_col + '_TEMP'
reft_data[ref_ssscc] = x
reft_data.index = btl_data.index
#refc_file = refc_prefix + x + refc_postfix
refc_file = refc_prefix + x + refc_postfix
try:
#refc_data = fit_ctd.salt_calc(refc_file,index_col,t_col,p_col,btl_data)
refc_data = load_salt_data(refc_file, index_name= 'SAMPNO')
except FileNotFoundError:
print('Missing (or misnamed) REFC Data Station: ' + x + '...filling with NaNs')
refc_data = pd.DataFrame()
refc_data['SAMPNO_SALT'] = pd.Series(btl_data[index_col].values.astype(int))
refc_data['CRavg'] = pd.Series([np.nan]*len(btl_data))
refc_data['BathTEMP'] = pd.Series([np.nan]*len(btl_data))
refc_data['BTLCOND'] = pd.Series([np.nan]*len(btl_data))
refc_data.index = btl_data.index
#Fix Index for each parameter to bottle number
# btl_data[index_col] = btl_data[index_col].astype(int)
# btl_data=btl_data.set_index(btl_data[index_col].values)
#
# reft_data = reft_data.set_index(reft_data[index_col].values)
oxy_file = oxy_prefix + x + oxy_postfix
try:
oxy_data,params = oxy_fitting.oxy_loader(oxy_file)
except FileNotFoundError:
print('Missing (or misnamed) REFO Data Station: ' + x + '...filling with NaNs')
oxy_data = pd.DataFrame()
oxy_data['BOTTLENO_OXY'] = pd.Series(btl_data[index_col].values.astype(int))
oxy_data['STNNO_OXY'] = pd.Series([np.nan]*len(btl_data))
oxy_data['CASTNO_OXY'] = pd.Series([np.nan]*len(btl_data))
oxy_data['FLASKNO'] = pd.Series([np.nan]*len(btl_data))
oxy_data['TITR_VOL'] = pd.Series([np.nan]*len(btl_data))
oxy_data['TITR_TEMP'] = pd.Series([np.nan]*len(btl_data))
oxy_data['DRAW_TEMP'] = pd.Series([np.nan]*len(btl_data))
oxy_data['TITER_TIME'] = pd.Series([np.nan]*len(btl_data))
oxy_data['END_VOLTS'] = pd.Series([np.nan]*len(btl_data))
oxy_data.index = btl_data.index
# #Horizontally concat DFs to have all data in one DF
# btl_data_full = pd.concat([btl_data,reft_data,refc_data,oxy_data],axis=1)
btl_data = pd.merge(btl_data,reft_data,on='btl_fire_num',how='outer')
btl_data = pd.merge(btl_data,refc_data,left_on='btl_fire_num',right_on='SAMPNO_SALT',how='outer')
btl_data = pd.merge(btl_data,oxy_data,left_on='btl_fire_num',right_on='BOTTLENO_OXY',how='outer')
if len(btl_data) > 36:
print("***** Len of btl data for station: ",x,' is > 36, check for multiple stations/casts in reference parameter files *****')
### Calculate dv/dt for oxygen fitting
btl_data['dv_dt'] = oxy_fitting.calculate_dVdT(btl_data['CTDOXYVOLTS'],btl_data['scan_datetime'])
#btl_data = get_btl_time(btl_data,'btl_fire_num','scan_datetime')
# Add bottom of cast information (date,time,lat,lon,etc.)
btl_data = add_btl_bottom_data(btl_data, x, cast_details)
# #Drop columns that have no CTD data
# btl_data_full = btl_data_full.dropna(subset=cols)
#btl_data = btl_data.set_index(['SSSCC','GPSLAT','GPSLON','CTDPRS'],drop=True)
try:
df_data_all = pd.concat([df_data_all,btl_data],sort=False)
except AssertionError:
raise AssertionError('Columns of ' + x + ' do not match those of previous columns')
print('* Finished BTL data station: ' + x + ' *')
#Drops duplicated columns generated by concatenation
df_data_all = df_data_all.loc[:,~df_data_all.columns.duplicated()]
#if 'GPSLAT' in df_data_all.keys():
# df_data_all['LATITUDE'] = df_data_all['GPSLAT']
#if 'GPSLON' in df_data_all.keys():
# df_data_all['LONGITUDE'] = df_data_all['GPSLON']
elif series == 'time':
for x in ssscc:
print('Loading TIME data for station: ' + x + '...')
file = prefix + x + postfix
time_data = load_time_data(file)
time_data['SSSCC'] = str(x)
time_data['dv_dt'] = oxy_fitting.calculate_dVdT(time_data['CTDOXYVOLTS'],time_data['scan_datetime'])
df_data_all = pd.concat([df_data_all,time_data], sort=False)
print('** Finished TIME data station: ' + x + ' **')
df_data_all['master_index'] = range(len(df_data_all))
return df_data_all
def merge_refcond_flags(btl_data, qual_flag_cond):
# Merge df
mask = qual_flag_cond[qual_flag_cond['Parameter'] == 'REF_COND'].copy()
mask['SSSCC'] = mask['SSSCC'].astype(str)
btl_data = btl_data.merge(mask,left_on=['SSSCC','btl_fire_num'], right_on=['SSSCC','Bottle'],how='left')
# Rename Columns
btl_data.rename(columns={'CTDPRS_x':'CTDPRS','SSSCC_x':'SSSCC','Flag':'SALNTY_FLAG_W'},inplace=True)
btl_data.drop(columns=['Parameter','CTDPRS_y','Bottle','Diff'],inplace=True)
btl_data['SALNTY_FLAG_W'].fillna(value=2,inplace=True)
try:
btl_data.loc[btl_data['BTLCOND'].isna(),'SALNTY_FLAG_W'] = 9
except KeyError:
btl_data.loc[btl_data['SALNTY'].isna(),'SALNTY_FLAG_W'] = 9
btl_data['SALNTY_FLAG_W'] = btl_data['SALNTY_FLAG_W'].astype(int)
return btl_data
def merge_cond_flags(btl_data, qual_flag_cond,parameter):
# Merge df
#if sensor == 1:
# parameter = 'CTDCOND1'
#elif sensor == 2:
# parameter = 'CTDCOND2'
mask = qual_flag_cond[qual_flag_cond['Parameter'] == parameter].copy()
mask['SSSCC'] = mask['SSSCC'].astype(str)
btl_data = btl_data.merge(mask,left_on=['SSSCC','btl_fire_num'], right_on=['SSSCC','Bottle'],how='left')
# Rename Columns
btl_data.rename(columns={'CTDPRS_x':'CTDPRS','SSSCC_x':'SSSCC','Flag':'CTDSAL_FLAG_W'},inplace=True)
btl_data.drop(columns=['Parameter','CTDPRS_y','Bottle','Diff'],inplace=True)
btl_data['CTDSAL_FLAG_W'].fillna(value=2,inplace=True)
btl_data.loc[btl_data[parameter].isna(),'CTDSAL_FLAG_W'] = 9
btl_data['CTDSAL_FLAG_W'] = btl_data['CTDSAL_FLAG_W'].astype(int)
return btl_data
def merged_reftemp_flags(btl_data, qual_flag_temp):
mask = qual_flag_temp[qual_flag_temp['Parameter'] == 'REF_TEMP'].copy()
mask['SSSCC'] = mask['SSSCC'].astype(str)
btl_data = btl_data.merge(mask,left_on=['SSSCC','btl_fire_num'], right_on=['SSSCC','Bottle'],how='left')
# Rename Columns
btl_data.rename(columns={'CTDPRS_x':'CTDPRS','SSSCC_x':'SSSCC','Flag':'REFTMP_FLAG_W'},inplace=True)
btl_data.drop(columns=['Parameter','CTDPRS_y','Bottle','Diff'],inplace=True)
btl_data['REFTMP_FLAG_W'].fillna(value=2,inplace=True)
try:
btl_data.loc[btl_data['T90'].isna(),'REFTMP_FLAG_W'] = 9
except KeyError:
btl_data.loc[btl_data['REFTMP'].isna(),'REFTMP_FLAG_W'] = 9
btl_data['REFTMP_FLAG_W'] = btl_data['REFTMP_FLAG_W'].astype(int)
return btl_data
def merge_temp_flags(btl_data, qual_flag_temp, parameter):
mask = qual_flag_temp[qual_flag_temp['Parameter'] == parameter].copy()
mask['SSSCC'] = mask['SSSCC'].astype(str)
btl_data = btl_data.merge(mask,left_on=['SSSCC','btl_fire_num'], right_on=['SSSCC','Bottle'],how='left')
# Rename Columns
btl_data.rename(columns={'CTDPRS_x':'CTDPRS','SSSCC_x':'SSSCC','Flag':'CTDTMP_FLAG_W'},inplace=True)
btl_data.drop(columns=['Parameter','CTDPRS_y','Bottle','Diff'],inplace=True)
btl_data['CTDTMP_FLAG_W'] = btl_data['CTDTMP_FLAG_W'].fillna(value=2)
btl_data['CTDTMP_FLAG_W'] = btl_data['CTDTMP_FLAG_W'].astype(int)
return btl_data
def merge_oxy_flags(btl_data):
mask = (btl_data['OXYGEN'].isna())
btl_data.loc[mask,'OXYGEN_FLAG_W'] = 9
def find_cast_depth(press,lat,alt,threshold=80):
# Create Dataframe containing args
df = pd.DataFrame()
df['CTDPRS'] = press
df['LAT'] = lat
df['ALT'] = alt
# Calculate DEPTH using gsw
df['DEPTH'] = np.abs(gsw.z_from_p(df['CTDPRS'],df['LAT']))
# Find max depth and see if ALT has locked in
bottom_alt = df.loc[df['CTDPRS'] == df['CTDPRS'].max(),'ALT']
if bottom_alt.values[0] <= threshold:
max_depth = bottom_alt + df['CTDPRS'].max()
max_depth = int(max_depth.values[0])
else:
print('Altimeter reading is not reporting values below the threshold ',threshold,' setting max depth to NaN')
max_depth = np.NaN
return max_depth
def format_time_data(df):
format_columns = settings.pressure_series_output['column_names'].copy()
if 'SSSCC' not in format_columns:
print('Adding SSSCC')
format_columns.append('SSSCC')
if 'DEPTH' not in format_columns:
print('Adding DEPTH')
format_columns.append('DEPTH')
try:
df = df[format_columns]
except:
print('missing required pressure series output columns!')
df_columns = list(df.keys())
missing_list = list(np.setdiff1d(format_columns,df_columns))
raise KeyError('missing columns: ',missing_list)
return df
def add_btlnbr_cols(df,btl_num_col):
df['BTLNBR'] = df[btl_num_col].astype(int)
# default to everything being good
df['BTLNBR_FLAG_W'] = 2
return df
def castno_from_ssscc(ssscc):
# ssscc: column (pandas series) containing station and cast numbers in the SSSCC format
ssscc = pd.Series(ssscc)
castno = ssscc.str[3:].astype(int)
return castno
def stnnbr_from_ssscc(ssscc):
# ssscc: column (pandas series) containing station and cast numbers in the SSSCC format
ssscc = pd.Series(ssscc)
stnnbr = ssscc.str[0:3].astype(int)
return stnnbr
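# Hedged usage sketch (added for illustration only): SSSCC strings encode the station in the
# first three characters and the cast in the last two.
def _example_ssscc_helpers():
    ssscc = pd.Series(['00101', '00102', '01204'])
    # Expected stations [1, 1, 12] and casts [1, 2, 4]
    return stnnbr_from_ssscc(ssscc).tolist(), castno_from_ssscc(ssscc).tolist()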
import numpy as np
import pandas as pd
import logging
DISTANCE_THRESHOLD = 1.4 #: max threshold for distnr
SCORE_THRESHOLD = 0.4 #: max threshold for sgscore
CHINR_THRESHOLD = 2 #: max threshold for chinr
SHARPNR_MAX = 0.1 #: max value for sharpnr
SHARPNR_MIN = -0.13 #: min value for sharpnr
ZERO_MAG = 100. #: default value for zero magnitude (a big value!)
TRIPLE_NAN = (np.nan, np.nan, np.nan)
MAGNITUDE_THRESHOLD = 13.2
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(name)s.%(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',)
def correction(magnr, magpsf, sigmagnr, sigmapsf, isdiffpos, oid=None):
"""
Correction function. Implements the correction formula.
:param magnr: Magnitude of nearest source in reference image PSF-catalog within 30 arcsec [mag]
:type magnr: float
:param magpsf: Magnitude from PSF-fit photometry [mag]
:type magpsf: float
:param sigmagnr: 1-sigma uncertainty in magnr within 30 arcsec [mag]
:type sigmagnr: float
:param sigmapsf: 1-sigma uncertainty in magpsf [mag]
:type sigmapsf: float
:param isdiffpos: 1 => candidate is from positive (sci minus ref) subtraction; 0 => candidate is from negative (ref minus sci) subtraction
:type isdiffpos: int
:return: Correction for magnitude, sigma and sigma_ext
:rtype: tuple
Example::
(m_corr, s_corr, s_corr_ext) = correction(a, b, c, d, e)
"""
if magnr < 0 or magpsf < 0:
return TRIPLE_NAN
try:
aux1 = 10**(-0.4 * magnr)
aux2 = 10**(-0.4 * magpsf)
aux3 = aux1 + isdiffpos * aux2
if aux3 > 0:
magpsf_corr = -2.5 * np.log10(aux3)
aux4 = aux2**2 * sigmapsf**2 - aux1**2 * sigmagnr**2
if aux4 >= 0:
sigmapsf_corr = np.sqrt(aux4) / aux3
else:
sigmapsf_corr = ZERO_MAG
sigmapsf_corr_ext = aux2 * sigmapsf / aux3
else:
magpsf_corr = ZERO_MAG
sigmapsf_corr = ZERO_MAG
sigmapsf_corr_ext = ZERO_MAG
return magpsf_corr, sigmapsf_corr, sigmapsf_corr_ext
except Exception as e:
logging.error('Object {}: {}'.format(oid, e))
return TRIPLE_NAN
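# Hedged usage sketch (added for illustration only; the magnitudes below are invented,
# not real ZTF values).
def _example_correction():
    magnr, magpsf = 20.0, 18.5        # reference-source and difference-image magnitudes
    sigmagnr, sigmapsf = 0.1, 0.05    # their 1-sigma uncertainties
    isdiffpos = 1                     # positive (sci minus ref) subtraction
    # Expected roughly (18.26, 0.035, 0.040)
    return correction(magnr, magpsf, sigmagnr, sigmapsf, isdiffpos)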
def apply_correction(candidate):
"""
Correction function for a set of detections
:param candidate: A dataframe with detections of a candidate.
:type candidate: :py:class:`pd.DataFrame`
:return: Wrapper for correction for magnitude, sigma and sigma_ext
:rtype: tuple
Example::
(m_corr, s_corr, s_corr_ext) = correction(a, b, c, d, e)
"""
isdiffpos = 1 if (candidate["isdiffpos"] in ["t", "1"]) else -1
magnr = candidate["magnr"]
magpsf = candidate['magpsf']
sigmagnr = candidate['sigmagnr']
sigmapsf = candidate['sigmapsf']
magpsf_corr, sigmapsf_corr, sigmapsf_corr_ext = correction(magnr, magpsf, sigmagnr, sigmapsf, isdiffpos)
return magpsf_corr, sigmapsf_corr, sigmapsf_corr_ext
def near_stellar(first_distnr, first_distpsnr1, first_sgscore1, first_chinr, first_sharpnr):
"""
Get if object is near stellar
:param first_distnr: Distance to nearest source in reference image PSF-catalog within 30 arcsec [pixels]
:type first_distnr: :py:class:`float`
:param first_distpsnr1: Distance of closest source from PS1 catalog; if exists within 30 arcsec [arcsec]
:type first_distpsnr1: :py:class:`float`
:param first_sgscore1: Star/Galaxy score of closest source from PS1 catalog 0 <= sgscore <= 1 where closer to 1 implies higher likelihood of being a star
:type first_sgscore1: :py:class:`float`
:param first_chinr: DAOPhot chi parameter of nearest source in reference image PSF-catalog within 30 arcsec
:type first_chinr: :py:class:`float`
:param first_sharpnr: DAOPhot sharp parameter of nearest source in reference image PSF-catalog within 30 arcsec
:type first_sharpnr: :py:class:`float`
:return: Flags (nearZTF, nearPS1, stellarPS1, stellarZTF) indicating proximity and stellarity
:rtype: tuple
"""
nearZTF = 0 <= first_distnr < DISTANCE_THRESHOLD
nearPS1 = 0 <= first_distpsnr1 < DISTANCE_THRESHOLD
stellarPS1 = first_sgscore1 > SCORE_THRESHOLD
stellarZTF = first_chinr < CHINR_THRESHOLD and SHARPNR_MIN < first_sharpnr < SHARPNR_MAX
return nearZTF, nearPS1, stellarPS1, stellarZTF
def is_stellar(nearZTF, nearPS1, stellarPS1, stellarZTF):
"""
Get if object is stellar
:param nearZTF:
:type nearZTF: bool
:param nearPS1:
:type nearPS1: bool
:param stellarPS1:
:type stellarPS1: bool
:param stellarZTF:
:type stellarZTF: bool
:return: if the object is stellar
:rtype: bool
"""
return (nearZTF & nearPS1 & stellarPS1) | (nearZTF & ~nearPS1 & stellarZTF)
def is_dubious(corrected, isdiffpos, corr_magstats):
"""Get if object is dubious
:param corrected:
:type corrected: bool
:param isdiffpos:
:type isdiffpos: bool
:param corr_magstats:
:type corr_magstats: bool
:return: if the object is dubious
:rtype: bool
"""
return (~corrected & (isdiffpos == -1)) | (corr_magstats & ~corrected) | (~corr_magstats & corrected)
def dmdt(magpsf_first, sigmapsf_first, nd_diffmaglim, mjd_first, nd_mjd):
"""
Calculate dm/dt
:param magpsf_first:
:type magpsf_first: float
:param sigmapsf_first:
:type sigmapsf_first: float
:param nd_diffmaglim:
:type nd_diffmaglim: float
:param mjd_first:
:type mjd_first: float
:param nd_mjd:
:type nd_mjd: float
:return: dm_sigma, dt, dmsigdt
:rtype: tuple
Example::
dm_sigma, dt, dmsigdt = dmdt(magpsf_first,
sigmapsf_first,
nd.diffmaglim,
mjd_first,
nd.mjd)
"""
dm_sigma = magpsf_first + sigmapsf_first - nd_diffmaglim
dt = mjd_first - nd_mjd
dmsigdt = (dm_sigma / dt)
frame = {
"dm_sigma": dm_sigma,
"dt": dt,
"dmsigdt": dmsigdt
}
df = pd.DataFrame(frame)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from sklearn.metrics import mean_squared_error
from math import sqrt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
# 1. Extract the data from August 2012 through December 2013 (the first 11856 rows)
# Index 11856 marks the end of year 2013
df = pd.read_csv("./jetrail_train.csv", nrows=11856)
print("===== df.head():\n", df.head())
print("===== df.tail():\n", df.tail())
# 2. Build the model's training and test data: August 2012 to October 2013 is used for training, the last two months (2013-11, 2013-12) for testing
#Index 10392 marks the end of October 2013
train=df[0:10392] #train data : 2012-08 ~ 2013-10
test=df[10392:] #test data : 2013-11 2013-12
print("===== train data:", train)
print("===== test data:", test)
# 3. Aggregate the data to the daily level
# The extracted data are aggregated by day (taking the mean)
# 'D' - day; the argument sets the resampling frequency, e.g. 'M', '5min', Second(15)
# mean(): aggregation function - take the average
# df.Datetime : 01-11-2013 01:00
df['Timestamp'] = pd.to_datetime(df.Datetime,format='%d-%m-%Y %H:%M')
print("===== df.Timestamp:\n", df['Timestamp'])
df.index = df.Timestamp
print("===== df.index:\n", df.index)
df = df.resample('D').mean()
print("===== df:", df)
#output: 2012-08-25 11.5 3.166667
# Aggregate the training data by day
train['Timestamp'] = pd.to_datetime(train.Datetime,format='%d-%m-%Y %H:%M')
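# Assumed continuation (the source is truncated here), mirroring the daily
# resampling of the full dataframe above: index train/test by timestamp and
# aggregate both to daily means.
train.index = train.Timestamp
train = train.resample('D').mean()
# Aggregate the test data by day in the same way
test['Timestamp'] = pd.to_datetime(test.Datetime, format='%d-%m-%Y %H:%M')
test.index = test.Timestamp
test = test.resample('D').mean()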
from collections import OrderedDict
import pydoc
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
date_range,
period_range,
timedelta_range,
)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
import pandas.io.formats.printing as printing
class TestSeriesMisc:
def test_scalarop_preserve_name(self, datetime_series):
result = datetime_series * 2
assert result.name == datetime_series.name
def test_copy_name(self, datetime_series):
result = datetime_series.copy()
assert result.name == datetime_series.name
def test_copy_index_name_checking(self, datetime_series):
# don't want to be able to modify the index stored elsewhere after
# making a copy
datetime_series.index.name = None
assert datetime_series.index.name is None
assert datetime_series is datetime_series
cp = datetime_series.copy()
cp.index.name = "foo"
printing.pprint_thing(datetime_series.index.name)
assert datetime_series.index.name is None
def test_append_preserve_name(self, datetime_series):
result = datetime_series[:5].append(datetime_series[5:])
assert result.name == datetime_series.name
def test_binop_maybe_preserve_name(self, datetime_series):
# names match, preserve
result = datetime_series * datetime_series
assert result.name == datetime_series.name
result = datetime_series.mul(datetime_series)
assert result.name == datetime_series.name
result = datetime_series * datetime_series[:-2]
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "something else"
result = datetime_series + cp
assert result.name is None
result = datetime_series.add(cp)
assert result.name is None
ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
ops = ops + ["r" + op for op in ops]
for op in ops:
# names match, preserve
s = datetime_series.copy()
result = getattr(s, op)(s)
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "changed"
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self, datetime_series):
result = datetime_series.combine_first(datetime_series[:5])
assert result.name == datetime_series.name
def test_getitem_preserve_name(self, datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
result = datetime_series[[0, 2, 4]]
assert result.name == datetime_series.name
result = datetime_series[5:10]
assert result.name == datetime_series.name
def test_pickle_datetimes(self, datetime_series):
unp_ts = self._pickle_roundtrip(datetime_series)
tm.assert_series_equal(unp_ts, datetime_series)
def test_pickle_strings(self, string_series):
unp_series = self._pickle_roundtrip(string_series)
tm.assert_series_equal(unp_series, string_series)
def _pickle_roundtrip(self, obj):
with tm.ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
def test_sort_index_name(self, datetime_series):
result = datetime_series.sort_index(ascending=False)
assert result.name == datetime_series.name
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d)
expected = Series(d, index=sorted(d.keys()))
tm.assert_series_equal(result, expected)
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
expected = Series(dict(data.items()))
tm.assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
series = Series(data)
expected = Series(list(data.values()), list(data.keys()))
tm.assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = Series(A(data))
tm.assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
_d = sorted(d.items())
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
)
tm.assert_series_equal(result, expected)
d["z"] = 111.0
_d.insert(0, ("z", d["z"]))
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
)
result = result.reindex(index=expected.index)
tm.assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = Series(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
result = Series(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
pd.to_timedelta(20, unit="s"): "C",
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
tm.assert_series_equal(result, expected)
def test_sparse_accessor_updates_on_inplace(self):
s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
s.drop([0, 1], inplace=True)
assert s.sparse.density == 1.0
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
# Series of str values should have .str but not .dt/.cat in __dir__
assert "str" in dir(s)
assert "dt" not in dir(s)
assert "cat" not in dir(s)
# similarly for .dt
s = Series(date_range("1/1/2015", periods=5))
assert "dt" in dir(s)
assert "str" not in dir(s)
assert "cat" not in dir(s)
# Similarly for .cat, but with the twist that str and dt should be
# there if the categories are of that type first cat and str.
s = Series(list("abbcd"), dtype="category")
assert "cat" in dir(s)
assert "str" in dir(s) # as it is a string categorical
assert "dt" not in dir(s)
# similar to cat and str
s = Series(date_range("1/1/2015", periods=5)).astype("category")
assert "cat" in dir(s)
assert "str" not in dir(s)
assert "dt" in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = [
"categories",
"codes",
"ordered",
"set_categories",
"add_categories",
"remove_categories",
"rename_categories",
"reorder_categories",
"remove_unused_categories",
"as_ordered",
"as_unordered",
]
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith("_")]
return sorted(set(results))
s = Series(list("aabbcde")).astype("category")
results = get_dir(s)
tm.assert_almost_equal(results, sorted(set(ok_for_cat)))
@pytest.mark.parametrize(
"index",
[
tm.makeUnicodeIndex(10),
tm.makeStringIndex(10),
tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
tm.makeTimedeltaIndex(10),
tm.makeIntIndex(10),
tm.makeUIntIndex(10),
tm.makeIntIndex(10),
""" This file originated from the online analysis project at:
https://github.com/OlafHaag/UCM-WebApp
"""
import itertools
import pandas as pd
import pingouin as pg
import numpy as np
from scipy.stats import wilcoxon
from sklearn.decomposition import PCA
from sklearn.covariance import EllipticEnvelope
def preprocess_data(users, blocks, trials):
""" Clean data.
:param users: Data from users table
:type users: pandas.DataFrame
:param blocks: Data from circletask_blocks table.
:type blocks: pandas.DataFrame
:param trials: Data from circletask_trials table.
:type trials: pandas.DataFrame
:returns: Joined and recoded DataFrame. Number of erroneous blocks. Number of sessions removed as a consequence.
Number of removed trials.
:rtype: tuple[pandas.DataFrame, int, int, int]
"""
blocks, n_errors, invalid_sessions = remove_erroneous_blocks(blocks)
# Merge to 1 table.
df = join_data(users, blocks, trials)
# Remove invalid trials.
cleaned, n_trials_removed = get_valid_trials(df)
return cleaned, n_errors, len(invalid_sessions), n_trials_removed
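# Hedged usage sketch -- the table exports below are assumptions, not part of the
# original project:
# users = pd.read_csv('users.csv')  # must contain an 'id' column
# blocks = pd.read_csv('circletask_blocks.csv', index_col=0)
# trials = pd.read_csv('circletask_trials.csv', index_col=0)
# clean, n_err, n_bad_sessions, n_bad_trials = preprocess_data(users, blocks, trials)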
def remove_erroneous_blocks(blocks, delta_time=2.0, n_blocks=3):
""" Remove sessions with erroneous data due to a NeuroPsy Research App malfunction.
The error causes block data to be duplicated and the values for df1 & df2 multiplied again by 100.
The duplicated blocks are identified by comparing their time stamps to the previous block (less than 2 seconds
difference). If the error caused the session to end early, the whole session is removed.
NeuroPsyResearchApp issue #1.
:param pandas.DataFrame blocks: Data about blocks.
:param float delta_time: Threshold in seconds for which a consecutive block in a session is considered invalid
if it was completed within this period after the previous. Default is 2.0 seconds.
:param int n_blocks: Required number of blocks per session. If a session doesn't have this many blocks,
it gets removed.
:returns: Cleaned block data. Number of errors found. List of sessions that were removed as a consequence.
:rtype: tuple[pandas.DataFrame, int, list]
"""
# Identify duplicated blocks. Consecutive time stamps are usually less than 2 seconds apart.
mask = blocks.groupby(['session_uid'])['time'].diff() < delta_time
try:
n_errors = mask.value_counts()[True]
except KeyError:
n_errors = 0
blocks = blocks.loc[~mask, :]
# Now, after removal of erroneous data a session might not have all 3 blocks we expect. Exclude whole session.
invalid_sessions = blocks['session_uid'].value_counts() != n_blocks
invalid_sessions = invalid_sessions.loc[invalid_sessions].index.to_list()
blocks = blocks.loc[~blocks['session_uid'].isin(invalid_sessions), :]
return blocks, n_errors, invalid_sessions
def join_data(users, blocks, trials):
""" Take data from different database tables and join them to a single DataFrame. Some variables are renamed and
recoded in the process, some are dropped.
:param users: Data from users table
:type users: pandas.DataFrame
:param blocks: Data from circletask_blocks table.
:type blocks: pandas.DataFrame
:param trials: Data from circletask_trials table.
:type trials: pandas.DataFrame
:return: Joined and recoded DataFrame.
:rtype: pandas.DataFrame
"""
# Use users' index instead of id for obfuscation and shorter display.
users_inv_map = pd.Series(users.index, index=users.id)
# Remove trials that don't belong to any block. Those have been excluded.
trials = trials.loc[trials['block_id'].isin(blocks.index), :]
# Start a new table for trials and augment with data from other tables.
df = pd.DataFrame(index=trials.index)
df['user'] = trials.user_id.map(users_inv_map).astype('category')
df['session'] = trials['block_id'].map(blocks['nth_session']).astype('category')
# Map whole sessions to the constraint in the treatment block as a condition for easier grouping during analysis.
df['condition'] = trials['block_id'].map(blocks[['session_uid', 'treatment']].replace(
{'treatment': {'': np.nan}}).groupby('session_uid')['treatment'].ffill().bfill()).astype('category')
df['block'] = trials['block_id'].map(blocks['nth_block']).astype('category')
# Add pre and post labels to trials for each block. Name it task instead of treatment.
# Theoretically, one could have changed number of blocks and order of treatment, but we assume default order here.
df['task'] = trials['block_id'].map(blocks['treatment'].replace('', np.nan).where(~blocks['treatment'].isna(),
blocks['nth_block'].map(
{1: 'pre',
3: 'post'
})
)
).astype('category')
#df['task'] = trials['block_id'].map(blocks['treatment'].replace(to_replace={r'\w+': 1, r'^\s*$': 0}, regex=True)
# ).astype('category')
df = pd.concat((df, trials), axis='columns')
# Add columns for easier filtering.
df['grab_diff'] = (df['df2_grab'] - df['df1_grab']).abs()
df['duration_diff'] = (df['df2_duration'] - df['df1_duration']).abs()
# Exclude columns.
df.drop(columns=['user_id'], inplace=True)
return df
def get_valid_trials(dataframe):
""" Remove trials where sliders where not grabbed concurrently or grabbed at all.
:param dataframe: Trial data.
:type dataframe: pandas.DataFrame
:returns: Filtered trials. Number of removed trials.
:rtype: tuple[pandas.DataFrame, int]
"""
# Remove trials with missing values. This means at least one slider wasn't grabbed.
df = dataframe.dropna(axis='index', how='any')
# Remove trials where sliders where not grabbed concurrently.
mask = ~((df['df1_release'] <= df['df2_grab']) | (df['df2_release'] <= df['df1_grab']))
df = df.loc[mask, :]
n_removed = len(dataframe) - len(df)
return df, n_removed
def get_outlyingness(data, contamination=0.1):
""" Outlier detection from covariance estimation in a Gaussian distributed dataset.
:param data: Data in which to detect outliers. Take care that n_samples > n_features ** 2 .
:type data: pandas.DataFrame
:param contamination: The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Range is (0, 0.5).
:type contamination: float
:returns: Decision on each row if it's an outlier. And contour array for drawing ellipse in graph.
:rtype: tuple[numpy.ndarray, numpy.ndarray]
"""
robust_cov = EllipticEnvelope(support_fraction=1., contamination=contamination)
outlyingness = robust_cov.fit_predict(data)
decision = (outlyingness-1).astype(bool)
# Visualisation.
xx, yy = np.meshgrid(np.linspace(0, 100, 101),
np.linspace(0, 100, 101))
z = robust_cov.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
return decision, z
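# Hedged usage sketch (column names assumed): flag outlying trials in df1/df2 space
# and keep the inliers; z can be drawn as a contour to visualise the fitted ellipse.
# outliers, z = get_outlyingness(df[['df1', 'df2']], contamination=0.05)
# inliers = df.loc[~outliers]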
#ToDo: remove blocks/sessions with sum mean way off.
#ToDo: remove sessions with less than 10 trials in any block.
def get_performance_data(dataframe):
"""[summary]
:param dataframe: [description]
:type dataframe: [type]
"""
dataframe.groupby(['user', 'block', 'task'])[['df1', 'df2']].mean().dropna().sort_index(level=['user','block'])
def get_pca_data(dataframe):
""" Conduct Principal Component Analysis on 2D dataset.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:return: Explained variance, components and means.
:rtype: pandas.DataFrame
"""
# We don't reduce dimensionality, but overlay the 2 principal components in 2D.
pca = PCA(n_components=2)
x = dataframe[['df1', 'df2']].values
try:
# df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.
pca.fit(x)
except ValueError:
# Return empty.
df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])
else:
df = pd.DataFrame({'var_expl': pca.explained_variance_.T,
'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent
'x': pca.components_[:, 0],
'y': pca.components_[:, 1],
'meanx': pca.mean_[0],
'meany': pca.mean_[1],
},
index=[1, 2] # For designating principal components.
)
df.index.rename('PC', inplace=True)
return df
def get_pca_vectors(dataframe):
""" Get principal components as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Tabular PCA data.
:type dataframe: pandas.DataFrame
:return: Principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
# Use the "components" to define the direction of the vectors,
# and the "explained variance" to define the squared-length of the vectors.
directions = dataframe[['x', 'y']] * np.sqrt(dataframe[['var_expl']].values) * 3
# Move the directions by the mean, so we get vectors pointing to the start and vectors pointing to the destination.
vector2 = directions + dataframe[['meanx', 'meany']].values
vectors = list(zip(dataframe[['meanx', 'meany']].values, vector2.values))
return vectors
def get_pca_vectors_by(dataframe, by=None):
""" Get principal components for each group as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:param by: Column to group data by and return 2 vectors for each group.
:type by: str|list
:return: list of principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vector_pairs = list()
if by is None:
pca_df = get_pca_data(dataframe)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
else:
grouped = dataframe.groupby(by, observed=True) # With categorical groupers we want only non-empty groups.
for group, data in grouped:
pca_df = get_pca_data(data)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
# ToDo: Augment by groupby criteria.
return vector_pairs
def get_interior_angle(vec0, vec1):
""" Get the smaller angle between vec0 and vec1 in degrees.
:param vec0: Vector 0
:type vec0: numpy.ndarray
:param vec1: Vector 1
:type vec1: numpy.ndarray
:return: Interior angle between vector0 and vector1 in degrees.
:rtype: float
"""
angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))
degrees = abs(np.degrees(angle))
# Min and max should be between 0° an 90°.
degrees = min(degrees, 180.0 - degrees)
return degrees
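# Quick sanity checks: perpendicular vectors give 90 degrees, a diagonal gives 45.
# get_interior_angle(np.array([1, 0]), np.array([0, 1])) -> 90.0
# get_interior_angle(np.array([1, 0]), np.array([1, 1])) -> 45.0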
def get_ucm_vec(p0=None, p1=None):
""" Returns 2D unit vector in direction of uncontrolled manifold. """
if p0 is None:
p0 = np.array([25, 100])
if p1 is None:
p1 = np.array([100, 25])
parallel = p1 - p0
parallel = parallel / np.linalg.norm(parallel) # Normalize.
return parallel
def get_orthogonal_vec2d(vec):
""" Get a vector that is orthogonal to vec and has same length.
:param vec: 2D Vector
:return: 2D Vector orthogonal to vec.
:rtype: numpy.ndarray
"""
ortho = np.array([-vec[1], vec[0]])
return ortho
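# With the defaults, the UCM direction runs along df1 + df2 = const, roughly
# (0.707, -0.707), and its orthogonal complement is (0.707, 0.707); the two are
# perpendicular by construction:
# v = get_ucm_vec(); np.dot(v, get_orthogonal_vec2d(v)) -> 0.0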
def get_pc_ucm_angles(dataframe, vec_ucm):
""" Computes the interior angles between pca vectors and ucm parallel/orthogonal vectors.
:param dataframe: PCA data.
:type dataframe: pandas.DataFrame
:param vec_ucm: Vector parallel to UCM.
:type vec_ucm: numpy.ndarray
:return: Each angle between principal components and UCM parallel and orthogonal vector.
:rtype: pandas.DataFrame
"""
df_angles = dataframe[['x', 'y']].transform(lambda x: (a:=get_interior_angle(vec_ucm, x), 90.0 - a),
axis='columns').rename(columns={'x': 'parallel', 'y': 'orthogonal'})
df_angles = pd.concat((dataframe[['task', 'PC']], df_angles), axis='columns')
return df_angles
def get_projections(points, vec_ucm):
""" Returns coefficients a and b in x = a*vec_ucm + b*vec_ortho with x being the difference of a data point and
the mean.
Projection is computed using a transformation matrix with ucm parallel and orthogonal vectors as basis.
:param points: Data of 2D points.
:type points: pandas.Dataframe
:param vec_ucm: Unit vector parallel to uncontrolled manifold.
:type vec_ucm: numpy.ndarray
:return: Array with projected lengths onto vector parallel to UCM as 'a', onto vector orthogonal to UCM as 'b'.
:rtype: pandas.Dataframe
"""
# Get the vector orthogonal to the UCM.
vec_ortho = get_orthogonal_vec2d(vec_ucm)
# Build a transformation matrix with vec_ucm and vec_ortho as new basis vectors.
A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.
# Centralize the data. Analogous to calculating across trials deviation from average for each time step.
diffs = points - points.mean()
# For computational efficiency we shortcut the projection calculation with matrix multiplication.
# The actual math behind it:
# coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)
# Biased variance (normalized by (n-1)) of projection onto UCM vector:
# var_ucm = [email protected](diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.
coeffs = diffs@A
coeffs.columns = ['parallel', 'orthogonal']
return coeffs
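# Hedged usage sketch ('trials' is an assumed name): project a block's (df1, df2)
# samples onto the UCM basis and take the biased variances that feed
# get_synergy_indices below.
# proj = get_projections(trials[['df1', 'df2']], get_ucm_vec())
# variances = proj.var(ddof=0).to_frame().T  # columns: 'parallel', 'orthogonal'
# get_synergy_indices(variances)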
def get_synergy_indices(variances, n=2, d=1):
"""
n: Number of degrees of freedom. In our case 2.
d: Dimensionality of performance variable. In our case a scalar (1).
Vucm = 1/N * 1/(n - d) * sum(ProjUCM**2)
Vort = 1/N * 1/(d) * sum(ProjORT**2)
Vtotal = 1/n * (d * Vort + (n-d) * Vucm) # Annul the weights on Vucm and Vort for the sum.
dV = (Vucm - Vort) / Vtotal
dV = n*(Vucm - Vort) / ((n - d)*Vucm + d*Vort)
Zhang (2008) without weighting Vucm, Vort and Vtotal first:
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
dVz = 0.5*ln((n/d + dV) / (n/(n-d) - dV))
dVz = 0.5*ln((2 + dV) / (2 - dV))
Reference: https://www.frontiersin.org/articles/10.3389/fnagi.2019.00032/full#supplementary-material
:param variances: Unweighted variances of parallel and orthogonal projections to the UCM.
:type variances: pandas.DataFrame
:param n: Number of degrees of freedom. Defaults to 2.
:type: int
:param d: Dimensionality of performance variable. Defaults to 1.
:type d: int
:returns: Synergy index, Fisher's z-transformed synergy index.
:rtype: pandas.DataFrame
"""
try:
dV = n * (variances['parallel']/(n-d) - variances['orthogonal']/d) \
/ variances[['parallel', 'orthogonal']].sum(axis='columns')
except KeyError:
synergy_indices = pd.DataFrame(columns=["dV", "dVz"])
else:
dVz = 0.5 * np.log((n/d + dV)/(n/(n-d) - dV))
synergy_indices = pd.DataFrame({"dV": dV, "dVz": dVz})
return synergy_indices
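# Worked example (n=2, d=1): with Vucm=4 and Vort=1,
# dV  = 2 * (4/1 - 1/1) / (4 + 1) = 1.2
# dVz = 0.5 * ln((2 + 1.2) / (2 - 1.2)) = 0.5 * ln(4) ~= 0.693
# i.e. more variance along the UCM than orthogonal to it gives a positive synergy index.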
def get_synergy_idx_bounds(n=2, d=1):
""" Get lower and upper bounds of the synergy index.
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
If all variance lies within the UCM, then Vort=0 and it follows for the upper bound: dV = n/(n-d)
If all variance lies within Vort, then Vucm=0 and it follows for the lower bound: dV = -n/d
:param n: Number of degrees of freedom.
:type: int
:param d: Dimensionality of performance variable.
:type d: int
:returns: Lower and upper bounds of synergy index.
:rtype: tuple
"""
dV_lower = -n/d
dV_upper = n/(n-d)
return dV_lower, dV_upper
def get_mean(dataframe, column, by=None):
""" Return mean values of column x (optionally grouped)
:param dataframe: Data
:type dataframe: pandas.Dataframe
:param column: Column name
:type column: str
:param by: Column names by which to group.
:type by: str|list
:return: mean value, optionally for each group.
:rtype: numpy.float64|pandas.Series
"""
if by is None:
means = dataframe[column].mean()
else:
means = dataframe.groupby(by, observed=True)[column].mean()
return means
def get_descriptive_stats(data, by=None):
""" Return mean and variance statistics for data.
:param data: numerical data.
:type data: pandas.Dataframe
:param by: groupby column name(s)
:type by: str|List
:return: Dataframe with columns mean, var, count and column names of data as rows.
:rtype: pandas.Dataframe
"""
# There's a bug in pandas 1.0.4 where you can't use custom numpy functions in agg anymore (ValueError).
# Note that the variance of projections is usually divided by (n-d) for Vucm and d for Vort. Both are 1 in our case.
# Pandas default var returns unbiased population variance /(n-1). Doesn't make a difference for synergy indices.
f_var = lambda series: series.var(ddof=0)
f_var.__name__ = 'variance' # Column name gets function name.
# When there're no data, return empty DataFrame with columns.
if data.empty:
if by:
data.set_index(by, drop=True, inplace=True)
col_idx = pd.MultiIndex.from_product([data.columns, ['mean', f_var.__name__]])
stats = pd.DataFrame(None, index=data.index, columns=col_idx)
stats['count'] = None
return stats
if not by:
stats = data.agg(['mean', f_var, 'count']).T
stats['count'] = stats['count'].astype(int)
else:
grouped = data.groupby(by, observed=True)
stats = grouped.agg(['mean', f_var])
stats['count'] = grouped.size()
stats.dropna(inplace=True)
return stats
def get_statistics(dataframe):
""" Calculate descriptive statistics including synergy indices for key values of the anaylsis.
:param dataframe: Data from joined table on trials with projections.
:type dataframe: pandas.DataFrame
:return: Descriptive statistics and synergy indices.
:rtype: pandas.DataFrame
"""
groupers = ['user', 'session', 'condition', 'block_id', 'block', 'task']
try:
dataframe[groupers] = dataframe[groupers].astype('category')
except (KeyError, ValueError):
df_stats = get_descriptive_stats(pd.DataFrame(columns=dataframe.columns))
cov = pd.DataFrame(columns=[('df1,df2 covariance', '')])
else:
df_stats = get_descriptive_stats(dataframe[groupers + ['df1', 'df2', 'sum', 'parallel', 'orthogonal']],
by=groupers).drop(columns=[('parallel', 'mean'), # Always equal 0.
('orthogonal', 'mean')])
# Get statistic characteristics of absolute lengths of projections.
length = dataframe.groupby(groupers, observed=True)[['parallel', 'orthogonal']].agg(lambda x: x.abs().mean())
length.columns = pd.MultiIndex.from_product([length.columns, ['absolute average']])
# Get covariance between degrees of freedom.
cov = dataframe.groupby(groupers, observed=True)[['df1', 'df2']].apply(lambda x: np.cov(x.T, ddof=0)[0, 1])
try:
cov = cov.to_frame(('df1,df2 covariance', '')) # MultiIndex.
except AttributeError: # In case cov is an empty Dataframe.
cov = pd.DataFrame(columns=pd.MultiIndex.from_tuples([('df1,df2 covariance', '')]))
# Get synergy indices based on projection variances we just calculated.
df_synergies = get_synergy_indices(df_stats[['parallel', 'orthogonal']].xs('variance', level=1, axis='columns'))
# Before we merge dataframes, give this one a Multiindex, too.
df_synergies.columns = pd.MultiIndex.from_product([df_synergies.columns, ['']])
# Join the 3 statistics to be displayed in a single table.
df = pd.concat((df_stats, cov, length, df_synergies), axis='columns')
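# Assumed completion -- the source is truncated here; per the docstring, the
# combined statistics table is returned.
return df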
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('target', ['D', 'B', 'M'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_quarterly_upsample(self, month, target, convention,
simple_period_range_series):
freq = 'Q-{month}'.format(month=month)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_monthly_upsample(self, target, convention,
simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self, resample_method):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
result = getattr(series.resample('M'), resample_method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
msg = ("Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>,"
" as they are not sub or super periods")
with pytest.raises(IncompatibleFrequency, match=msg):
Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D') -
offsets.Day())
expected = Series(1, index=expected_index)
###########################################################################
# Libraries
import pandas as pd
import os
import unicodedata
###########################################################################
# Functions
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
only_ascii = nfkd_form.encode('ASCII', 'ignore')
only_ascii= only_ascii.decode('utf-8')
return only_ascii
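# Example: remove_accents("Données_Été") -> "Donnees_Ete"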
###########################################################################
# List all the files in the Mauritania FSMS data directory
# into df_run
from os import walk
monRepertoire=r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch"
df_run = pd.DataFrame(columns=['Repertoire','Fichier','Total','Extension'], dtype = str)
i=0
for (repertoire, sousRepertoires, fichiers) in walk(monRepertoire):
for name in fichiers:
df_run.loc[i,"Repertoire"]=str(repertoire)
df_run.loc[i,"Fichier"]=str(name)
df_run.loc[i,"Total"]=os.path.join(str(repertoire),name)
filename, file_extension = os.path.splitext(os.path.join(str(repertoire),str(sousRepertoires),name))
df_run.loc[i,"Extension"]=file_extension
i=i+1
# cleanup
del monRepertoire , fichiers , i , filename , file_extension , name , repertoire , sousRepertoires
############################################################################
# Explore the files in the Mauritania FSMS data directory
# Show all the file types
df_run.Extension.value_counts()
# Split the files by extension
df_run_sav=df_run.loc[df_run.Extension==".sav"]
# df_run_sps=df_run.loc[df_run.Extension==".sps"]
# df_run_xls=df_run.loc[df_run.Extension==".xls"]
# df_run_xlsx=df_run.loc[df_run.Extension==".xlsx"]
# df_run_xml=df_run.loc[df_run.Extension==".xml"]
# df_run_csv=df_run.loc[df_run.Extension==".csv"]
############################################################################
# Read the .sav files
import pyreadstat
#df1, meta1 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2011\Decembre11\Données_FSMS_nov11_26_12.sav")
#df2, meta2 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2011\Janvier11\Données\FSMS_2011_4-2-11_URBAN.sav")
#df3, meta3 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2011\Janvier11\Données\FSMS_2011_RURAL_FINAL.sav")
df4, meta4 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\Mauritania FSMS data\2011\Juin11")
#df5, meta5 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2012\Analyse croise SA_NUT\RIM_FSMS_SMART_juil2012.sav")
#df6, meta6 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2012\Decembre\Donnes_FSMSdec12_HH_commun.sav")
df7, meta7 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2012\Juin\Données_FSMS_juil_12.sav")
#df8, meta8 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2013\Decembre\Données FSMS 13Dec_20_01_14.sav")
df9, meta9 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2013\Juin\FSMS_HH_juil13b_1.sav")
#df10, meta10 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2014\Decembre\Données_FSMS_24_06_15.sav")
df11, meta11 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2014\Juin\Données_FSMS_juin_2014.sav")
#df12, meta12 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2015\Decembre\Données FSMS Jan16_18_02.sav")
df13, meta13 = pyreadstat.read_sav(r"C:\Users\simon\Documents\Simon\Data4Good\9_GeoWatch\2015\Juin\Données_FSMS_juin_15.sav")
# Add the date
df4["FICHIER"]="June-11"
df7["FICHIER"]="June-12"
df9["FICHIER"]="June-13"
df11["FICHIER"]="June-14"
df13["FICHIER"]="June-15"
# Add the questionnaire number
df4["QUEST"]="Q4"
df7["QUEST"]="Q7"
df9["QUEST"]="Q9"
df11["QUEST"]="Q11"
df13["QUEST"]="Q13"
############################################################################
# Rename the columns of the dataframes:
# Uppercase without accents for all df tables
# Column names into lists
Lst_NomCol_df4=[c for c in df4]
Lst_NomCol_df7=[c for c in df7]
Lst_NomCol_df9=[c for c in df9]
Lst_NomCol_df11=[c for c in df11]
Lst_NomCol_df13=[c for c in df13]
# Remove accents + convert to uppercase
Lst_NomCol_df4=[remove_accents(string) for string in Lst_NomCol_df4]
Lst_NomCol_df4=[string.upper() for string in Lst_NomCol_df4]
Lst_NomCol_df7=[remove_accents(string) for string in Lst_NomCol_df7]
Lst_NomCol_df7=[string.upper() for string in Lst_NomCol_df7]
Lst_NomCol_df9=[remove_accents(string) for string in Lst_NomCol_df9]
Lst_NomCol_df9=[string.upper() for string in Lst_NomCol_df9]
Lst_NomCol_df11=[remove_accents(string) for string in Lst_NomCol_df11]
Lst_NomCol_df11=[string.upper() for string in Lst_NomCol_df11]
Lst_NomCol_df13=[remove_accents(string) for string in Lst_NomCol_df13]
Lst_NomCol_df13=[string.upper() for string in Lst_NomCol_df13]
# Replace the column names
df4.columns = [Lst_NomCol_df4]
df7.columns = [Lst_NomCol_df7]
df9.columns = [Lst_NomCol_df9]
df11.columns = [Lst_NomCol_df11]
df13.columns = [Lst_NomCol_df13]
df4.columns = ['/'.join(x) for x in df4.columns.values]
df7.columns = ['/'.join(x) for x in df7.columns.values]
df9.columns = ['/'.join(x) for x in df9.columns.values]
df11.columns = ['/'.join(x) for x in df11.columns.values]
df13.columns = ['/'.join(x) for x in df13.columns.values]
# cleanup
del Lst_NomCol_df4,Lst_NomCol_df7,Lst_NomCol_df9,Lst_NomCol_df11,Lst_NomCol_df13
############################################################################
# Analyse the columns of the dataframes
# Column names into lists
Lst_NomCol_df4=[c for c in df4]
Lst_NomCol_df7=[c for c in df7]
Lst_NomCol_df9=[c for c in df9]
Lst_NomCol_df11=[c for c in df11]
Lst_NomCol_df13=[c for c in df13]
# Column names into a dataframe
Df_Lst_NomCol_df4=pd.DataFrame(Lst_NomCol_df4)
Df_Lst_NomCol_df4["QUEST"]="df4"
Df_Lst_NomCol_df7=pd.DataFrame(Lst_NomCol_df7)
Df_Lst_NomCol_df7["QUEST"]="df7"
Df_Lst_NomCol_df9=pd.DataFrame(Lst_NomCol_df9)
Df_Lst_NomCol_df9["QUEST"]="df9"
Df_Lst_NomCol_df11=pd.DataFrame(Lst_NomCol_df11)
Df_Lst_NomCol_df11["QUEST"]="df11"
Df_Lst_NomCol_df13=pd.DataFrame(Lst_NomCol_df13)
Df_Lst_NomCol_df13["QUEST"]="df13"
# Gather the column names
List_col_df = pd.concat([Df_Lst_NomCol_df4,Df_Lst_NomCol_df7,Df_Lst_NomCol_df9,Df_Lst_NomCol_df11,Df_Lst_NomCol_df13])
# Table with the column names and their presence in the different questionnaires
Tab_col=pd.crosstab([List_col_df[0]], List_col_df["QUEST"], margins=True)
# cleanup
del Df_Lst_NomCol_df4,Df_Lst_NomCol_df7,Df_Lst_NomCol_df9,Df_Lst_NomCol_df11,Df_Lst_NomCol_df13,Lst_NomCol_df4,Lst_NomCol_df7,Lst_NomCol_df9,Lst_NomCol_df11,Lst_NomCol_df13
############################################################################
# Reconstruct the questionnaires from the metadata
# Goal: understand the columns from the questionnaires
# Remove accents and lowercase from the column names
# Note: the code below could be put in a loop, but I don't know how to create table names from the index used in the loop
# sais pas comment créer des noms de table avec l'indice utilisé dans la boucle
# Q4
col_names=meta4.column_names
col_names=[remove_accents(string) for string in col_names]
col_names=[string.upper() for string in col_names]
col_lab=meta4.column_labels
df_col_names=pd.DataFrame(col_names,columns=["col_names"])
df_col_lab=pd.DataFrame(col_lab,columns=["col_lab4"])
col=pd.concat([df_col_names,df_col_lab],axis=1)
var_lab=meta4.variable_value_labels
df_var_lab=pd.DataFrame(list(var_lab.items()),columns=["col_names","var_lab4"])
quest4=col.merge(df_var_lab,how='left',on='col_names')
quest4["ID4"]="Q4"
# Q7
col_names=meta7.column_names
col_names=[remove_accents(string) for string in col_names]
col_names=[string.upper() for string in col_names]
col_lab=meta7.column_labels
df_col_names=pd.DataFrame(col_names,columns=["col_names"])
df_col_lab = pd.DataFrame(col_lab,columns=["col_lab7"])
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
cp[inds] = 0
exp.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
cp[arr_inds] = 0
exp.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# tuple name, e.g. from hierarchical index
self.series.name = ('foo', 'bar', 'baz')
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
def test_to_string(self):
from cStringIO import StringIO
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assert_(retval is None)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, '')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, '')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Name: foo, Length: %d" % len(cp))
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 -1.23\n'
'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n'
'1 1.568\n'
'2 NaN\n'
'3 -3.000\n'
'4 NaN')
self.assertEqual(result, expected)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assert_(getkeys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert_almost_equal(s.sum(), s2.sum())
import pandas.core.nanops as nanops
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
res = nanops.nansum(arr, axis=1)
expected = nanops._nansum(arr, axis=1)
assert_almost_equal(res, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_var(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_skew(self):
from scipy.stats import skew
alt =lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assert_(issubclass(argsorted.dtype.type, np.integer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def _check_stat_op(self, name, alternate, check_objects=False):
from pandas import DateRange
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona))
allna = self.series * nan
self.assert_(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# check DateRange
if check_objects:
s = Series(DateRange('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.] , index=range(3))
result = s.prod()
self.assert_(not isinstance(result, Series))
def test_quantile(self):
from scipy.stats import scoreatpercentile
q = self.ts.quantile(0.1)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90))
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count' : 7, 'unique' : 4,
'top' : 'a', 'freq' : 3}, index=result.index)
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
np.random.seed(12345)
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assert_(not bool_series.all())
self.assert_(bool_series.any())
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv',
'gt', 'ge', 'lt', 'le']
for opname in simple_ops:
_check_op(other, getattr(operator, opname))
_check_op(other, operator.pow, pos_only=True)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.truediv(y, x))
_check_op(other, lambda x, y: operator.floordiv(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x),
pos_only=True)
check(self.ts * 2)
check(self.ts * 0)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x' : 0.})
# it works!
_ = s1 * s2
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assert_(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmin()))
def test_idxmax(self):
# test idxmax
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assert_(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmax()))
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
from pandas.util.testing import rands
import operator
# GH 353
vals = Series([rands(5) for _ in xrange(10)])
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals' : vals})
result = 'foo_' + frame
expected = DataFrame({'vals' : vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
ops = [Series.add, Series.sub, Series.mul, Series.div]
equivs = [operator.add, operator.sub, operator.mul]
if py3compat.PY3:
equivs.append(operator.truediv)
else:
equivs.append(operator.div)
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
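# A quick illustration of the fill_value semantics exercised above: the fill
# value substitutes for a value that is missing on only one side before the op
# is applied, while positions missing on both sides stay NaN, e.g.
# Series([1., nan]).add(Series([nan, nan]), fill_value=0) -> [1.0, nan].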
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combine_first(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_corr(self):
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if int(scipy.__version__.split('.')[1]) < 9:
raise nose.SkipTest
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std()**2)
# partial overlap
self.assertAlmostEqual(self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std()**2)
# No overlap
self.assert_(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.cov(cp)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_value_counts_nunique(self):
s = Series(['a', 'b', 'b', 'b', 'b', 'a', 'c', 'd', 'd', 'a'])
hist = s.value_counts()
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
assert_series_equal(hist, expected)
self.assertEquals(s.nunique(), 4)
# handle NA's properly
s[5:7] = np.nan
hist = s.value_counts()
expected = s.dropna().value_counts()
assert_series_equal(hist, expected)
s = Series({})
hist = s.value_counts()
expected = Series([])
assert_series_equal(hist, expected)
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
def test_sort_index(self):
import random
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assert_(np.isnan(result[-5:]).all())
self.assert_(np.array_equal(result[:-5], np.sort(vals[5:])))
result = ts.order(na_last=False)
self.assert_(np.isnan(result[:5]).all())
self.assert_(np.array_equal(result[5:], np.sort(vals[5:])))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
# ascending=False
ordered = ts.order(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
ordered = ts.order(ascending=False, na_last=False)
assert_almost_equal(expected, ordered.valid().values)
def test_rank(self):
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
exp = rankdata(filled)
exp[mask] = np.nan
assert_almost_equal(ranks, exp)
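# A quick illustration of the behaviour checked above: Series.rank leaves NaN
# entries unranked (NaN in the result), which is why the expected values are
# built by ranking an inf-filled copy with scipy's rankdata and then masking
# the NaN slots back out, e.g. Series([3., nan, 1.]).rank() -> [2.0, nan, 1.0].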
def test_from_csv(self):
self.ts.to_csv('_foo')
ts = Series.from_csv('_foo')
assert_series_equal(self.ts, ts)
self.series.to_csv('_foo')
series = Series.from_csv('_foo')
assert_series_equal(self.series, series)
outfile = open('_foo', 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv('_foo',sep='|')
checkseries = Series({datetime(1998,1,1): 1.0, datetime(1999,1,1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv('_foo',sep='|',parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
os.remove('_foo')
def test_to_csv(self):
self.ts.to_csv('_foo')
lines = open('_foo', 'U').readlines()
assert(lines[1] != '\n')
os.remove('_foo')
def test_to_dict(self):
self.assert_(np.array_equal(Series(self.ts.to_dict()), self.ts))
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
self.assert_(isinstance(expected, Series))
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_dict_equal(result, ts, compare_keys=False)
def test_isnull(self):
ser = Series([0,5.4,3,nan,-0.001])
assert_series_equal(ser.isnull(), Series([False,False,False,True,False]))
ser = Series(["hi","",nan])
assert_series_equal(ser.isnull(), Series([False,False,True]))
def test_notnull(self):
ser = Series([0,5.4,3,nan,-0.001])
assert_series_equal(ser.notnull(), Series([True,True,True,False,True]))
ser = Series(["hi","",nan])
assert_series_equal(ser.notnull(), Series([True,True,False]))
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)
offset = datetools.bday
shifted = self.ts.shift(1, offset=offset)
unshifted = shifted.shift(-1, offset=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, offset=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, timeRule='WEEKDAY')
unshifted = shifted.shift(-1, timeRule='WEEKDAY')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_truncate(self):
offset = datetools.bday
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert(len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert(len(truncated) == 0)
self.assertRaises(Exception, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_asof(self):
self.ts[5:10] = np.NaN
self.ts[15:20] = np.NaN
val1 = self.ts.asof(self.ts.index[7])
val2 = self.ts.asof(self.ts.index[19])
self.assertEqual(val1, self.ts[4])
self.assertEqual(val2, self.ts[14])
# accepts strings
val1 = self.ts.asof(str(self.ts.index[7]))
self.assertEqual(val1, self.ts[4])
# in there
self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])
# no as of value
d = self.ts.index[0] - datetools.bday
self.assert_(np.isnan(self.ts.asof(d)))
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in merged.iteritems():
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in merged.iteritems():
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_(np.array_equal(result, self.ts * 2))
def test_map_int(self):
left = Series({'a' : 1., 'b' : 2., 'c' : 3., 'd' : 4})
right = Series({1 : 11, 2 : 22, 3 : 33})
self.assert_(left.dtype == np.float_)
self.assert_(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assert_(merged.dtype == np.float_)
self.assert_(isnull(merged['d']))
self.assert_(not isnull(merged['c']))
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
self.assert_(result.dtype == np.object_)
self.assert_(isinstance(result[0], Decimal))
def test_apply(self):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# does not return Series
result = self.ts.apply(lambda x: x.values * 2)
assert_series_equal(result, self.ts * 2)
def test_align(self):
def _check_align(a, b, how='left'):
aa, ab = a.align(b, join=how)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
| assert_series_equal(aa, ea) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the previously defined values should
# remain unchanged

assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
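# A quick illustration of the two calling styles above: with a single array
# str_cat collapses it into one joined string, while passing a second array
# concatenates element-wise, e.g.
# strings.str_cat(['a', 'b'], ['x', 'y']) -> array(['ax', 'by'], dtype=object).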
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
| tm.assert_frame_equal(result, exp) | pandas.util.testing.assert_frame_equal |
# definition of add_dataset, which creates the meta-dataset
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype
from scipy.stats import pearsonr
from sklearn.model_selection import train_test_split
from supervised.automl import AutoML
import os
from sklearn.preprocessing import LabelEncoder
import numpy as np
rootdir = os.path.dirname(__file__)
results_dir = rootdir + '/results/'
dataset_dir = rootdir + '/datasets_list_final/'
datasets_to_add_dir = rootdir + '/datasets_list_toadd/'
algorithm_list = ['Linear', 'Random Forest', 'Decision Tree', 'Neural Network']
def encode_y(y):
le = LabelEncoder()
le.fit(y)
y_enc = le.transform(y)
return y_enc
def compute_max_corr(df):
y = encode_y(df[df.columns[-1]])
y = pd.Series(y)
corr = df[df.columns[:-1]].corrwith(y)
return np.max(np.absolute(corr))
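def _example_compute_max_corr():
    # Minimal illustration of the input layout compute_max_corr expects:
    # feature columns first, the class label in the last column. The frame
    # below is a made-up toy example, not one of the benchmark datasets, and
    # this helper is not called anywhere in the pipeline.
    demo = pd.DataFrame({
        "f1": [0.0, 0.0, 1.0, 1.0],
        "f2": [5.0, 4.0, 2.0, 1.0],
        "label": ["a", "a", "b", "b"],
    })
    # f1 mirrors the label encoding exactly, so the largest absolute Pearson
    # correlation returned here is 1.0.
    return compute_max_corr(demo)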
def compute_max_corr_between_X_and_y(X, y):
y = encode_y(y)
y = pd.Series(y)
X = X.apply(pd.to_numeric, errors='ignore')
return np.max(np.absolute(X.apply(lambda x: x.corr(y) if | is_numeric_dtype(x) | pandas.core.dtypes.common.is_numeric_dtype |
import gzip
import os
import sys
import logging
from collections import defaultdict
import pandas as pd
from dae.utils.regions import Region
logger = logging.getLogger(__name__)
#
# Exon
#
class Exon:
def __init__(
self,
start=None,
stop=None,
frame=None,
number=None,
cds_start=None,
cds_stop=None,
):
self.start = start
self.stop = stop
self.frame = frame # related to cds
# for GTF
self.number = number # exon number
self.cds_start = cds_start
self.cds_stop = cds_stop
def __repr__(self):
return (
f"Exon(start={self.start}; stop={self.stop}; "
f"number={self.number})"
)
class TranscriptModel:
def __init__(
self,
gene=None,
tr_id=None,
tr_name=None,
chrom=None,
strand=None,
tx=None,
cds=None,
exons=None,
start_codon=None,
stop_codon=None,
is_coding=False,
attributes={},
):
self.gene = gene
self.tr_id = tr_id
self.tr_name = tr_name
self.chrom = chrom
self.strand = strand
self.tx = tx
self.cds = cds
self.exons = exons if exons is not None else []
# for GTF
self.utrs = []
self.start_codon = start_codon
self.stop_codon = stop_codon
self._is_coding = (
is_coding # it could also be derived from the cds start and stop
)
self.attributes = attributes
def is_coding(self):
if self.cds[0] >= self.cds[1]:
return False
return True
def CDS_regions(self, ss=0):
if self.cds[0] >= self.cds[1]:
return []
cds_regions = []
k = 0
while self.exons[k].stop < self.cds[0]:
k += 1
if self.cds[1] <= self.exons[k].stop:
cds_regions.append(
Region(chrom=self.chrom, start=self.cds[0], stop=self.cds[1])
)
return cds_regions
cds_regions.append(
Region(
chrom=self.chrom,
start=self.cds[0],
stop=self.exons[k].stop + ss,
)
)
k += 1
while k < len(self.exons) and self.exons[k].stop <= self.cds[1]:
if self.exons[k].stop < self.cds[1]:
cds_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start - ss,
stop=self.exons[k].stop + ss,
)
)
k += 1
else:
cds_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start - ss,
stop=self.exons[k].stop,
)
)
return cds_regions
if k < len(self.exons) and self.exons[k].start <= self.cds[1]:
cds_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start - ss,
stop=self.cds[1],
)
)
return cds_regions
def UTR5_regions(self):
if self.cds[0] >= self.cds[1]:
return []
utr5_regions = []
k = 0
if self.strand == "+":
while self.exons[k].stop < self.cds[0]:
utr5_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start,
stop=self.exons[k].stop,
)
)
k += 1
if self.exons[k].start < self.cds[0]:
utr5_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start,
stop=self.cds[0] - 1,
)
)
else:
while self.exons[k].stop < self.cds[1]:
k += 1
if self.exons[k].stop == self.cds[1]:
k += 1
else:
utr5_regions.append(
Region(
chrom=self.chrom,
start=self.cds[1] + 1,
stop=self.exons[k].stop,
)
)
k += 1
for e in self.exons[k:]:
utr5_regions.append(
Region(chrom=self.chrom, start=e.start, stop=e.stop)
)
return utr5_regions
def UTR3_regions(self):
if self.cds[0] >= self.cds[1]:
return []
utr3_regions = []
k = 0
if self.strand == "-":
while self.exons[k].stop < self.cds[0]:
utr3_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start,
stop=self.exons[k].stop,
)
)
k += 1
if self.exons[k].start < self.cds[0]:
utr3_regions.append(
Region(
chrom=self.chrom,
start=self.exons[k].start,
stop=self.cds[0] - 1,
)
)
else:
while self.exons[k].stop < self.cds[1]:
k += 1
if self.exons[k].stop == self.cds[1]:
k += 1
else:
utr3_regions.append(
Region(
chrom=self.chrom,
start=self.cds[1] + 1,
stop=self.exons[k].stop,
)
)
k += 1
for e in self.exons[k:]:
utr3_regions.append(
Region(chrom=self.chrom, start=e.start, stop=e.stop)
)
return utr3_regions
def all_regions(self, ss=0, prom=0):
all_regions = []
if ss == 0:
for e in self.exons:
all_regions.append(
Region(chrom=self.chrom, start=e.start, stop=e.stop)
)
else:
for e in self.exons:
if e.stop <= self.cds[0]:
all_regions.append(
Region(chrom=self.chrom, start=e.start, stop=e.stop)
)
elif e.start <= self.cds[0]:
if e.stop >= self.cds[1]:
all_regions.append(
Region(
chrom=self.chrom, start=e.start, stop=e.stop
)
)
else:
all_regions.append(
Region(
chrom=self.chrom,
start=e.start,
stop=e.stop + ss,
)
)
elif e.start > self.cds[1]:
all_regions.append(
Region(chrom=self.chrom, start=e.start, stop=e.stop)
)
else:
if e.stop >= self.cds[1]:
all_regions.append(
Region(
chrom=self.chrom,
start=e.start - ss,
stop=e.stop,
)
)
else:
all_regions.append(
Region(
chrom=self.chrom,
start=e.start - ss,
stop=e.stop + ss,
)
)
if prom != 0:
if self.strand == "+":
all_regions[0] = Region(
chrom=all_regions[0].chrom,
start=all_regions[0].start - prom,
stop=all_regions[0].stop,
)
else:
all_regions[-1] = Region(
chrom=all_regions[-1].chrom,
start=all_regions[-1].start,
stop=all_regions[-1].stop + prom,
)
return all_regions
def total_len(self):
length = 0
for reg in self.exons:
length += reg.stop - reg.start + 1
return length
def CDS_len(self):
cds_region = self.CDS_regions()
length = 0
for reg in cds_region:
length += reg.stop - reg.start + 1
return length
def UTR3_len(self):
utr3 = self.UTR3_regions()
length = 0
for reg in utr3:
length += reg.stop - reg.start + 1
return length
def UTR5_len(self):
utr5 = self.UTR5_regions()
length = 0
for reg in utr5:
length += reg.stop - reg.start + 1
return length
def calc_frames(self):
length = len(self.exons)
fms = []
if self.cds[0] > self.cds[1]:
fms = [-1] * length
elif self.strand == "+":
k = 0
while self.exons[k].stop < self.cds[0]:
fms.append(-1)
k += 1
fms.append(0)
if self.exons[k].stop < self.cds[1]:
fms.append((self.exons[k].stop - self.cds[0] + 1) % 3)
k += 1
while self.exons[k].stop < self.cds[1] and k < length:
fms.append(
(fms[k] + self.exons[k].stop - self.exons[k].start + 1) % 3
)
k += 1
fms += [-1] * (length - len(fms))
else:
k = length - 1
while self.exons[k].start > self.cds[1]:
fms.append(-1)
k -= 1
fms.append(0)
if self.cds[0] < self.exons[k].start:
fms.append((self.cds[1] - self.exons[k].start + 1) % 3)
k -= 1
while self.cds[0] < self.exons[k].start and k > -1:
fms.append(
(fms[-1] + self.exons[k].stop - self.exons[k].start + 1)
% 3
)
k -= 1
fms += [-1] * (length - len(fms))
fms = fms[::-1]
assert len(self.exons) == len(fms)
return fms
def update_frames(self):
fms = self.calc_frames()
for e, f in zip(self.exons, fms):
e.frame = f
def test_frames(self, update=False):
fms = self.calc_frames()
for e, f in zip(self.exons, fms):
if e.frame != f:
return False
return True
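def _example_transcript_model():
    # Minimal illustration (invented coordinates; start/stop treated as
    # inclusive, matching the length computations above) of assembling and
    # querying a plus-strand transcript. Not referenced elsewhere in this
    # module.
    exons = [Exon(start=1, stop=100), Exon(start=201, stop=300),
             Exon(start=401, stop=500)]
    tm = TranscriptModel(
        gene="DEMO", tr_id="DEMO_tx1", tr_name="DEMO_tx1", chrom="1",
        strand="+", tx=(1, 500), cds=(51, 450), exons=exons,
    )
    tm.update_frames()              # exon frames become [0, 2, 0] here
    cds_regions = tm.CDS_regions()  # coding pieces 51-100, 201-300, 401-450
    return tm, cds_regions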
#
# GeneModels
#
class GeneModels:
def __init__(self, name=None, location=None):
self.name = name
self.location = location
self._shift = None
self._alternative_names = None
self.utr_models = defaultdict(lambda: defaultdict(list))
self.transcript_models = {}
self.gene_models = defaultdict(list)
def _add_transcript_model(self, tm):
assert tm.tr_id not in self.transcript_models
self.transcript_models[tm.tr_id] = tm
self.gene_models[tm.gene].append(tm)
self.utr_models[tm.chrom][tm.tx].append(tm)
def _update_indexes(self):
self.gene_models = defaultdict(list)
self.utr_models = defaultdict(lambda: defaultdict(list))
for tm in self.transcript_models.values():
self.gene_models[tm.gene].append(tm)
self.utr_models[tm.chrom][tm.tx].append(tm)
def gene_names(self):
if self.gene_models is None:
print(
"Gene Models haven't been created/uploaded yet! "
"Use either loadGeneModels function or "
"self.createGeneModelDict function"
)
return None
return list(self.gene_models.keys())
def gene_models_by_gene_name(self, name):
return self.gene_models.get(name, None)
# def gene_models_by_location(self, chrom, pos1, pos2=None):
# result = []
# if pos2 is None:
# for key in self.utr_models[chrom]:
# if pos1 >= key[0] and pos1 <= key[1]:
# result.extend(self.utr_models[chrom][key])
# else:
# if pos2 < pos1:
# pos1, pos2 = pos2, pos1
# for key in self.utr_models[chrom]:
# if (pos1 <= key[0] and pos2 >= key[0]) or (
# pos1 >= key[0] and pos1 <= key[1]
# ):
# result.extend(self.utr_models[chrom][key])
# return result
def relabel_chromosomes(self, relabel=None, map_file=None):
assert relabel or map_file
if not relabel:
with open(map_file) as f:
relabel = dict([line.strip("\n\r").split()[:2] for line in f])
self.utr_models = {
relabel[chrom]: v
for chrom, v in self.utr_models.items()
if chrom in relabel
}
self.transcript_models = {
tid: tm
for tid, tm in self.transcript_models.items()
if tm.chrom in relabel
}
for tm in self.transcript_models.values():
tm.chrom = relabel[tm.chrom]
def _save_gene_models(self, outfile):
outfile.write(
"\t".join(
[
"chr",
"trID",
"trOrigId",
"gene",
"strand",
"tsBeg",
"txEnd",
"cdsStart",
"cdsEnd",
"exonStarts",
"exonEnds",
"exonFrames",
"atts",
]
)
)
outfile.write("\n")
for tm in self.transcript_models.values():
exon_starts = ",".join([str(e.start) for e in tm.exons])
exon_ends = ",".join([str(e.stop) for e in tm.exons])
exon_frames = ",".join([str(e.frame) for e in tm.exons])
add_atts = ";".join(
[
k + ":" + str(v).replace(":", "_")
for k, v in list(tm.attributes.items())
]
)
cs = [
tm.chrom,
tm.tr_id,
tm.tr_name,
tm.gene,
tm.strand,
tm.tx[0],
tm.tx[1],
tm.cds[0],
tm.cds[1],
exon_starts,
exon_ends,
exon_frames,
add_atts,
]
outfile.write("\t".join([str(x) if x else "" for x in cs]))
outfile.write("\n")
def save(self, output_filename, gzipped=True):
if gzipped:
if not output_filename.endswith(".gz"):
output_filename = f"{output_filename}.gz"
with gzip.open(output_filename, "wt") as outfile:
self._save_gene_models(outfile)
else:
with open(output_filename, "wt") as outfile:
self._save_gene_models(outfile)
def load_default_gene_models_format(
filename, gene_mapping_file=None, nrows=None
):
df = pd.read_csv(
filename,
sep="\t",
nrows=nrows,
dtype={
"chr": str,
"trID": str,
"trOrigId": str,
"gene": str,
"strand": str,
"atts": str,
},
)
expected_columns = [
"chr",
"trID",
"gene",
"strand",
"tsBeg",
"txEnd",
"cdsStart",
"cdsEnd",
"exonStarts",
"exonEnds",
"exonFrames",
"atts",
]
if not set(expected_columns) <= set(df.columns):
return None
if "trOrigId" not in df.columns:
tr_names = pd.Series(data=df["trID"].values)
df["trOrigId"] = tr_names
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
for line in records:
exon_starts = list(map(int, line["exonStarts"].split(",")))
exon_ends = list(map(int, line["exonEnds"].split(",")))
exon_frames = list(map(int, line["exonFrames"].split(",")))
assert len(exon_starts) == len(exon_ends) == len(exon_frames)
exons = []
for start, end, frame in zip(exon_starts, exon_ends, exon_frames):
exons.append(Exon(start=start, stop=end, frame=frame))
attributes = {}
atts = line.get("atts")
if atts and isinstance(atts, str):
attributes = dict(
[a.split(":") for a in line.get("atts").split(";")]
)
tm = TranscriptModel(
gene=line["gene"],
tr_id=line["trID"],
tr_name=line["trOrigId"],
chrom=line["chr"],
strand=line["strand"],
tx=(line["tsBeg"], line["txEnd"]),
cds=(line["cdsStart"], line["cdsEnd"]),
exons=exons,
attributes=attributes,
)
gm.transcript_models[tm.tr_id] = tm
gm._update_indexes()
if nrows is not None:
return True
return gm
def load_ref_flat_gene_models_format(
filename, gene_mapping_file=None, nrows=None
):
expected_columns = [
"#geneName",
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
]
df = parse_raw(filename, expected_columns, nrows=nrows)
if df is None:
return None
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
transcript_ids_counter = defaultdict(int)
for rec in records:
gene = rec["#geneName"]
tr_name = rec["name"]
chrom = rec["chrom"]
strand = rec["strand"]
tx = (int(rec["txStart"]) + 1, int(rec["txEnd"]))
cds = (int(rec["cdsStart"]) + 1, int(rec["cdsEnd"]))
exon_starts = list(map(int, rec["exonStarts"].strip(",").split(",")))
exon_ends = list(map(int, rec["exonEnds"].strip(",").split(",")))
assert len(exon_starts) == len(exon_ends)
exons = [
Exon(start + 1, end) for start, end in zip(exon_starts, exon_ends)
]
transcript_ids_counter[tr_name] += 1
tr_id = f"{tr_name}_{transcript_ids_counter[tr_name]}"
tm = TranscriptModel(
gene=gene,
tr_id=tr_id,
tr_name=tr_name,
chrom=chrom,
strand=strand,
tx=tx,
cds=cds,
exons=exons,
)
tm.update_frames()
gm._add_transcript_model(tm)
return gm
def load_ref_seq_gene_models_format(
filename, gene_mapping_file=None, nrows=None
):
expected_columns = [
"#bin",
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"score",
"name2",
"cdsStartStat",
"cdsEndStat",
"exonFrames",
]
df = parse_raw(filename, expected_columns, nrows=nrows)
if df is None:
return None
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
transcript_ids_counter = defaultdict(int)
for rec in records:
gene = rec["name2"]
tr_name = rec["name"]
chrom = rec["chrom"]
strand = rec["strand"]
tx = (int(rec["txStart"]) + 1, int(rec["txEnd"]))
cds = (int(rec["cdsStart"]) + 1, int(rec["cdsEnd"]))
exon_starts = list(map(int, rec["exonStarts"].strip(",").split(",")))
exon_ends = list(map(int, rec["exonEnds"].strip(",").split(",")))
assert len(exon_starts) == len(exon_ends)
exons = [
Exon(start + 1, end) for start, end in zip(exon_starts, exon_ends)
]
transcript_ids_counter[tr_name] += 1
tr_id = f"{tr_name}_{transcript_ids_counter[tr_name]}"
attributes = {
k: rec[k]
for k in [
"#bin",
"score",
"exonCount",
"cdsStartStat",
"cdsEndStat",
"exonFrames",
]
}
tm = TranscriptModel(
gene=gene,
tr_id=tr_id,
tr_name=tr_name,
chrom=chrom,
strand=strand,
tx=tx,
cds=cds,
exons=exons,
attributes=attributes,
)
tm.update_frames()
gm._add_transcript_model(tm)
return gm
def probe_header(filename, expected_columns, comment=None):
df = pd.read_csv(filename, sep="\t", nrows=1, header=None, comment=comment)
return list(df.iloc[0, :]) == expected_columns
def probe_columns(filename, expected_columns, comment=None):
df = pd.read_csv(filename, sep="\t", nrows=1, header=None, comment=comment)
return list(df.columns) == list(range(0, len(expected_columns)))
def parse_raw(filename, expected_columns, nrows=None, comment=None):
if probe_header(filename, expected_columns, comment=comment):
df = pd.read_csv(filename, sep="\t", nrows=nrows, comment=comment)
assert list(df.columns) == expected_columns
return df
elif probe_columns(filename, expected_columns, comment=comment):
df = pd.read_csv(
filename,
sep="\t",
nrows=nrows,
header=None,
names=expected_columns,
comment=comment,
)
assert list(df.columns) == expected_columns
return df
def load_ccds_gene_models_format(filename, gene_mapping_file=None, nrows=None):
expected_columns = [
# CCDS is identical with RefSeq
"#bin",
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"score",
"name2",
"cdsStartStat",
"cdsEndStat",
"exonFrames",
]
df = parse_raw(filename, expected_columns, nrows=nrows)
if df is None:
return None
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
transcript_ids_counter = defaultdict(int)
gm._alternative_names = {}
if gene_mapping_file is not None:
gm._alternative_names = gene_mapping(gene_mapping_file)
for rec in records:
gene = rec["name"]
gene = gm._alternative_names.get(gene, gene)
tr_name = rec["name"]
chrom = rec["chrom"]
strand = rec["strand"]
tx = (int(rec["txStart"]) + 1, int(rec["txEnd"]))
cds = (int(rec["cdsStart"]) + 1, int(rec["cdsEnd"]))
exon_starts = list(map(int, rec["exonStarts"].strip(",").split(",")))
exon_ends = list(map(int, rec["exonEnds"].strip(",").split(",")))
assert len(exon_starts) == len(exon_ends)
exons = [
Exon(start + 1, end) for start, end in zip(exon_starts, exon_ends)
]
transcript_ids_counter[tr_name] += 1
tr_id = f"{tr_name}_{transcript_ids_counter[tr_name]}"
attributes = {
k: rec[k]
for k in [
"#bin",
"score",
"exonCount",
"cdsStartStat",
"cdsEndStat",
"exonFrames",
]
}
tm = TranscriptModel(
gene=gene,
tr_id=tr_id,
tr_name=tr_name,
chrom=chrom,
strand=strand,
tx=tx,
cds=cds,
exons=exons,
attributes=attributes,
)
tm.update_frames()
gm._add_transcript_model(tm)
return gm
def load_known_gene_models_format(
filename, gene_mapping_file=None, nrows=None):
expected_columns = [
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"proteinID",
"alignID",
]
df = parse_raw(filename, expected_columns, nrows=nrows)
if df is None:
return None
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
transcript_ids_counter = defaultdict(int)
gm._alternative_names = {}
if gene_mapping_file is not None:
gm._alternative_names = gene_mapping(gene_mapping_file)
for rec in records:
gene = rec["name"]
gene = gm._alternative_names.get(gene, gene)
tr_name = rec["name"]
chrom = rec["chrom"]
strand = rec["strand"]
tx = (int(rec["txStart"]) + 1, int(rec["txEnd"]))
cds = (int(rec["cdsStart"]) + 1, int(rec["cdsEnd"]))
exon_starts = list(map(int, rec["exonStarts"].strip(",").split(",")))
exon_ends = list(map(int, rec["exonEnds"].strip(",").split(",")))
assert len(exon_starts) == len(exon_ends)
exons = [
Exon(start + 1, end) for start, end in zip(exon_starts, exon_ends)
]
transcript_ids_counter[tr_name] += 1
tr_id = f"{tr_name}_{transcript_ids_counter[tr_name]}"
attributes = {k: rec[k] for k in ["proteinID", "alignID"]}
tm = TranscriptModel(
gene=gene,
tr_id=tr_id,
tr_name=tr_name,
chrom=chrom,
strand=strand,
tx=tx,
cds=cds,
exons=exons,
attributes=attributes,
)
tm.update_frames()
gm._add_transcript_model(tm)
return gm
def load_ucscgenepred_models_format(
filename, gene_mapping_file=None, nrows=None):
"""
table genePred
"A gene prediction."
(
string name; "Name of gene"
string chrom; "Chromosome name"
char[1] strand; "+ or - for strand"
uint txStart; "Transcription start position"
uint txEnd; "Transcription end position"
uint cdsStart; "Coding region start"
uint cdsEnd; "Coding region end"
uint exonCount; "Number of exons"
uint[exonCount] exonStarts; "Exon start positions"
uint[exonCount] exonEnds; "Exon end positions"
)
table genePredExt
"A gene prediction with some additional info."
(
string name; "Name of gene (usually transcript_id from GTF)"
string chrom; "Chromosome name"
char[1] strand; "+ or - for strand"
uint txStart; "Transcription start position"
uint txEnd; "Transcription end position"
uint cdsStart; "Coding region start"
uint cdsEnd; "Coding region end"
uint exonCount; "Number of exons"
uint[exonCount] exonStarts; "Exon start positions"
uint[exonCount] exonEnds; "Exon end positions"
int score; "Score"
string name2; "Alternate name (e.g. gene_id from GTF)"
string cdsStartStat; "Status of CDS start annotation (none,
unknown, incomplete, or complete)"
string cdsEndStat; "Status of CDS end annotation (none, unknown,
incomplete, or complete)"
lstring exonFrames; "Exon frame offsets {0,1,2}"
)
"""
expected_columns = [
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"score",
"name2",
"cdsStartStat",
"cdsEndStat",
"exonFrames",
]
df = parse_raw(filename, expected_columns[:10], nrows=nrows)
if df is None:
df = parse_raw(filename, expected_columns, nrows=nrows)
if df is None:
return None
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
transcript_ids_counter = defaultdict(int)
gm._alternative_names = {}
if gene_mapping_file is not None:
gm._alternative_names = gene_mapping(gene_mapping_file)
for rec in records:
gene = rec.get("name2")
if not gene:
gene = rec["name"]
gene = gm._alternative_names.get(gene, gene)
tr_name = rec["name"]
chrom = rec["chrom"]
strand = rec["strand"]
tx = (int(rec["txStart"]) + 1, int(rec["txEnd"]))
cds = (int(rec["cdsStart"]) + 1, int(rec["cdsEnd"]))
exon_starts = list(map(int, rec["exonStarts"].strip(",").split(",")))
exon_ends = list(map(int, rec["exonEnds"].strip(",").split(",")))
assert len(exon_starts) == len(exon_ends)
exons = [
Exon(start + 1, end) for start, end in zip(exon_starts, exon_ends)
]
transcript_ids_counter[tr_name] += 1
tr_id = f"{tr_name}_{transcript_ids_counter[tr_name]}"
attributes = {}
for attr in expected_columns[10:]:
if attr in rec:
attributes[attr] = rec.get(attr)
tm = TranscriptModel(
gene=gene,
tr_id=tr_id,
tr_name=tr_name,
chrom=chrom,
strand=strand,
tx=tx,
cds=cds,
exons=exons,
attributes=attributes,
)
tm.update_frames()
gm._add_transcript_model(tm)
return gm
def load_gtf_gene_models_format(filename, gene_mapping_file=None, nrows=None):
expected_columns = [
"seqname",
"source",
"feature",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
# "comments",
]
df = parse_raw(filename, expected_columns, nrows=nrows, comment="#",)
if df is None:
expected_columns.append("comment")
df = parse_raw(filename, expected_columns, nrows=nrows, comment="#")
if df is None:
return None
def parse_gtf_attributes(attributes):
attributes = list(
filter(lambda x: x, [a.strip() for a in attributes.split(";")])
)
result = {}
for attr in attributes:
key, value = attr.split(" ", 1)
result[key.strip()] = value.strip('"').strip()
return result
gm = GeneModels(location=filename)
records = df.to_dict(orient="records")
for rec in records:
feature = rec["feature"]
if feature == "gene":
continue
attributes = parse_gtf_attributes(rec["attributes"])
tr_id = attributes["transcript_id"]
if feature in set(["transcript", "Selenocysteine"]):
if feature == "Selenocysteine" and tr_id in gm.transcript_models:
continue
if tr_id in gm.transcript_models:
raise ValueError(
f"{tr_id} of {feature} already in transcript models"
)
tm = TranscriptModel(
gene=attributes["gene_name"],
tr_id=tr_id,
tr_name=tr_id,
chrom=rec["seqname"],
strand=rec["strand"],
tx=(rec["start"], rec["end"]),
cds=(rec["end"], rec["start"]),
attributes=attributes,
)
gm._add_transcript_model(tm)
continue
if feature in {"CDS", "exon"}:
if tr_id not in gm.transcript_models:
raise ValueError(
f"exon or CDS transcript {tr_id} not found "
f"in transctipt models"
)
exon_number = int(attributes["exon_number"])
tm = gm.transcript_models[tr_id]
if len(tm.exons) < exon_number:
tm.exons.append(Exon())
assert len(tm.exons) >= exon_number
exon = tm.exons[exon_number - 1]
if feature == "exon":
exon.start = rec["start"]
exon.stop = rec["end"]
exon.frame = -1
exon.number = exon_number
continue
if feature == "CDS":
exon.cds_start = rec["start"]
exon.cds_stop = rec["end"]
exon.frame = rec["phase"]
tm._is_coding = True
continue
if feature in {"UTR", "5UTR", "3UTR", "start_codon", "stop_codon"}:
exon_number = int(attributes["exon_number"])
tm = gm.transcript_models[tr_id]
if feature in {"UTR", "5UTR", "3UTR"}:
tm.utrs.append((rec["start"], rec["end"], exon_number))
continue
if feature == "start_codon":
tm.start_codon = (rec["start"], rec["end"], exon_number)
if feature == "stop_codon":
tm.stop_codon = (rec["start"], rec["end"], exon_number)
cds = tm.cds
tm.cds = (min(cds[0], rec["start"]), max(cds[1], rec["end"]))
continue
raise ValueError(f"unknown feature {feature} found in {filename}")
for tm in gm.transcript_models.values():
tm.exons = sorted(tm.exons, key=lambda x: x.start)
tm.utrs = sorted(tm.utrs, key=lambda x: x[0])
tm.update_frames()
return gm
def gene_mapping(filename):
"""
alternative names for genes
assume that its first line has two column names
"""
df = pd.read_csv(filename, sep="\t")
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Quantify the variability of behavioral metrics within and between labs of mouse behavior.
This script doesn't perform any analysis but plots summary statistics over labs.
<NAME>
16 Jan 2020
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from os.path import join
import seaborn as sns
from paper_behavior_functions import (query_sessions_around_criterion, seaborn_style,
institution_map, group_colors, figpath)
from dj_tools import dj2pandas, fit_psychfunc
from ibl_pipeline import behavior, subject, reference
import scikit_posthocs as sp
# Settings
fig_path = figpath()
# Query sessions
sessions = query_sessions_around_criterion(criterion='trained', days_from_criterion=[2, 0])[0]
sessions = sessions * subject.Subject * subject.SubjectLab * reference.Lab
# Create dataframe with behavioral metrics of all mice
learned = pd.DataFrame(columns=['mouse', 'lab', 'perf_easy', 'n_trials',
'threshold', 'bias', 'reaction_time',
'lapse_low', 'lapse_high'])
for i, nickname in enumerate(np.unique(sessions.fetch('subject_nickname'))):
if np.mod(i+1, 10) == 0:
print('Loading data of subject %d of %d' % (i+1, len(
np.unique(sessions.fetch('subject_nickname')))))
# Get the trials of the sessions around criterion
trials = (sessions * behavior.TrialSet.Trial
& 'subject_nickname = "%s"' % nickname).fetch(format='frame')
trials = trials.reset_index()
# Add n-trials per day
ntrials_perday = trials.groupby('session_uuid').count()['trial_id'].mean()
# Fit a psychometric function to these trials and get fit results
fit_df = dj2pandas(trials)
fit_result = fit_psychfunc(fit_df)
# Get RT, performance and number of trials
reaction_time = trials['rt'].median()*1000
perf_easy = trials['correct_easy'].mean()*100
ntrials_perday = trials.groupby('session_uuid').count()['trial_id'].mean()
# Add results to dataframe
learned.loc[i, 'mouse'] = nickname
learned.loc[i, 'lab'] = (sessions & 'subject_nickname = "%s"' % nickname).fetch(
'institution_short')[0]
learned.loc[i, 'perf_easy'] = perf_easy
learned.loc[i, 'n_trials'] = ntrials_perday
learned.loc[i, 'reaction_time'] = reaction_time
learned.loc[i, 'threshold'] = fit_result.loc[0, 'threshold']
learned.loc[i, 'bias'] = fit_result.loc[0, 'bias']
learned.loc[i, 'lapse_low'] = fit_result.loc[0, 'lapselow']
learned.loc[i, 'lapse_high'] = fit_result.loc[0, 'lapsehigh']
# Drop mice with faulty RT
learned = learned[learned['reaction_time'].notnull()]
# Change lab name into lab number
learned['lab_number'] = learned.lab.map(institution_map()[0])
learned = learned.sort_values('lab_number')
# Convert to float
learned[['perf_easy', 'reaction_time', 'threshold', 'n_trials',
'bias', 'lapse_low', 'lapse_high']] = learned[['perf_easy', 'reaction_time',
'threshold', 'n_trials', 'bias',
'lapse_low', 'lapse_high']].astype(float)
# Add all mice to dataframe seperately for plotting
learned_2 = learned.copy()
learned_2['lab'] = 'All'
learned_2['lab_number'] = 'All'
learned_2 = learned.append(learned_2)
# Stats
stats_tests = pd.DataFrame(columns=['variable', 'test_type', 'p_value'])
posthoc_tests = {}
test_df = learned_2.loc[learned_2['lab_number'].isin(['Lab 1', 'Lab 2', 'Lab 3', 'Lab 4', 'Lab 5',
'Lab 6', 'Lab 7'])]
for i, var in enumerate(['perf_easy', 'reaction_time', 'n_trials', 'threshold', 'bias']):
_, normal = stats.normaltest(test_df[var])
if normal < 0.05:
test_type = 'kruskal'
test = stats.kruskal(*[group[var].values
for name, group in test_df.groupby('lab_number')])
if test[1] < 0.05: # Proceed to posthocs
posthoc = sp.posthoc_dunn(test_df, val_col=var, group_col='lab_number')
else:
posthoc = np.nan
else:
test_type = 'anova'
test = stats.f_oneway(*[group[var].values
for name, group in test_df.groupby('lab_number')])
if test[1] < 0.05:
posthoc = sp.posthoc_tukey(test_df, val_col=var, group_col='lab_number')
else:
posthoc = np.nan
posthoc_tests['posthoc_'+str(var)] = posthoc
stats_tests.loc[i, 'variable'] = var
stats_tests.loc[i, 'test_type'] = test_type
stats_tests.loc[i, 'p_value'] = test[1]
# Z-score data
learned_zs = pd.DataFrame()
import itertools
from typing import List, Optional, Union
import numpy as np
import pandas._libs.algos as libalgos
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import factorize_from_iterable
from pandas.core.construction import extract_array
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_compressed_ids,
get_group_index,
)
class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(
self, index, level=-1, constructor=None,
):
if constructor is None:
constructor = DataFrame
self.constructor = constructor
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
# Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
self._make_selectors()
@cache_readonly
def _indexer_and_to_sort(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
return indexer, to_sort
@cache_readonly
def sorted_labels(self):
indexer, to_sort = self._indexer_and_to_sort
return [l.take(indexer) for l in to_sort]
def _make_sorted_values(self, values):
indexer, _ = self._indexer_and_to_sort
sorted_values = algos.take_nd(values, indexer, axis=0)
return sorted_values
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError("Index contains duplicate entries, cannot reshape")
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self, values, value_columns, fill_value):
if values.ndim == 1:
values = values[:, np.newaxis]
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
values, _ = self.get_new_values(values, fill_value)
columns = self.get_new_columns(value_columns)
index = self.new_index
return self.constructor(values, index=index, columns=columns)
def get_new_values(self, values, fill_value=None):
if values.ndim == 1:
values = values[:, np.newaxis]
sorted_values = self._make_sorted_values(values)
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (
sorted_values.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view("i8")
new_values = new_values.view("i8")
elif is_bool_dtype(values):
sorted_values = sorted_values.astype("object")
new_values = new_values.astype("object")
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
libreshape.unstack(
sorted_values,
mask.view("u1"),
stride,
length,
width,
new_values,
new_mask.view("u1"),
)
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self, value_columns):
if value_columns is None:
if self.lift == 0:
return self.removed_level._shallow_copy(name=self.removed_name)
lev = self.removed_level.insert(0, item=self.removed_level._na_value)
return lev.rename(self.removed_name)
stride = len(self.removed_level) + self.lift
width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(value_columns, MultiIndex):
new_levels = value_columns.levels + (self.removed_level_full,)
new_names = value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
new_levels = [value_columns, self.removed_level_full]
new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
@cache_readonly
def new_index(self):
# Does not depend on values or value_columns
result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
level, level_codes = self.new_index_levels[0], result_codes[0]
if (level_codes == -1).any():
level = level.insert(len(level), level._na_value)
return level.take(level_codes).rename(self.new_index_names[0])
return MultiIndex(
levels=self.new_index_levels,
codes=result_codes,
names=self.new_index_names,
verify_integrity=False,
)
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
# GH 19966 Make sure if MultiIndexed index has tuple name, they will be
# recognised as a whole
if clocs in index.names:
clocs = [clocs]
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)
if not rlocs:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name="__placeholder__")
else:
dummy_index = MultiIndex(
levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ["__placeholder__"],
verify_integrity=False,
)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val, fill_value=fill_value)
clocs = [v if v < val else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
assert isinstance(unstcols, MultiIndex) # for mypy
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
for rec in recons_codes:
new_codes.append(rec.take(unstcols.codes[-1]))
new_columns = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
# Prioritize integer interpretation (GH #21677):
if not is_integer(level) and not level == "__placeholder__":
level = obj.index._get_level_number(level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
if is_extension_array_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(
obj.index, level=level, constructor=obj._constructor_expanddim,
)
return unstacker.get_result(
obj.values, value_columns=None, fill_value=fill_value
)
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = _Unstacker(obj.index, level=level)
blocks = obj._mgr.unstack(unstacker, fill_value=fill_value)
return obj._constructor(blocks)
else:
return _Unstacker(
obj.index, level=level, constructor=obj._constructor,
).get_result(obj.values, value_columns=obj.columns, fill_value=fill_value)
def _unstack_extension_series(series, level, fill_value):
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Implementation note: the basic idea is to
# 1. Do a regular unstack on a dummy array of integers
# 2. Followup with a columnwise take.
# We use the dummy take to discover newly-created missing values
# introduced by the reshape.
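# As a small illustration (hypothetical shapes): unstacking a length-6 Series
# into a 2x3 frame first produces an integer frame of the original positions,
# with -1 marking holes introduced by the reshape; each output column is then
# materialized via values.take(positions, allow_fill=True, fill_value=...),
# which keeps the ExtensionArray dtype intact.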
from pandas.core.reshape.concat import concat
dummy_arr = np.arange(len(series))
# fill_value=-1, since we will do a series.values.take later
result = _Unstacker(series.index, level=level).get_result(
dummy_arr, value_columns=None, fill_value=-1
)
out = []
values = extract_array(series, extract_numpy=False)
for col, indices in result.items():
out.append(
Series(
values.take(indices.values, allow_fill=True, fill_value=fill_value),
name=col,
index=result.index,
)
)
return concat(out, axis="columns", copy=False, keys=result.columns)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(
levels=levels,
codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False,
)
if frame._is_homogeneous_type:
# For homogeneous EAs, frame._values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes._values)
dtype = dtypes[0]
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
[col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame._values.ravel()
else:
# non-homogeneous
new_values = frame._values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError(
"level should contain all level names or all level "
"numbers, not a mixture of the two."
)
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(
zip(
*[
lev.take(level_codes)
for lev, level_codes in zip(
this.columns.levels[:-1], this.columns.codes[:-1]
)
]
)
)
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = this.columns.levels[0]._shallow_copy(name=this.columns.names[0])
unique_groups = new_columns
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_codes = sorted(set(this.columns.codes[-1]))
level_vals_used = level_vals[level_codes]
levsize = len(level_codes)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_homogeneous_type and is_extension_array_dtype(
frame.dtypes.iloc[0]
):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.items()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)
elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
old_codes, old_levels = factorize_from_iterable(this.index)
new_levels = [old_levels]
new_codes = [old_codes.repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how="all")
return result
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
) -> "DataFrame":
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
dtypes_to_encode = ["object", "category"]
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(include=dtypes_to_encode)
elif not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = (
f"Length of '{name}' ({len(item)}) did not match the "
"length of the columns being encoded "
f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
check_len(prefix, "prefix")
check_len(prefix_sep, "prefix_sep")
if isinstance(prefix, str):
prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, str):
prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
with_dummies: List[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
prefix=pre,
prefix_sep=sep,
dummy_na=dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(
data,
prefix,
prefix_sep,
dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
return result
def _get_dummies_1d(
data,
prefix,
prefix_sep="_",
dummy_na=False,
sparse=False,
drop_first=False,
dtype=None,
):
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
### Old
import csv
import pandas as pd
import numpy as np
import zipfile
import os
from datetime import datetime, timedelta
import pickle
import gzip
import time
import timeit
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
def find_between_r( s, first, last ):
try:
start = s.rindex( first ) + len( first )
end = s.rindex( last, start )
return s[start:end]
except ValueError:
return ""
def reputation_calc_p1(new_subset,first_occurance):
### This can be optimized; no need for sorted merge.
new_subset = new_subset.sort_values(['To'], ascending=[True])
new_subset = new_subset.reset_index()
del(new_subset['level_0'])
new_array = new_subset[['From','To','Amount']].values
dates_array = np.array(new_subset['Timestamp'])
### Now we will just store the first occurance of each account in a dictionary (first_occurance)
### The easiest (and in pandas probably the fastest) way would be to create sorted dataframe and then iterate
### and check if we already have certain record. If no, create, if yes, it was created before, so pass.
### When it is created we also store date of creation.
sorted_merge = new_subset.sort_values(['Timestamp'], ascending=[True])
sorted_merge = sorted_merge.reset_index()
### Time to do some refinements. Let's get rid of Pandas dataframe and save it to something else.
### Let us sort the dataset alphabetically by "To". This can fasten up the algo later on...
new_array = new_subset[['From','To','Amount']].values
dates_array = np.array(sorted_merge['Timestamp'])
to_array = np.array(new_subset['To'].values)
#del(new_subset)
i = 0
while i<len(sorted_merge):
### if it is alredy there, pass, otherwise create new key.
if new_array[i][0] in first_occurance:
first_occurance[new_array[i][0]] += 1
else:
first_occurance[new_array[i][0]] = 0
### So, we store date from "From" column, but we should do the same from "To" column. This means that we are really just
### looking for first occurance.
if new_array[i][1] in first_occurance:
first_occurance[new_array[i][1]] += 1
else:
first_occurance[new_array[i][1]] = 0
### Just in case, we post the progress, so we have an idea when we will finish.
i+=1
del(sorted_merge)
return(new_array,dates_array,to_array,first_occurance)
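### Minimal usage sketch (the column names follow the code above; the account
### ids and amounts are made up). The extra reset_index() reflects the fact that
### reputation_calc_p1 expects a frame that already carries an 'index' column,
### so that its own reset_index() produces the 'level_0' column it deletes.
def _example_reputation_calc_p1():
    example = pd.DataFrame({
        'From': ['acc_a', 'acc_a', 'acc_b'],
        'To': ['acc_b', 'acc_c', 'acc_c'],
        'Amount': [1.0, 2.5, 0.5],
        'Timestamp': pd.to_datetime(['2015-08-01', '2015-08-01', '2015-08-02']),
    }).reset_index()
    return reputation_calc_p1(example, dict())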
### Here we add new records to our dictionary when a new account is detected. We assume that each starts with a value of 0.1,
### but this could be adjusted.
def update_reputation(reputation,new_array):
i = 0
while i<len(new_array):
if new_array[i][0] in reputation:
pass
else:
reputation[new_array[i][0]] = 0.1
if new_array[i][1] in reputation:
pass
else:
reputation[new_array[i][1]] = 0.1
i+=1
return(reputation)
def calculate_new_reputation(new_array,to_array,reputation):
### This is needed; we calculate reputations in our timeframe in this specific function.
mys = {}
i = 0
while i<len(new_array):
if new_array[i][1] in mys:
pass
else:
mys[new_array[i][1]] = 0
i+=1
### We use formula where we make sure the growth of reputation won't be too high.
unique_ids = np.unique(to_array)
k=0
i = 0
while i<len(unique_ids):
amounts = []
while unique_ids[i]==to_array[k]:
amounts.append(np.log10(1+new_array[k][2])* reputation[new_array[k][0]])
if k==len(to_array)-1:
break
k+=1
mys[unique_ids[i]] = sum(amounts)
i+=1
### nr 5. in paper on how to calculate the reputation.
for k in mys.keys():
if mys[k]<0:
mys[k] = -np.log10(1 - mys[k])
else:
mys[k] = np.log10(1 + mys[k])
### Nr 6;
max_value = max(mys.values())
for k in mys.keys():
mys[k] = mys[k] /max_value
return(mys)
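### In formula form (matching the code above): for each receiving account j,
###   s_j = sum_i log10(1 + amount_ij) * R_i over all senders i in the window,
###   s_j <- log10(1 + s_j) if s_j >= 0 else -log10(1 - s_j),
###   new_R_j = s_j / max_k s_k.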
### Below we have 3 different proposals on how to update reputations. They mainly differ in the speed of decay. Only
### one is generally used when calculating the updates.
def update_reputation_approach_a(reputation,mys,since,date_zero,our_date):
### Define times.
since_datezero = since - date_zero
date_since = our_date - since
since_datezero = since_datezero.days
date_since = date_since.days
date_datezero = our_date - date_zero
date_datezero = date_datezero.days
### 1st approach or a);
for k in reputation.keys():
if k in mys.keys():
reputation[k] = reputation[k] * since_datezero + mys[k] * date_since
reputation[k] = reputation[k]/date_datezero
else:
reputation[k] = (reputation[k] * since_datezero)/date_datezero
return(reputation)
def update_reputation_approach_b(reputation,mys):
### 2nd approach or b)
for k in reputation.keys():
if k in mys.keys():
reputation[k] = reputation[k] + mys[k]
reputation[k] = reputation[k]/2
else:
reputation[k] = reputation[k]/2
return(reputation)
def update_reputation_approach_c(first_occurance,reputation,mys,since,our_date):
### 3rd approach or c)
date_since = (our_date - since).days
print("Here we are")
j = 0
all_keys = set(mys.keys())
for k in reputation.keys():
since_datebirth = first_occurance[k]
date_datebirth = since_datebirth + date_since
if date_datebirth==0:
date_datebirth = 1
if k in all_keys:
reputation[k] = reputation[k] * since_datebirth + mys[k] * date_since
reputation[k] = reputation[k]/date_datebirth
else:
reputation[k] = (reputation[k] * since_datebirth)/date_datebirth
j+=1
return(reputation)
### Save zipped file
def save_zipped_pickle(obj, filename, protocol=-1):
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f, protocol)
### Load zipped file.
def load_zipped_pickle(filename):
with gzip.open(filename, 'rb') as f:
loaded_object = pickle.load(f)
return loaded_object
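### Quick round-trip sketch for the two helpers above (the filename is
### illustrative only):
def _example_pickle_roundtrip():
    payload = {'acc_a': 0.25, 'acc_b': 0.75}
    save_zipped_pickle(payload, 'reputation_snapshot.pkl.gz')
    return load_zipped_pickle('reputation_snapshot.pkl.gz')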
### Main script.
def run_script():
### First we just define the dictionaries.
first_occurance = dict()
reputation = dict()
previous_reputation = dict()
### Start of our period
date_zero = datetime(2015, 7, 30, 0, 0)
folder="data"
start = timeit.timeit()
### Get each filename (sort them first) and read it.
for filename in np.sort(os.listdir(folder)):
filepath = os.path.join(folder, filename)
print(filename)
daily_data = []
mydate = find_between_r(filename,"ethereum_",".tsv")
### Get dates. Since means what was day before because we update ranking every day.
our_date = datetime.strptime(mydate, '%Y-%m-%d')
since = our_date - timedelta(days=1)
with open(filepath) as f:
lines = [x for x in f.readlines()]
for line in lines:
line = line.replace("\t",",")
daily_data.append(line)
i=0
while i < len(daily_data):
daily_data[i] = daily_data[i].split(',')
i+=1
### All data is now in daily_data. We add columns to the file.
daily_data = pd.DataFrame(daily_data)
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from unittest import TestCase
import h5py
import numpy as np
import pandas as pd
from exatomic import Universe
from exatomic.base import resource
from exatomic.molcas.output import Output, Orb, HDF
# TODO : change df.shape[0] == num to len(df.index) == num everywhere
class TestOutput(TestCase):
"""Test the Molcas output file editor."""
def setUp(self):
self.cdz = Output(resource('mol-carbon-dz.out'))
self.uo2sp = Output(resource('mol-uo2-anomb.out'))
self.mamcart = Output(resource('mol-ch3nh2-631g.out'))
self.mamsphr = Output(resource('mol-ch3nh2-anovdzp.out'))
self.c2h6 = Output(resource('mol-c2h6-basis.out'))
def test_add_orb(self):
"""Test adding orbital file functionality."""
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.assertTrue(hasattr(self.mamcart, 'momatrix'))
self.assertTrue(hasattr(self.mamcart, 'orbital'))
with self.assertRaises(ValueError):
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='same')
self.assertTrue('same' in self.mamcart.momatrix.columns)
self.assertTrue('same' in self.mamcart.orbital.columns)
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='diff', orbocc='diffocc')
self.assertTrue('diff' in self.mamcart.momatrix.columns)
self.assertTrue('diffocc' in self.mamcart.orbital.columns)
uni = self.mamcart.to_universe()
self.assertTrue(hasattr(uni, 'momatrix'))
self.assertTrue(hasattr(uni, 'orbital'))
def test_add_overlap(self):
"""Test adding an overlap matrix."""
self.cdz.add_overlap(resource('mol-carbon-dz.overlap'))
self.assertTrue(hasattr(self.cdz, 'overlap'))
uni = self.cdz.to_universe()
self.assertTrue(hasattr(uni, 'overlap'))
def test_parse_atom(self):
"""Test the atom table parser."""
self.uo2sp.parse_atom()
self.assertEqual(self.uo2sp.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull( | pd.DataFrame(self.uo2sp.atom) | pandas.DataFrame |
"""
Module: libfmp.b.b_annotation
Author: <NAME>, <NAME>
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
import numpy as np
import pandas as pd
import librosa
import libfmp.b
def read_csv(fn, header=True, add_label=False):
"""Read a CSV file in table format and creates a pd.DataFrame from it, with observations in the
rows and variables in the columns.
Args:
fn (str): Filename
header (bool): Boolean (Default value = True)
add_label (bool): Add column with constant value of `add_label` (Default value = False)
Returns:
df (pd.DataFrame): Pandas DataFrame
"""
df = pd.read_csv(fn, sep=';', keep_default_na=False, header=0 if header else None)
if add_label:
assert 'label' not in df.columns, 'Label column must not exist if `add_label` is True'
df = df.assign(label=[add_label] * len(df.index))
return df
def write_csv(df, fn, header=True):
"""Write a pd.DataFrame to a CSV file, with observations in the rows and variables in the columns.
Args:
df (pd.DataFrame): Pandas DataFrame
fn (str): Filename
header (bool): Boolean (Default value = True)
"""
df.to_csv(fn, sep=';', index=False, quoting=2, header=header)
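# Hedged usage sketch (not part of libfmp): write a small annotation table with
# write_csv and read it back with read_csv; the filename is an illustrative assumption.
def demo_csv_roundtrip(fn='tmp_annotation.csv'):
    df = pd.DataFrame({'start': [0.0, 1.5], 'end': [1.5, 3.0], 'label': ['A', 'B']})
    write_csv(df, fn)
    return read_csv(fn, header=True)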
def cut_audio(fn_in, fn_out, start_sec, end_sec, normalize=True, write=True, Fs=22050):
"""Cut an audio file using specificed start and end time positions and writes the result to a new audio file.
Args:
fn_in (str): Filename and path for input audio file
fn_out (str): Filename and path for input audio file
start_sec (float): Start time position (in seconds) of cut
end_sec (float): End time position (in seconds) of cut
normalize (bool): If True, then normalize audio (with max norm) (Default value = True)
write (bool): If True, then write audio (Default value = True)
Fs (scalar): Sampling rate of audio (Default value = 22050)
Returns:
x_cut (np.ndarray): Cut audio
"""
x_cut, Fs = librosa.load(fn_in, sr=Fs, offset=start_sec, duration=end_sec-start_sec)
if normalize is True:
x_cut = x_cut / np.max(np.abs(x_cut))
if write is True:
libfmp.b.write_audio(fn_out, x_cut, Fs)
return x_cut
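# Hedged usage sketch: extract seconds 10-15 of a recording into a new audio file.
# Both filenames are illustrative assumptions.
def demo_cut_excerpt():
    return cut_audio('full_take.wav', 'excerpt.wav', start_sec=10.0, end_sec=15.0)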
def cut_csv_file(fn_in, fn_out, start_sec, end_sec, write=True):
"""Cut a annotation CSV file (where each row corresponds to the four variables ``start``, ``end``, ``pitch``,
and ``label``) using specificed start and end time positions and writes the result to a new CSV file.
Args:
fn_in (str): Filename and path for input audio file
fn_out (str): Filename and path for input audio file
start_sec (float): Start time position (in seconds) of cut
end_sec (float): End time position (in seconds) of cut
write (bool): If True, then write csv file (Default value = True)
Returns:
ann_cut (list): Cut annotation file
"""
df = | pd.read_csv(fn_in, sep=',', keep_default_na=False, header=None) | pandas.read_csv |
import datetime as dt
import matplotlib.pyplot as plt
import lifetimes
import numpy as np
import os
import pandas as pd
import seaborn as sns
def numcard(x):
return x.nunique(), len(x)
def todateclean(x):
return | pd.to_datetime(x, errors='coerce') | pandas.to_datetime |
"""
PL Modeling Program
-> Python file 1/3: InteractivePLFittingGUI.py (implements the GUI)
Python file 2/3: PLModeling.py (implements the PL emission models)
Python file 3/3: InterferenceFunction.py (implements the interference function models)
Author: <NAME>
Date: March 2021
"""
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy import signal
import scipy.constants as const
from InterferenceFunction import IF
from PLModeling import PLModel
from pathlib import Path
from itertools import compress
from os import path
import sys
import configparser
import json
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import tkinter as tk
from tkinter import ttk
import tkinter.font as tkFont
from tkinter.filedialog import askopenfilename, asksaveasfilename
from PIL import Image, ImageTk
# Define constant
h_eV = const.value('Planck constant in eV/Hz')
# Set proper path for files when executable is run
path_to_file = getattr(sys, '_MEIPASS', path.abspath(path.dirname(__file__)))
class Parameter:
"""Class that defines a model parameter."""
def __init__(self, value, bounds, s_label, s_unit, s_scale, s_prec):
self.value = value
self.bounds = bounds
self.s_label = s_label
self.s_unit = s_unit
self.s_scale = s_scale
self.s_prec = s_prec
class Model:
"""Class that defines a model."""
def __init__(self, model_class, parameters, x_var, label, active_model):
self.model_class = model_class
self.parameters = parameters
self.xunit = x_var
self.label = label
self.active_model = active_model
class Slider(ttk.Scale):
"""
ttk.Scale sublass that limits the precision of values. Taken from:
'https://stackoverflow.com/questions/54186639/tkinter-control-ttk-scales-
increment-as-with-tk-scale-and-a-tk-doublevar'
"""
def __init__(self, *args, **kwargs):
self.precision = kwargs.pop('precision') # Remove non-std kwarg.
self.chain = kwargs.pop('command', lambda *a: None) # Save if present.
super(Slider, self).__init__(*args, command=self._value_changed, **kwargs)
def _value_changed(self, newvalue):
if self.precision == 0:
newvalue = int(round(float(newvalue), 0))
else:
newvalue = round(float(newvalue), self.precision)
self.winfo_toplevel().globalsetvar(self.cget('variable'), (newvalue))
self.chain(newvalue) # Call user specified function.
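# Hedged usage sketch (illustrative only, never called here): a Slider bound to a
# DoubleVar so that dragging snaps to two decimal places; `parent` is assumed to be
# an existing Tk container.
def _demo_slider(parent):
    var = tk.DoubleVar(value=0.5)
    slider = Slider(parent, from_=0.0, to=1.0, variable=var, precision=2,
                    command=lambda v: print('slider value:', v))
    slider.pack(fill='x')
    return slider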
class NotebookTabs:
"""Class that manages the tabs in the PL modeling program."""
ebox_width = 5
def __init__(self, window):
self.window = window
self.font_title = tkFont.Font(weight='bold', size=18)
self.font_subtitle = tkFont.Font(weight='bold', size=16)
self.font_legend = tkFont.Font(weight='bold', size=11)
self.font_label = tkFont.Font(size=11)
# Config file
self.opt_data = None
self.config = configparser.ConfigParser()
self.config.optionxform = str
self.config.read(path.join(path_to_file, 'files', 'config_default.ini'))
self.c_data = self.fetchConfigData()
# Create tabs
self.tab_control = ttk.Notebook(self.window)
# Main tab
self.models = self.createIFPLModels(np.array(1e-6), np.array(1))
self.objects_main = {'IF': {}, 'PL': {}}
self.tree = {'frame': None, 'tree': None}
self.main = self.createMainTab()
self.tabs = {'main': self.main['frame'], 'data': {}}
# Update IF tab when selected
self.tab_control.bind("<<NotebookTabChanged>>", self.updateSummary)
# Choose model
self.updateModel('IF')
self.updateModel('PL')
# Place tabs
self.tab_control.add(self.main['frame'], text='Main')
self.tab_control.pack(expand=1, fill='both')
def updateSummary(self, event):
sel_tab = event.widget.select()
tab_name = event.widget.tab(sel_tab, "text")
if tab_name == 'IF analysis':
self.tabs['summary']['GUI'].updateGraph()
def createMainTab(self):
# Create tab
tab_main = ttk.Frame(self.tab_control)
tab_main.rowconfigure(2, weight=1)
tab_main.columnconfigure(1, weight=1)
# Title
frm_title = ttk.Frame(tab_main)
(tk.Label(master=frm_title, text='PL Modeling Program', font=self.font_title)
.grid(row=0, column=1, columnspan=4, sticky='news', pady=5))
# Logo
image = Image.open(path.join(path_to_file, 'files', "logo.ico")).resize((80, 80))
photo = ImageTk.PhotoImage(image)
lbl = tk.Label(master=frm_title, image=photo)
lbl.image = photo
lbl.grid(row=0, column=0, rowspan=3, padx=10, pady=10, sticky='w')
# Open file button
btn_open = tk.Button(master=frm_title, text='Open data', font=self.font_label, command=self.openFile)
btn_open.grid(row=1, column=1, columnspan=1, pady=5, padx=5, sticky='w')
# Unit
frm_import_unit = ttk.Frame(frm_title)
(tk.Label(master=frm_import_unit, text='Unit:', font=self.font_legend)
.grid(row=0, column=0, sticky='w'))
c_box_import = ttk.Combobox(frm_import_unit, values=['nm', 'μm', 'eV'], state='readonly', width=5)
c_box_import.current(0)
c_box_import.grid(row=0, column=1, sticky='w')
frm_import_unit.grid(row=2, column=1, sticky='w', pady=5, padx=5)
# Close all tabs
btn_close_all = tk.Button(master=frm_title, text='Close tabs', font=self.font_label, command=self.closeTabs)
btn_close_all.grid(row=1, column=2, sticky='w', pady=2, padx=5)
# Exit program
btn_exit = tk.Button(master=frm_title, text='Exit program', font=self.font_label, command=self.exitProgram)
btn_exit.grid(row=2, column=2, sticky='w', pady=2, padx=5)
# Extra space
frm_title.columnconfigure(1, minsize=150)
frm_title.columnconfigure(2, minsize=150)
frm_title.columnconfigure(3, minsize=150)
# Config file
frm_config = ttk.Frame(frm_title)
btn_config = tk.Button(master=frm_config, text='Load config file', font=self.font_label, command=self.loadConfig)
btn_config.grid(row=0, column=0, pady=5)
# Labels
(tk.Label(master=frm_config, text='File:', font=self.font_legend)
.grid(row=0, column=1, sticky='w'))
file_var = tk.StringVar()
file_var.set('default')
(tk.Label(master=frm_config, textvariable=file_var, font=self.font_label)
.grid(row=0, column=2, sticky='w'))
frm_config.grid(row=1, column=3, sticky='w', padx=5, pady=5)
# Open IF tab
btn_IFtab = tk.Button(master=frm_title, text='Open IF analysis tab', font=self.font_label, command=self.openIFTab)
btn_IFtab.grid(row=2, column=3, sticky='w', pady=2, padx=5)
# Fit all
frm_fit_params = ttk.Frame(tab_main)
frm_lbl_fit = ttk.Frame(frm_fit_params)
(tk.Label(master=frm_lbl_fit, text='Fit all data', font=self.font_subtitle)
.grid(row=0, column=0, padx=5))
image = Image.open(path.join(path_to_file, 'files', "help.png")).resize((20,20))
help_image = ImageTk.PhotoImage(image)
btn_info = tk.Button(master=frm_lbl_fit, image=help_image, font=self.font_label, command=self.showInfo)
btn_info.image = help_image
btn_info.grid(row=0, column=1, pady=5, padx=5)
# Fit unit
frm_fit_unit = ttk.Frame(frm_fit_params)
(tk.Label(master=frm_fit_unit, text='x-axis variable:', font=self.font_legend)
.grid(row=0, column=0, sticky='w'))
c_box_fit = ttk.Combobox(frm_fit_unit, values=['wavelength', 'energy'], state='readonly', width=12)
c_box_fit.bind('<<ComboboxSelected>>', lambda x:self.createTree())
c_box_fit.current(0)
c_box_fit.grid(row=0, column=1, sticky='w')
frm_fit_unit.grid(row=1, column=0, columnspan=2, sticky='ns', pady=5, padx=5)
frm_lbl_fit.grid(row=0, column=0, columnspan=2, sticky='w')
# Buttons
btn_fitPL_all = tk.Button(master=frm_fit_params, text='Fit PL', font=self.font_label, command=self.fitPL_All)
btn_fitPL_all.grid(row=2, column=0, sticky='news', pady=5, padx=5)
btn_fitIFPL_all = tk.Button(master=frm_fit_params, text='Fit IF & PL', font=self.font_label, command=self.fitIF_PL_All)
btn_fitIFPL_all.grid(row=2, column=1, sticky='news', pady=5, padx=5)
# Comboboxes
tk.Label(master=frm_fit_params, text='Fit parameters', font=self.font_legend).grid(row=3, column=0, columnspan=1, sticky='news')
var_select_all = tk.BooleanVar()
ttk.Checkbutton(master=frm_fit_params, variable=var_select_all, command=self.selectAllFit, text='Select all').grid(row=3, column=1)
self.objects_main['var_sel_all'] = var_select_all
self.objects_main['IF']['cbox'] = self.createCombobox(frm_fit_params, 'IF', initial='off')
self.objects_main['PL']['cbox'] = self.createCombobox(frm_fit_params, 'PL', initial='BGF')
tk.Label(master=frm_fit_params, text='IF model', font=self.font_label).grid(row=4, column=0, sticky='news')
tk.Label(master=frm_fit_params, text='PL model', font=self.font_label).grid(row=4, column=1, sticky='news')
self.objects_main['IF']['cbox'].grid(row=5, column=0, sticky='w', padx=5)
self.objects_main['PL']['cbox'].grid(row=5, column=1, sticky='w', padx=5)
# Fixed param
frm_fixed_IF = ttk.Frame(frm_fit_params)
frm_fixed_PL = ttk.Frame(frm_fit_params)
self.createFitFixed(frm_fixed_IF, 'IF')
self.createFitFixed(frm_fixed_PL, 'PL')
frm_fixed_IF.grid(row=6, column=0, sticky='n')
frm_fixed_PL.grid(row=6, column=1, sticky='n')
# Tree with fit details
self.tree['frame'] = ttk.Frame(tab_main)
self.tree['frame'].columnconfigure(0, minsize=150, weight=1)
self.tree['frame'].columnconfigure(1, minsize=150)
self.tree['frame'].rowconfigure(1, weight=1)
(tk.Label(master=self.tree['frame'], text='Fit results', font=self.font_subtitle)
.grid(row=0, column=0, columnspan=2, sticky='w'))
self.createTree()
# Save fit results button
btn_save = tk.Button(master=self.tree['frame'], text='Save fit results', font=self.font_label, command=self.saveFitsData)
btn_save.grid(row=2, column=0, sticky='w', pady=5)
btn_export_plots = tk.Button(master=self.tree['frame'], text='Export all plot data', font=self.font_label, command=self.exportPlotsData)
btn_export_plots.grid(row=2, column=1, sticky='w', pady=5)
# Place frames
frm_title.grid(row=0, column=0, columnspan=2, sticky='ns')
ttk.Separator(master=tab_main, orient=tk.HORIZONTAL).grid(row=1, column=0, columnspan=2, pady=10, sticky='news')
frm_fit_params.grid(row=2, column=0, sticky='n')
self.tree['frame'].grid(row=2, column=1, sticky='news')
return {'frame': tab_main, 'btn_open': btn_open, 'file_var': file_var,
'btn_config': btn_config, 'btn_fitPL': btn_fitPL_all,
'btn_fitIF_PL': btn_fitIFPL_all, 'c_box_import': c_box_import,
'c_box_fit': c_box_fit}
def showInfo(self):
message = ('Model info:\n'
'IF model: Interference Function model [1].\n'
'PL model: Photoluminescence model.\n'
' BGF: band gap fluctuations [2].\n'
' EF: electrostatic fluctuations [3].\n'
' UPF: unified potential fluctuations (BGF + EF) [4].\n\n'
'References:\n[1] {}\n[2] {}\n[3] {}\n[4] {}').format('Journal of Applied Physics 118, 035307 (2015); doi: 10.1063/1.4926857',
'Journal of Applied Physics 101, 113519 (2007); doi: 10.1063/1.2721768',
'Journal of Applied Physics 116, 173504 (2014); doi: 10.1063/1.4898346',
'Check repository README.')
tk.messagebox.showinfo('About', message)
def exportPlotsData(self):
filepath = asksaveasfilename(defaultextension='csv', filetypes=[("CSV", "*.csv"), ("All Files", "*.*")])
if not filepath:
return
graph_data = {}
for idx, tab in enumerate(self.tabs['data']):
x_var = self.main['c_box_fit'].get()
if x_var == 'wavelength':
self.tabs['data'][tab]['GUI'].objects['options']['r_btn_wave'].invoke()
elif x_var == 'energy':
self.tabs['data'][tab]['GUI'].objects['options']['r_btn_energy'].invoke()
if idx == 0:
graph_data = {**graph_data, **{self.tabs['data'][tab]['GUI'].axes[1].get_xlabel(): self.tabs['data'][tab]['GUI'].graphs[0].get_xdata()}}
graph_data = {**graph_data, **self.tabs['data'][tab]['GUI'].getPlotData()}
graph_data = pd.DataFrame(graph_data)
try:
graph_data.to_csv(filepath, index=False, encoding="utf-8-sig")
message = 'Plot data saved to {}.'.format(filepath)
tk.messagebox.showinfo('Plot data saved successfully', message)
except:
message = 'Not able to save data to {}. The file might be open.'.format(filepath)
tk.messagebox.showerror('Save error', message)
def loadConfig(self):
filepath = askopenfilename()
if not filepath:
return
filename = path.basename(filepath)
config_sec = {'IF': ['nk_dir', 'thickness', 'wave_laser', 'R_rms',
'k', 'delta', 'mu', 'sigma'],
'BGF': ['E_g', 'beta', 'sigma_g', 'T'],
'EF': ['E_g', 'theta', 'gamma', 'delta_mu', 'T', 'a_0d'],
'UPF': ['E_g', 'beta', 'sigma_g', 'theta', 'gamma', 'T',
'a_0d']}
# Update config file
try:
self.config = configparser.ConfigParser()
self.config.optionxform = str
self.config.read(filepath)
if not all([self.config.has_option(sec, key) for sec in config_sec for key in config_sec[sec]]):
raise KeyError()
self.c_data = self.fetchConfigData()
self.main['file_var'].set(filename)
except KeyError:
message = 'Error in configuration file. Please check that the required sections and values are present.'
tk.messagebox.showerror('File format error', message)
except ValueError:
message = 'Error in configuration file. Please check that parameter values are within bounds and try again.'
tk.messagebox.showerror('Value error', message)
except:
message = 'Error in configuration file. Please check file format and try again.'
tk.messagebox.showerror('File format error', message)
def fetchConfigData(self):
config = {}
for sec in self.config.items():
params = {}
for it in self.config.items(sec[0]):
if it[0] == 'nk_dir':
if it[1] == 'default':
pass
else:
nk_dir = Path(it[1])
nk_air = pd.read_csv(path.join(nk_dir, 'files', "nk_air.csv"))
nk_CIGS = pd.read_csv(path.join(nk_dir, 'files', "nk_CIGS.csv"))
nk_Mo = pd.read_csv(path.join(nk_dir, 'files', "nk_Mo.csv"))
self.opt_data = {'air': nk_air, 'CIGS': nk_CIGS,
'Mo': nk_Mo}
else:
data = json.loads(it[1])
if type(data) is list:
if not(data[0] <= data[1] and data[1] <= data[2]):
raise ValueError
params[it[0]] = json.loads(it[1])
config[sec[0]] = params
return config
def closeTabs(self):
# close tabs
for tab in range(1, self.tab_control.index('end')):
self.tab_control.forget(1)
# delete data
try:
self.tabs['data'] = {}
del self.tabs['summary']
except:
pass
self.window.title('PL Modeling Program')
self.createTree()
self.main['btn_open'].config(state='normal')
self.main['btn_config'].config(state='normal')
def exitProgram(self):
message = 'Are you sure you want to exit the program?'
answer = tk.messagebox.askyesnocancel('Exit program', message)
if answer is True:
self.window.destroy()
else:
return
def openIFTab(self):
if 'summary' in self.tabs:
self.tab_control.add(self.tabs['summary']['frame'], text='IF analysis')
self.tab_control.pack(expand=1, fill='both')
def saveFitsData(self):
output_dict = {}
for idx, col in enumerate([self.tree['tree'].heading('#0')['text']]+list(self.tree['tree']['columns'])):
output_list = []
for child in self.tree['tree'].get_children():
if idx == 0:
output_list.append(self.tree['tree'].item(child, option='text'))
else:
output_list.append(self.tree['tree'].item(child, option='values')[idx-1])
output_dict[col] = output_list
output = | pd.DataFrame(output_dict) | pandas.DataFrame |
#!/usr/bin python3
import pandas as pd
import numpy as np
import datetime as datetime
def changeDate():
base_CSV = | pd.read_csv('../dataset/lpv_d.csv') | pandas.read_csv |
import os
import glob
import datetime as dt
import pandas as pd
from forest import geo
import bokeh.models
class View(object):
def __init__(self, loader):
self.loader = loader
self.source = bokeh.models.ColumnDataSource({
"x": [],
"y": [],
"date": [],
"longitude": [],
"latitude": [],
"flash_type": []
})
def render(self, valid_date):
frame = self.loader.load_date(valid_date)
x, y = geo.web_mercator(
frame.longitude,
frame.latitude)
self.source.data = {
"x": x,
"y": y,
"date": frame.date,
"longitude": frame.longitude,
"latitude": frame.latitude,
"flash_type": frame.flash_type,
}
def add_figure(self, figure):
renderer = figure.circle(
x="x",
y="y",
size=10,
source=self.source)
tool = bokeh.models.HoverTool(
tooltips=[
('Time', '@date{%F}'),
('Lon', '@longitude'),
('Lat', '@latitude'),
('Flash type', '@flash_type')],
formatters={
'date': 'datetime'
},
renderers=[renderer])
figure.add_tools(tool)
return renderer
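# Hedged wiring sketch (illustrative only): pair a Loader with a View, attach it to a
# bokeh figure and render one 15-minute window. The glob pattern and timestamp are
# assumptions for demonstration.
def _demo_render_window(figure):
    loader = Loader.pattern("~/lightning/*.csv")
    view = View(loader)
    view.add_figure(figure)
    view.render(dt.datetime(2020, 1, 1, 12, 0))
    return view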
class Loader(object):
def __init__(self, paths):
self.paths = paths
if len(self.paths) > 0:
self.frame = self.read(paths)
@classmethod
def pattern(cls, text):
return cls(list(sorted(glob.glob(os.path.expanduser(text)))))
def load_date(self, date):
frame = self.frame.set_index('date')
start = date
end = start + dt.timedelta(minutes=15)
s = "{:%Y-%m-%dT%H:%M}".format(start)
e = "{:%Y-%m-%dT%H:%M}".format(end)
small_frame = frame[s:e].copy()
return small_frame.reset_index()
@staticmethod
def read(csv_files):
if isinstance(csv_files, str):
csv_files = [csv_files]
frames = []
for csv_file in csv_files:
frame = pd.read_csv(
csv_file,
parse_dates=[1],
converters={0: Loader.flash_type},
usecols=[0, 1, 2, 3],
names=["flash_type", "date", "latitude", "longitude"],
header=None)
frames.append(frame)
if len(frames) == 0:
return None
else:
return | pd.concat(frames, ignore_index=True) | pandas.concat |
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier
import argparse
rate = "0.5" # 默认为6:4的正负样本比例,若要改为1:1则取rate=“0.5”
class SGD:
def __init__(self, trainfile, validfile, testfile):
super(SGD, self).__init__()
train: pd.DataFrame = | pd.read_csv(trainfile) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
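    def test_extract_single_named_group_sketch(self):
        # Hedged illustrative addition (not from the original suite): a single
        # named group with expand=False returns a Series that adopts the group name.
        s = Series(['a1', 'b2', 'c3'])
        result = s.str.extract(r'(?P<digit>\d)', expand=False)
        tm.assert_series_equal(result, Series(['1', '2', '3'], name='digit'))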
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
| tm.assert_frame_equal(result, exp) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import os
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
cwd = os.getcwd()
print(cwd)
df = | pd.read_csv(cwd + '/' + 'HFI2021.csv') | pandas.read_csv |
"""Functions for plotting system resource use."""
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Union
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import gridspec
async def _read_resource_files(data_location: Path, start_time: datetime, end_time: Optional[datetime] = None,
processes_to_treat_as_root: Optional[Union[tuple, list]] = None):
"""Function for automatically checking the data location and reading in requisite files."""
if end_time is not None:
if end_time <= start_time:
raise ValueError("end time may not be equal to or sooner than the start time!")
all_files = np.sort(list(data_location.glob("*_server_usage.csv")))
if len(all_files) == 0:
raise FileNotFoundError("no valid files to read were found!")
# Convert the times in the filenames into datetime objects
time_strings = [x.stem.rsplit("_")[0] for x in all_files]
time_objects = np.asarray([datetime.strptime(x, "%Y-%m-%d") for x in time_strings])
# Get midnight on the start time (+ end time) and see which files are valid
start_time_midnight = datetime(start_time.year, start_time.month, start_time.day)
valid_files = time_objects >= start_time_midnight
if end_time is not None:
end_time_midnight = datetime(end_time.year, end_time.month, end_time.day) + timedelta(days=1)
valid_files = np.logical_and(valid_files, time_objects <= end_time_midnight)
# Read in all valid files
dataframe = []
for a_file in np.asarray(all_files)[valid_files]:
a_dataframe = pd.read_csv(a_file)
dataframe.append(a_dataframe)
dataframe = pd.concat(dataframe, ignore_index=True)
dataframe['time'] = pd.to_datetime(dataframe['time'])
# Restrict measurements to just being the ones absolutely within the time range
valid_measurements = dataframe['time'] >= start_time
if end_time is not None:
valid_measurements = np.logical_and(dataframe['time'] <= end_time, valid_measurements)
dataframe = dataframe.loc[valid_measurements].reset_index(drop=True)
# Fold some processes as being root processes if necessary
if processes_to_treat_as_root is not None:
to_replace = np.isin(dataframe['username'], processes_to_treat_as_root)
dataframe['username'] = np.where(to_replace, "root", dataframe['username'])
# Re-combine all of the root processes that are with the same username & time
dataframe = (dataframe
.groupby(["username", "time"])
.agg({"cpu_percent": "sum", "memory": "sum", "threads": sum})
.reset_index())
return dataframe
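# Hedged usage sketch (illustrative only): pull the last 24 hours of samples, folding a
# couple of assumed system accounts into "root". The account names are placeholders.
async def _demo_last_day(data_location: Path):
    return await _read_resource_files(
        data_location,
        start_time=datetime.now() - timedelta(days=1),
        processes_to_treat_as_root=("systemd-network", "messagebus"),
    )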
async def plot_resource_use(data_location: Path, output_location: Path,
start_time: Union[datetime, timedelta], end_time: Optional[datetime] = None,
aggregation_level: Optional[str] = None,
default_tick_format_string: str = "%Y.%m.%d %H:%M",
tick_format_string_overwrite: Optional[str] = None, dpi=300,
processes_to_treat_as_root: Optional[Union[tuple, list]] = None,
minimum_resources_to_plot: Union[tuple, list] = (0.0, 0.0),
memory_aggregation_mode="mean", cpu_aggregation_mode="mean"):
"""Function for plotting resource usage in a certain timeframe and dumping this information to a file."""
# Process start_time if it's a timedelta
if isinstance(start_time, timedelta):
start_time = datetime.now() - start_time
logging.debug(f" plot is within range\n start time: {start_time}\n end time: {end_time}")
# Read in the data
logging.debug("fetching files")
dataframe = await _read_resource_files(data_location, start_time, end_time=end_time,
processes_to_treat_as_root=processes_to_treat_as_root)
# Firstly, contemplate dropping users if they never cross the minimum resource usage threshold
if minimum_resources_to_plot[0] > 0 and minimum_resources_to_plot[1] > 0:
# Aggregate all datapoints by user & maximum
dataframe_by_user = dataframe.groupby("username").agg({"cpu_percent": "max", "memory": "max"}).reset_index()
# Remove users whose max datapoint isn't above the minimum allowed value
good_users = np.logical_or(dataframe_by_user['cpu_percent'] > minimum_resources_to_plot[0],
dataframe_by_user['memory'] > minimum_resources_to_plot[1])
users_to_keep = dataframe_by_user.loc[good_users, 'username'].to_numpy()
dataframe = dataframe.loc[np.isin(dataframe['username'], users_to_keep)].reset_index(drop=True)
# Make a dataframe grouped by time - this is the total usage at every sampled step
logging.debug("manipulating dataframe and plotting")
dataframe_by_time = (dataframe
.groupby("time")
.agg({"cpu_percent": "sum", "memory": "sum", "threads": "sum"})
.reset_index())
# Also do some other bits of setup
unique_users = np.unique(dataframe["username"])
unique_times = dataframe_by_time["time"].copy()
# Add implied missing values and make a per-user dataframe for each
user_dataframes = {}
total_usage = {}
for a_user in unique_users:
# Make the user dataframe
user_dataframes[a_user] = dataframe.loc[dataframe["username"] == a_user].reset_index(drop=True)
# Add missing times onto the user dataframe by concatenating with a dataframe with an empty value for every
# missing time
time_is_missing = np.isin(unique_times, user_dataframes[a_user]["time"], invert=True)
missing_times_df = pd.DataFrame(
{"username": a_user, "cpu_percent": 0.0, "memory": 0.0, "threads": 0,
"time": unique_times[time_is_missing]})
user_dataframes[a_user] = (
| pd.concat([user_dataframes[a_user], missing_times_df], ignore_index=True) | pandas.concat |
import pandas as pd
import math
import numpy as np
def matchCheck(colNo, weightl, impactl):
# print(colNo)
# print(weightl)
# print(impactl)
if colNo != weightl or colNo != impactl or impactl != weightl:
raise Exception("Number of weights, number of impacts and number of columns (from 2nd to last columns) must be same")
def checkNumeric(df):
columns = list(df)[1:]
for col in columns:
if not pd.api.types.is_integer_dtype(df[col].dtypes) and not pd.api.types.is_float_dtype(df[col].dtypes) :
# print()
raise Exception("From 2nd to last columns must contain numeric values only.")
return True
def validateWeight(weights):
for wt in weights:
        # Allow one decimal point, then require the remainder to be numeric
        r_wt = wt.replace(".", "", 1)
        if not r_wt.isnumeric():
            raise Exception("Weights must be numeric values separated by ','")
def validateImpact(impacts):
# print(impacts)
for imp in impacts:
# print(imp)
if imp!='-' and imp!='+':
raise Exception("Impacts must be either +ve or -ve")
def euclidean(series,value):
return math.sqrt(sum((series-value)**2))
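# Hedged sketch (an assumption about a typical next step, not the original code):
# TOPSIS usually divides each criterion column by its Euclidean norm and applies the
# weight; euclidean(series, 0) above gives exactly that column norm.
def demo_normalise(df, weights):
    out = pd.DataFrame({df.columns[0]: df.iloc[:, 0]})
    for col, w in zip(list(df)[1:], weights):
        out[col] = df[col] / euclidean(df[col], 0) * float(w)
    return out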
def topsis(df,weights,impacts):
columns = list(df)[1:]
df_norm = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
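    def test_fixture_sketch(self):
        # Hedged illustrative addition (not from the original suite): the shared
        # fixture built in setUp is ordered with categories a < b < c.
        self.assertTrue(self.factor.ordered)
        self.assertEqual(list(self.factor.categories), ['a', 'b', 'c'])
        self.assertEqual(len(self.factor), 8)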
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series( | pd.Categorical(idx) | pandas.Categorical |
"""Covid Model"""
__docformat__ = "numpy"
import warnings
import pandas as pd
import numpy as np
global_cases_time_series = (
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_"
"covid_19_time_series/time_series_covid19_confirmed_global.csv"
)
global_deaths_time_series = (
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_"
"covid_19_time_series/time_series_covid19_deaths_global.csv"
)
def get_global_cases(country: str) -> pd.DataFrame:
"""Get historical cases for given country
Parameters
----------
country: str
Country to search for
Returns
-------
pd.DataFrame
Dataframe of historical cases
"""
cases = | pd.read_csv(global_cases_time_series) | pandas.read_csv |
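# --- Added illustrative sketch; not part of the source above ---
# One generic way to finish the country filter for the JHU wide format,
# assuming the usual 'Province/State', 'Country/Region', 'Lat', 'Long'
# columns followed by one column per date. The helper name is hypothetical
# and this is not the project's actual implementation.
def _cases_for_country_sketch(cases: pd.DataFrame, country: str) -> pd.DataFrame:
    subset = cases[cases["Country/Region"] == country]
    # collapse provinces into a single daily series
    daily = subset.drop(columns=["Province/State", "Country/Region", "Lat", "Long"]).sum()
    daily.index = pd.to_datetime(daily.index)
    return daily.to_frame(name=country)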
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
def ensure_string_columns(df):
newcols = []
for col in df.columns:
strcol = str(col)
if strcol[0]=="(": #if it's a tuple, instead of a string
a = strcol
b = a[a.find("("):a.find(",")]
c = b[1:]
d = c.replace("'","")
strcol = d
newcols.append(strcol)
df.columns = newcols
return df
# Takes in a pd.df, and returns a clean version of it.
'''
Clean means:
//- There are no nulls where a value is required.
- There are no rows where the first two attributes are equal (in that case the values are probably all equal, and the whole row probably isn't meant to be read by a human as a real data row).
- Column names are simple strings.
'''
def clean(df):
print(df)
df = ensure_string_columns(df) #might be inefficient (setting df.columns= multiple times via different function calls), but not a primary concern for now
columns = df.columns
#Remove all the citation brackets from the Wikipedia:
for col in columns:
df[col] = [re.sub(r'[\(\[].*?[\)\]]','', str(x)) for x in df[col]]
#Remove all the ones where the label is the same throughout the row:
indexes = []
for index, row in df.iterrows():
if row[columns[0]]==row[columns[1]]:
indexes.append(index)
for i in indexes:
df.drop(i, inplace=True)
return df
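# Illustrative usage sketch of clean(); added here, not part of the original
# scraper, and the sample values are hypothetical.
def _demo_clean():
    demo = pd.DataFrame({"Name": ["Alpha[1]", "x", "Beta"],
                         "Date": ["2001[a]", "x", "2003"]})
    # citation brackets get stripped and the row whose first two attributes
    # are both "x" gets dropped
    return clean(demo)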
# Returns a pd.df of the Wiki table, in its original state
def get_original_df(url, tablename):
response = requests.get(url)
text = response.text
soup = BeautifulSoup(response.text, "html.parser")
table = soup.find('table', {'class':'wikitable'})
print(table)
listy = pd.read_html(str(table))
print(listy)
df = pd.DataFrame(listy[0])
df = df #idk the exact mechanics of why this is necessary, but it's part of the process.
print(df)
df = ensure_string_columns(df)
df = clean(df)
return df
# Returns the Wiki table, but in an Artifact-friendly format. Also, the return type is a DataFrame.
def wiki2artifactsOld(url, tablename="", topic=None):
df = get_original_df(url, tablename)
cols = df.columns
print("_________")
print(cols)
indices = [i for i, s in enumerate(cols) if (('date' in s) or ('Date' in s))]
dateindex = indices[0]
# Get range of indices that don't include 0 or dateindex:
indices = []
for i in range(len(cols)):
indices.append(i)
indices.remove(indices[dateindex])
indices.remove(indices[0])
#Make new df that's good for html
a = pd.DataFrame()
a['title'] = df[cols[0]]
a['date'] = df[cols[dateindex]]
a['description'] = ""
for i in indices:
a['description'] += cols[i]+": "+df[cols[i]]+' '
a['url'] = url
a['atopic'] = topic #idk if i'll assign topics into SQLAlchemy this way, but it's worth coding this in it seems.
return a
#----------------------------------------------------------------
# This is all that's necessary:
def wiki2artifacts(url, tableindex, topicid, datecolumnname):
tables = pd.read_html(url)
table = tables[tableindex]
df = | pd.DataFrame(table) | pandas.DataFrame |
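# Side note (added, not from the original module): pd.read_html returns a
# list of DataFrames, one per <table> element it finds, which is why the
# code above indexes into the returned list. A minimal self-contained
# illustration with an inline HTML string (needs lxml or bs4 + html5lib):
def _demo_read_html():
    html = "<table><tr><th>A</th></tr><tr><td>1</td></tr></table>"
    tables = pd.read_html(html)  # -> [DataFrame with a single column 'A']
    return tables[0]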
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 01:01:33 2017
@author: Flamingo
"""
#%%
from bs4 import BeautifulSoup
import urllib
import pandas as pd
import numpy as np
CITY_NAME = pd.read_csv('CITY_NAME2.csv')
PORT_NAME = CITY_NAME[['AIRPORT_CODE','PORT']].groupby('AIRPORT_CODE',as_index=False).count()
for ind,value in PORT_NAME[13:14].iterrows():
print(value['AIRPORT_CODE'])
#%%
Port = []
Date =[]
Time = []
Temp = []
Bodytemp = []
Dew = []
Humidity =[]
Pressure = []
Visibility =[]
Wind_dir = []
Wind_speed = []
Gust_speed =[]
Event =[]
Condition = []
for vYear in range(2015, 2017):
if vYear == 2015:
Monthrange = np.arange(5, 13)
else:
Monthrange = np.arange(1, 13)
for vMonth in Monthrange:
for vDay in range(1, 32):
if vYear % 4 == 0:
if vMonth == 2 and vDay > 29:
break
else:
if vMonth == 2 and vDay > 28:
break
if vMonth in [4, 6, 9, 11] and vDay > 30:
break
#%%
theDate = str(vYear) + "/" + str(vMonth) + "/" + str(vDay)
theDate2 = str(vYear) + "-" + str(vMonth).zfill(2) + "-" + str(vDay).zfill(2)
print(theDate2)
theport = value['AIRPORT_CODE']
theurl = "http://www.wunderground.com/history/airport/"+ theport +"/" + theDate + "/DailyHistory.html?MR=1"
thepage = urllib.urlopen(theurl)
soup = BeautifulSoup(thepage, "html.parser")
soup_detail = soup.find_all('tr')
Row_Num = len(soup_detail)
print(Row_Num)
col_count = []
for ind in soup_detail:
col_count.append( len(ind.find_all('td') ) )
col_count = np.asarray(col_count )
# print(col_count[-1])
if col_count[-1].tolist() in [12,13]:
first_row = np.amin(np.where(col_count == col_count[-1]))
last_row = np.amax(np.where(col_count ==col_count[-1]))
Col_Num = col_count[-1]
for row_ind in np.arange(first_row,last_row+1):
soup_detail_line = soup_detail[row_ind].find_all('td')
if Col_Num == 13:
Col_indadd = 1
Bodytemp.append(soup_detail_line[2].text.split()[0])
else:
Col_indadd = 0
Bodytemp.append('-')
Port.append(theport)
Date.append(theDate2)
Time.append( soup_detail_line[0].text)
Temp.append( soup_detail_line[1].text.split()[0] )
Dew.append( soup_detail_line[2 + Col_indadd].text.split()[0])
Humidity.append(soup_detail_line[3 + Col_indadd].text.split()[0])
Pressure.append(soup_detail_line[4 + Col_indadd].text.split()[0])
Visibility.append(soup_detail_line[5+ Col_indadd].text.split()[0])
Wind_dir.append(soup_detail_line[6+ Col_indadd].text)
Wind_speed.append(soup_detail_line[7+ Col_indadd].text.split()[0])
Gust_speed.append(soup_detail_line[8+ Col_indadd].text.split()[0])
Event.append(soup_detail_line[10+ Col_indadd].text.strip() )
Condition.append(soup_detail_line[11+ Col_indadd].text)
#%%
Port = | pd.DataFrame(Port, columns=['Port']) | pandas.DataFrame |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pandas as pd
from argparse import ArgumentParser
def save_new_labels(df_labels: pd.DataFrame, filename="labels_concatenated.csv"):
df_labels.to_csv(filename, index_label="img_name")
def main(args):
first_labels_path = args.first_labels_path
second_labels_path = args.second_labels_path
df_first_labels = | pd.read_csv(first_labels_path, index_col="img_name") | pandas.read_csv |
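# Hedged sketch (added): one straightforward way the two label frames could
# be combined and written back out with save_new_labels(). The helper name
# is hypothetical; the original main() may do this differently.
def _concat_labels_sketch(df_first_labels, df_second_labels):
    combined = pd.concat([df_first_labels, df_second_labels])
    save_new_labels(combined)
    return combined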
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import plot,iplot
from scipy.stats import norm, kurtosis
import os
from scipy.signal import butter, lfilter, freqz
from scipy import signal
from sklearn.model_selection import train_test_split
from collections import Counter
import warnings
warnings.filterwarnings(action='once')
plt.rcParams["figure.figsize"] = 16,12
def create_labels():
labels = pd.read_csv('../data/RawData/labels.txt', sep=" ", header=None)
labels.columns = ['experiment','person','activity','start','end']
return labels
def read_data():
"""Read all data to a dataframe"""
list_df = [] #a list to collect the dataframes
for i in range(1,62):
if i < 10:
i = '0' + str(i)
else:
i = str(i)
for j in os.listdir('../data/RawData/'):
if "acc_exp" + i in j:
acc_path = "../data/RawData/" + j
elif "gyro_exp" + i in j:
gyro_path = "../data/RawData/" + j
acc_df = pd.read_csv(acc_path, sep = " ", names=['acc_x','acc_y','acc_z'])
gyro_df = pd.read_csv(gyro_path, sep = " ", names=['gyro_x','gyro_y','gyro_z'])
exp_df = | pd.concat([acc_df,gyro_df],1) | pandas.concat |
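# Hedged sketch (added, not part of the original notebook): one way the
# labels frame from create_labels() could be used to slice an experiment's
# signal frame into labelled windows. The helper name and the assumption
# that 'start'/'end' are row positions in the raw signal files are mine.
def label_windows(exp_df, labels, experiment_id):
    windows = []
    for _, row in labels[labels.experiment == experiment_id].iterrows():
        segment = exp_df.iloc[row.start:row.end + 1].copy()
        segment["activity"] = row.activity
        windows.append(segment)
    return pd.concat(windows, ignore_index=True) if windows else pd.DataFrame()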
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
DemandChargeReduction.py
This Python class contains methods and attributes specific to service analysis within StorageVet.
"""
from storagevet.ValueStreams.ValueStream import ValueStream
import numpy as np
import cvxpy as cvx
import pandas as pd
import sys
from storagevet.Finances import Financial
from storagevet.ErrorHandling import *
import copy
import time
SATURDAY = 5
class DemandChargeReduction(ValueStream):
""" Retail demand charge reduction. A behind the meter service.
"""
def __init__(self, params):
""" Generates the objective function, finds and creates constraints.
Args:
params (Dict): input parameters
"""
ValueStream.__init__(self, 'DCM', params)
# self.demand_rate = params['rate']
self.tariff = params['tariff']
self.billing_period = params['billing_period']
self.growth = params['growth']/100
self.billing_period_bill = pd.DataFrame()
self.monthly_bill = pd.DataFrame()
def grow_drop_data(self, years, frequency, load_growth):
""" Adds data by growing the given data OR drops any extra data that might have slipped in.
Updates variables that hold timeseries data after adding growth data. This method should be called after
add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of loads in this simulation
This function adds billing periods to the tariff that match the given year's structure, but the values have
a growth rate applied to them. Then it lists them within self.billing_period.
"""
data_year = self.billing_period.index.year.unique()
no_data_year = {pd.Period(year) for year in years} - {pd.Period(year) for year in data_year} # which years do we not have data for
if len(no_data_year) > 0:
for yr in no_data_year:
source_year = pd.Period(max(data_year))
years = yr.year - source_year.year
first_day = '1/1/' + str(yr.year)
last_day = '1/1/' + str(yr.year + 1)
new_index = pd.date_range(start=first_day, end=last_day, freq=frequency, closed='left')
size = new_index.size
# make new tariff with charges that have increase with user-defined growth rate
add_tariff = self.tariff.reset_index()
add_tariff.loc[:, 'Value'] = self.tariff['Value'].values*(1+self.growth)**years
add_tariff.loc[:, 'Billing Period'] = self.tariff.index + self.tariff.index.max()
add_tariff = add_tariff.set_index('Billing Period', drop=True)
# Build Energy Price Vector based on the new year
temp = | pd.DataFrame(index=new_index) | pandas.DataFrame |
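# Illustrative numbers (added, not from storagevet): the projection above
# scales each charge by (1 + growth) ** years_out. For example, a $10/kW
# charge with growth = 0.03 projected two years past the last data year:
def _project_charge_sketch(value=10.0, growth=0.03, years_out=2):
    # mirrors add_tariff['Value'] = Value * (1 + growth) ** years above
    return value * (1 + growth) ** years_out  # -> 10.609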
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the previously defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
        # single series name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # index should return the same result as the default index without name,
        # i.e. index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), | u(' bb') | pandas.compat.u |
"""
Usage:
aggregate-makespan.py -i FOLDER [--output FOLDER] [--start-run INT] [--end-run INT]
Required Options:
-i FOLDER --input FOLDER where the experiments are
Options:
-o FOLDER --output FOLDER where the output should go
[default: input]
--start-run INT only include runs starting at start-run
--end-run INT only include runs ending at and including end-run
"""
from docopt import docopt
import pandas as pd
import os
import sys
import re
from datetime import datetime,timedelta
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
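# Illustrative example (assumes run folders are named like 'run1', 'run2', ...):
#   sorted(['run10', 'run2', 'run1'], key=natural_keys) -> ['run1', 'run2', 'run10']
# i.e. the numeric parts are compared as integers instead of lexicographically.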
args=docopt(__doc__,help=True,options_first=False)
path = args["--input"].rstrip("/")
outPath = args["--input"] if args["--output"] == "input" else args["--output"]
basePath = outPath
rawOutPath = outPath.rstrip("/") + "/raw_total_makespan.csv"
outPath = outPath.rstrip("/") + "/total_makespan.csv"
startRun=args["--start-run"] if args["--start-run"] else 1
endRun=args["--end-run"] if args["--end-run"] else False
df = pd.DataFrame()
df4 = | pd.DataFrame() | pandas.DataFrame |
import pandas as p  # import the libraries needed so far and give them short aliases
data_train = '../homework/train.csv'  # path to the raw training data
data_train = p.read_csv(data_train)  # load the training set
print(data_train.info())  # inspect the column dtypes
print(data_train.describe())  # quick look at the summary statistics
### Load and inspect the raw data
import matplotlib.pyplot as pt
import numpy as n
pt.rcParams['font.sans-serif'] = ['Simhei']  # use the SimHei font so CJK text does not render as boxes
pt.rcParams['axes.unicode_minus'] = False  # keep minus signs from rendering as boxes
fig = pt.figure()
fig.set(alpha=0.2)  # set the figure alpha parameter
pt.subplot2grid((2,3),(0,0))  # split one large figure into sub-plots and set their positions
data_train.Survived.value_counts().plot(kind='bar')  # bar chart of survival counts
pt.title("Survival (1 = survived)")
pt.ylabel("Count")
pt.subplot2grid((2,3),(0,1))
data_train.Pclass.value_counts().plot(kind="bar")
pt.ylabel("Count")
pt.title("Passenger class")
pt.subplot2grid((2,3),(0,2))
pt.scatter(data_train.Survived, data_train.Age)
pt.ylabel("Age")
pt.grid(b=True, which='major', axis='y')
pt.title("Age (1 = survived)")
pt.subplot2grid((2,3),(1,0), colspan=2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
pt.xlabel("Age")
pt.ylabel("Density")
pt.title("Age distribution by passenger class")
pt.legend(('1st class', '2nd class', '3rd class'), loc='best')  # legend
pt.subplot2grid((2,3),(1,2))
data_train.Embarked.value_counts().plot(kind='bar')
pt.title("Passengers boarding at each port")
pt.ylabel("Count")
pt.show()
# a rough visual overview of the raw data
fig = pt.figure()
fig.set(alpha=0.2)  # set the figure alpha parameter
Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()  # non-survivor counts per passenger class
print(Survived_0)
Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
df = p.DataFrame({'Survived': Survived_1, 'Did not survive': Survived_0})
df.plot(kind='bar', stacked=False)
pt.title("Is passenger class related to survival?")
pt.xlabel("Passenger class")
pt.ylabel("Count")
pt.show()
# hypothesis: is passenger class (i.e. social rank) related to the survival rate?
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
df=p.DataFrame({'Male':Survived_m, 'Female':Survived_f})
df.plot(kind='bar', stacked=False)
pt.title("Is sex related to survival rate?")
pt.xlabel("Sex")
pt.ylabel("Total passengers")
pt.show()
# hypothesis: sex is related to the survival rate
fig = pt.figure()
fig.set(alpha=0.2) # set the alpha parameter for the figure colours
Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
df=p.DataFrame({'Survived':Survived_1, 'Did not survive':Survived_0})
df.plot(kind='bar', stacked=False)
pt.title("Is the embarkation port related to survival rate?")
pt.xlabel("Port")
pt.ylabel("Total passengers")
pt.show()
# hypothesis: the embarkation port is related to the survival rate
g = data_train.groupby(['SibSp','Survived'])
df = p.DataFrame(g.count()['PassengerId'])
print(df)
g = data_train.groupby(['Parch','Survived'])
df = p.DataFrame(g.count()['PassengerId'])
print(df)
# check whether having siblings/spouses or parents/children aboard is related to survival
### form hypotheses and explore the data
### handle missing ages
from sklearn.ensemble import RandomForestRegressor # import the random forest regressor from sklearn
### use a RandomForest to fill in the missing Age values
def set_missing_ages(df): # define the imputation function
    age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']] # keep the existing usable features
    known_age = age_df[age_df.Age.notnull()].values # split rows into known and unknown ages
unknown_age = age_df[age_df.Age.isnull()].values
    y = known_age[:, 0] # y is the age we want to predict for the missing rows
    x = known_age[:, 1:] # x holds the feature values we provide
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(x, y) # fit the random forest on x and y with the chosen settings
    predictedAges = rfr.predict(unknown_age[:, 1::]) # predict the unknown ages with the fitted forest
    df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges # write the predictions into the missing Age cells
    return df, rfr # return the dataframe and the fitted model
def set_Cabin_type(df): # mark Cabin as "Yes"/"No" depending on whether a value is present
    df.loc[ (df.Cabin.notnull()), 'Cabin' ] = "Yes"
    df.loc[ (df.Cabin.isnull()), 'Cabin' ] = "No"
    return df # return the dataframe
data_train, rfr = set_missing_ages(data_train) # store the predicted ages back into the training set
data_train = set_Cabin_type(data_train) # store the Yes/No Cabin flags in the training set
data_train.info() # inspect the cleaned data again
### handle missing embarkation ports
def set_Embarked_type(df): # recode the ports first so that fillna can be used to fill the gaps
df.loc[ (df.Embarked=='S'), 'Embarked' ] = "1"
df.loc[ (df.Embarked=='C'), 'Embarked' ] = "2"
df.loc[ (df.Embarked=='Q'), 'Embarked' ] = "3"
return df
data_train = set_Embarked_type(data_train)
data_train.Embarked = data_train.Embarked.fillna(0)
data_train.Embarked = list(map(int,data_train.Embarked))
print(data_train.Embarked.mean())
def set_Embarked_type(df): # map the filled values back onto the training set
df.loc[ (df.Embarked==0), 'Embarked' ] = "S"
return df
data_train = set_Embarked_type(data_train)
### use a random forest to handle fares recorded as 0
def set_missing_fare(df): # define the fare-imputation function
    fare_df = df[['Fare','Age','Parch', 'SibSp', 'Pclass']] # keep the existing usable features
    known_fare = fare_df.loc[fare_df.Fare != 0].values # split rows into known and zero fares
unknown_fare = fare_df.loc[fare_df.Fare == 0].values
    y1 = known_fare[:, 0] # y1 is the fare we want to predict for the zero-fare rows
    x1 = known_fare[:, 1:] # x1 holds the feature values we provide
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(x1, y1) # fit the random forest on x1 and y1 with the chosen settings
    predictedAges = rfr.predict(unknown_fare[:, 1::]) # predict the missing fares with the fitted forest
    df.loc[ df.Fare == 0, 'Fare' ] = predictedAges # write the predictions into the zero-fare cells
    return df, rfr # return the dataframe and the fitted model
data_train, rfr = set_missing_fare(data_train)
print(data_train.Fare.describe())
### data preprocessing
### start modelling; logistic regression is used here
data_train.Pclass = data_train.Pclass.astype('object')
cate = p.get_dummies(data_train[['Cabin','Sex','Embarked','Pclass']])
data_new = data_train[['Survived','Age','SibSp','Parch','Fare']].join(cate) # reshape and tidy the data
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(data_new.iloc[:,1:], data_new.Survived, test_size = 0.2, random_state=34)
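# hold out 20% of the rows for evaluation; the fixed random_state keeps the split reproducible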
lr = LogisticRegression()
lr.fit(x_train,y_train) # train the model on the training data
pred = lr.predict(x_test)
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,pred)) # precision/recall report for the predictions
print(accuracy_score(y_test,pred)) # classification accuracy score
# try a different algorithm: a decision tree
from sklearn.tree import *
dt = DecisionTreeClassifier(random_state=99,splitter='best', presort=True)
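# note: the presort argument was deprecated and later removed in newer scikit-learn releases; drop it there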
dt.fit(x_train,y_train)
pred = dt.predict(x_test)
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,pred))
print(accuracy_score(y_test,pred))
#### model building
data_test = p.read_csv('../homework/test.csv') # load the test set
def set_missing_ages(df,rfr):
age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
known_age = age_df[age_df.Age.notnull()].values
unknown_age = age_df[age_df.Age.isnull()].values
    y3 = known_age[:, 0] # target ages
    X3 = known_age[:, 1:] # feature values
    predictedAges = rfr.predict(unknown_age[:, 1::]) # use the fitted model to predict the unknown ages
    df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges # fill the original missing values with the predictions
return df
data_test = set_missing_ages(data_test, rfr)
data_test = set_Cabin_type(data_test)
data_test.Pclass = data_test.Pclass.astype('object')
cate_test = | p.get_dummies(data_test[['Cabin','Sex','Embarked','Pclass']]) | pandas.get_dummies |
"""Miscellaneous internal PyJanitor helper functions."""
import fnmatch
import functools
import os
import re
import socket
import sys
import warnings
from collections.abc import Callable as dispatch_callable
from itertools import chain, combinations
from typing import (
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Tuple,
Union,
)
import numpy as np
import pandas as pd
from pandas.api.types import (
CategoricalDtype,
is_extension_array_dtype,
is_list_like,
is_scalar,
)
from pandas.core.common import apply_if_callable
from .errors import JanitorError
def check(varname: str, value, expected_types: list):
"""
One-liner syntactic sugar for checking types.
It can also check callables.
Should be used like this::
check('x', x, [int, float])
:param varname: The name of the variable (for diagnostic error message).
:param value: The value of the varname.
:param expected_types: The types we expect the item to be.
:raises TypeError: if data is not the expected type.
"""
is_expected_type: bool = False
for t in expected_types:
if t is callable:
is_expected_type = t(value)
else:
is_expected_type = isinstance(value, t)
if is_expected_type:
break
if not is_expected_type:
raise TypeError(
"{varname} should be one of {expected_types}".format(
varname=varname, expected_types=expected_types
)
)
def _clean_accounting_column(x: str) -> float:
"""
Perform the logic for the `cleaning_style == "accounting"` attribute.
This is a private function, not intended to be used outside of
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method.
:returns: An object with a cleaned column.
"""
y = x.strip()
y = y.replace(",", "")
y = y.replace(")", "")
y = y.replace("(", "-")
if y == "-":
return 0.00
return float(y)
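# e.g. _clean_accounting_column(" (1,500.00) ") returns -1500.0, and a bare "-" returns 0.00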
def _currency_column_to_numeric(x, cast_non_numeric=None) -> str:
"""
Perform logic for changing cell values.
This is a private function intended to be used only in
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method, after being passed
through `partial`.
"""
acceptable_currency_characters = {
"-",
".",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"0",
}
if len(x) == 0:
return "ORIGINAL_NA"
if cast_non_numeric:
if x in cast_non_numeric.keys():
check(
"{%r: %r}" % (x, str(cast_non_numeric[x])),
cast_non_numeric[x],
[int, float],
)
return cast_non_numeric[x]
return "".join(i for i in x if i in acceptable_currency_characters)
return "".join(i for i in x if i in acceptable_currency_characters)
def _replace_empty_string_with_none(column_series):
column_series.loc[column_series == ""] = None
return column_series
def _replace_original_empty_string_with_none(column_series):
column_series.loc[column_series == "ORIGINAL_NA"] = None
return column_series
def _strip_underscores(
df: pd.DataFrame, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""
Strip underscores from DataFrames column names.
Underscores can be stripped from the beginning, end or both.
.. code-block:: python
df = _strip_underscores(df, strip_underscores='left')
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame with underscores removed.
"""
df = df.rename(
columns=lambda x: _strip_underscores_func(x, strip_underscores)
)
return df
def _strip_underscores_func(
col: str, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""Strip underscores from a string."""
underscore_options = [None, "left", "right", "both", "l", "r", True]
if strip_underscores not in underscore_options:
raise JanitorError(
f"strip_underscores must be one of: {underscore_options}"
)
if strip_underscores in ["left", "l"]:
col = col.lstrip("_")
elif strip_underscores in ["right", "r"]:
col = col.rstrip("_")
elif strip_underscores == "both" or strip_underscores is True:
col = col.strip("_")
return col
def import_message(
submodule: str,
package: str,
conda_channel: str = None,
pip_install: bool = False,
):
"""
Return warning if package is not found.
Generic message for indicating to the user when a function relies on an
optional module / package that is not currently installed. Includes
installation instructions. Used in `chemistry.py` and `biology.py`.
:param submodule: pyjanitor submodule that needs an external dependency.
:param package: External package this submodule relies on.
:param conda_channel: Conda channel package can be installed from,
if at all.
:param pip_install: Whether package can be installed via pip.
"""
is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
installable = True
if is_conda:
if conda_channel is None:
installable = False
installation = f"{package} cannot be installed via conda"
else:
installation = f"conda install -c {conda_channel} {package}"
else:
if pip_install:
installation = f"pip install {package}"
else:
installable = False
installation = f"{package} cannot be installed via pip"
print(
f"To use the janitor submodule {submodule}, you need to install "
f"{package}."
)
print()
if installable:
print("To do so, use the following command:")
print()
print(f" {installation}")
else:
print(f"{installation}")
def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
"""
Raises error if a function operating on a `DataFrame` is not idempotent,
that is, `func(func(df)) = func(df)` is not true for all `df`.
:param func: A python method.
:param df: A pandas `DataFrame`.
:param args: Positional arguments supplied to the method.
:param kwargs: Keyword arguments supplied to the method.
:raises ValueError: If `func` is found to not be idempotent for the given
`DataFrame` `df`.
"""
if not func(df, *args, **kwargs) == func(
func(df, *args, **kwargs), *args, **kwargs
):
raise ValueError(
"Supplied function is not idempotent for the given " "DataFrame."
)
def deprecated_alias(**aliases) -> Callable:
"""
Used as a decorator when deprecating old function argument names, while
keeping backwards compatibility.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
Functional usage example:
.. code-block:: python
@deprecated_alias(a='alpha', b='beta')
def simple_sum(alpha, beta):
return alpha + beta
:param aliases: Dictionary of aliases for a function's arguments.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rename_kwargs(func.__name__, kwargs, aliases)
return func(*args, **kwargs)
return wrapper
return decorator
def refactored_function(message: str) -> Callable:
"""Used as a decorator when refactoring functions
Implementation is inspired from `Hacker Noon`_.
    .. _Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987
Functional usage example:
.. code-block:: python
@refactored_function(
message="simple_sum() has been refactored. Use hard_sum() instead."
)
def simple_sum(alpha, beta):
return alpha + beta
:param message: Message to use in warning user about refactoring.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
def emit_warning(*args, **kwargs):
warnings.warn(message, FutureWarning)
return func(*args, **kwargs)
return emit_warning
return decorator
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
"""
Used to update deprecated argument names with new names. Throws a
TypeError if both arguments are provided, and warns if old alias is used.
Nothing is returned as the passed ``kwargs`` are modified directly.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
:param func_name: name of decorated function.
:param kwargs: Arguments supplied to the method.
:param aliases: Dictionary of aliases for a function's arguments.
:raises TypeError: if both arguments are provided.
""" # noqa: E501
for old_alias, new_alias in aliases.items():
if old_alias in kwargs:
if new_alias in kwargs:
raise TypeError(
f"{func_name} received both {old_alias} and {new_alias}"
)
warnings.warn(
f"{old_alias} is deprecated; use {new_alias}",
DeprecationWarning,
)
kwargs[new_alias] = kwargs.pop(old_alias)
def check_column(
df: pd.DataFrame, column_names: Union[Iterable, str], present: bool = True
):
"""
One-liner syntactic sugar for checking the presence or absence of columns.
Should be used like this::
        check_column(df, ['a', 'b'], present=True)
This will check whether columns "a" and "b" are present in df's columns.
One can also guarantee that "a" and "b" are not present
by switching to ``present = False``.
:param df: The name of the variable.
:param column_names: A list of column names we want to check to see if
present (or absent) in df.
:param present: If True (default), checks to see if all of column_names
are in df.columns. If False, checks that none of column_names are
in df.columns.
:raises ValueError: if data is not the expected type.
"""
if isinstance(column_names, str) or not isinstance(column_names, Iterable):
column_names = [column_names]
for column_name in column_names:
if present and column_name not in df.columns: # skipcq: PYL-R1720
raise ValueError(
f"{column_name} not present in dataframe columns!"
)
elif not present and column_name in df.columns:
raise ValueError(
f"{column_name} already present in dataframe columns!"
)
def skipna(f: Callable) -> Callable:
"""
Decorator for escaping np.nan and None in a function
Should be used like this::
df[column].apply(skipna(transform))
or::
@skipna
def transform(x):
pass
:param f: the function to be wrapped
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
if (type(x) is float and np.isnan(x)) or x is None:
return np.nan
return f(x, *args, **kwargs)
return _wrapped
def skiperror(
f: Callable, return_x: bool = False, return_val=np.nan
) -> Callable:
"""
Decorator for escaping any error in a function.
Should be used like this::
df[column].apply(
skiperror(transform, return_val=3, return_x=False))
or::
@skiperror(return_val=3, return_x=False)
def transform(x):
pass
:param f: the function to be wrapped
:param return_x: whether or not the original value that caused error
should be returned
:param return_val: the value to be returned when an error hits.
Ignored if return_x is True
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
try:
return f(x, *args, **kwargs)
except Exception: # skipcq: PYL-W0703
if return_x:
return x
return return_val
return _wrapped
def _computations_expand_grid(others: dict) -> pd.DataFrame:
"""
Creates a cartesian product of all the inputs in `others`.
Combines Numpy's `mgrid`, with the `take` method in numpy/Pandas,
to expand each input to the length of the cumulative product of
all inputs in `others`.
There is a performance penalty for small entries (length less than 10)
    in using this method, instead of `itertools.product`; however, there are
    significant performance benefits as the size of the data increases.
Another benefit of this approach,
in addition to the significant performance gains,
is the preservation of data types. This is particularly relevant for
Pandas' extension arrays dtypes (categoricals, nullable integers, ...).
A dataframe of all possible combinations is returned.
"""
for key, _ in others.items():
check("key", key, [str])
grid = {}
for key, value in others.items():
if is_scalar(value):
grid[key] = pd.Series([value])
elif is_extension_array_dtype(value) and not (
isinstance(value, pd.Series)
):
grid[key] = pd.Series(value)
elif is_list_like(value):
if not isinstance(
value, (pd.DataFrame, pd.Series, np.ndarray, list, pd.Index)
):
grid[key] = list(value)
else:
grid[key] = value
others = None
mgrid_values = [slice(len(value)) for _, value in grid.items()]
mgrid_values = np.mgrid[mgrid_values]
mgrid_values = map(np.ravel, mgrid_values)
grid = zip([*grid.items()], mgrid_values)
grid = ((*left, right) for left, right in grid)
grid = (
_expand_grid(value, key, mgrid_values)
for key, value, mgrid_values in grid
)
grid = pd.concat(grid, axis="columns", sort=False)
return grid
@functools.singledispatch
def _expand_grid(value, key, mgrid_values, mode="expand_grid"):
"""
Base function for dispatch of `_expand_grid`.
`mode` parameter is added, to make the function reusable
in the `_computations_complete` function.
Also, allowing `key` as None enables reuse in the
`_computations_complete` function.
"""
raise TypeError(
f"{type(value).__name__} data type is not supported in `expand_grid`."
)
@_expand_grid.register(list) # noqa: F811
def _sub_expand_grid(value, key, mgrid_values): # noqa: F811
"""
Expands the list object based on `mgrid_values`.
Converts to an array and passes it
to the `_expand_grid` function for arrays.
`mode` parameter is added, to make the function reusable
in the `_computations_complete` function.
Also, allowing `key` as None enables reuse in the
`_computations_complete` function.
Returns Series with name if 1-Dimensional array
or DataFrame if 2-Dimensional array with column names.
"""
if not value:
raise ValueError("""list object cannot be empty.""")
value = np.array(value)
return _expand_grid(value, key, mgrid_values)
@_expand_grid.register(np.ndarray)
def _sub_expand_grid( # noqa: F811
value, key, mgrid_values, mode="expand_grid"
):
"""
Expands the numpy array based on `mgrid_values`.
Ensures array dimension is either 1 or 2.
`mode` parameter is added, to make the function reusable
in the `_computations_complete` function.
Also, allowing `key` as None enables reuse in the
`_computations_complete` function.
Returns Series with name if 1-Dimensional array
or DataFrame if 2-Dimensional array with column names.
The names are derived from the `key` parameter.
"""
if not (value.size > 0):
raise ValueError("""array cannot be empty.""")
if value.ndim > 2:
raise ValueError("""expand_grid works only on 1D and 2D structures.""")
value = value.take(mgrid_values, axis=0)
if value.ndim == 1:
value = pd.Series(value)
# a tiny bit faster than chaining with `rename`
value.name = key
else:
value = pd.DataFrame(value)
# a tiny bit faster than using `add_prefix`
value.columns = value.columns.map(lambda column: f"{key}_{column}")
return value
@_expand_grid.register(pd.Series)
def _sub_expand_grid( # noqa: F811
value, key, mgrid_values, mode="expand_grid"
):
"""
Expands the Series based on `mgrid_values`.
`mode` parameter is added, to make the function reusable
in the `_computations_complete` function.
Also, allowing `key` as None enables reuse in the
`_computations_complete` function.
Checks for empty Series and returns modified keys.
Returns Series with new Series name.
"""
if value.empty:
raise ValueError("""Series cannot be empty.""")
value = value.take(mgrid_values)
value.index = np.arange(len(value))
if mode != "expand_grid":
return value
if value.name:
value.name = f"{key}_{value.name}"
else:
value.name = key
return value
@_expand_grid.register(pd.DataFrame)
def _sub_expand_grid( # noqa: F811
value, key, mgrid_values, mode="expand_grid"
):
"""
Expands the DataFrame based on `mgrid_values`.
`mode` parameter is added, to make the function reusable
in the `_computations_complete` function.
Also, allowing `key` as None enables reuse in the
`_computations_complete` function.
Checks for empty dataframe and returns modified keys.
Returns a DataFrame with new column names.
"""
if value.empty:
raise ValueError("""DataFrame cannot be empty.""")
value = value.take(mgrid_values)
value.index = np.arange(len(value))
if mode != "expand_grid":
return value
if isinstance(value.columns, pd.MultiIndex):
value.columns = [f"{key}_{num}" for num, _ in enumerate(value.columns)]
else:
value.columns = value.columns.map(lambda column: f"{key}_{column}")
return value
@_expand_grid.register(pd.Index)
def _sub_expand_grid( # noqa: F811
value, key, mgrid_values, mode="expand_grid"
):
"""
Expands the Index based on `mgrid_values`.
`mode` parameter is added, to make the function reusable
in the `_computations_complete` function.
Also, allowing `key` as None enables reuse in the
`_computations_complete` function.
Checks for empty Index and returns modified keys.
Returns a DataFrame (if MultiIndex) with new column names,
or a Series with a new name.
"""
if value.empty:
raise ValueError("""Index cannot be empty.""")
value = value.take(mgrid_values)
if mode != "expand_grid":
return value
if isinstance(value, pd.MultiIndex):
value = value.to_frame(index=False)
value.columns = value.columns.map(lambda column: f"{key}_{column}")
else:
value = value.to_series(index=np.arange(len(value)))
if value.name:
value.name = f"{key}_{value.name}"
else:
value.name = key
return value
def _data_checks_complete(
df: pd.DataFrame,
columns: List[Union[List, Tuple, Dict, str]],
by: Optional[Union[list, str]] = None,
):
"""
Function to check parameters in the `complete` function.
Checks the type of the `columns` parameter, as well as the
types within the `columns` parameter.
Check is conducted to ensure that column names are not repeated.
Also checks that the names in `columns` actually exist in `df`.
Returns `df`, `columns`, `column_checker`,
and `by` if all checks pass.
"""
# TODO: get `complete` to work on MultiIndex columns,
# if there is sufficient interest with use cases
if isinstance(df.columns, pd.MultiIndex):
raise ValueError(
"""
`complete` does not support MultiIndex columns.
"""
)
check("columns", columns, [list])
columns = [
list(grouping) if isinstance(grouping, tuple) else grouping
for grouping in columns
]
column_checker = []
for grouping in columns:
check("grouping", grouping, [list, dict, str])
if not grouping:
raise ValueError("grouping cannot be empty")
if isinstance(grouping, str):
column_checker.append(grouping)
else:
column_checker.extend(grouping)
# columns should not be duplicated across groups
column_checker_no_duplicates = set()
for column in column_checker:
if column in column_checker_no_duplicates:
raise ValueError(
f"""{column} column should be in only one group."""
)
column_checker_no_duplicates.add(column) # noqa: PD005
check_column(df, column_checker)
column_checker_no_duplicates = None
if by is not None:
if isinstance(by, str):
by = [by]
check("by", by, [list])
return df, columns, column_checker, by
def _computations_complete(
df: pd.DataFrame,
columns: List[Union[List, Tuple, Dict, str]],
by: Optional[Union[list, str]] = None,
) -> pd.DataFrame:
"""
This function computes the final output for the `complete` function.
If `by` is present, then groupby apply is used.
For some cases, the `stack/unstack` combination is preferred; it is more
efficient than `reindex`, as the size of the data grows. It is only
applicable if all the entries in `columns` are strings, there are
no nulls(stacking implicitly removes nulls in columns),
the length of `columns` is greater than 1, and the index
has no duplicates.
If there is a dictionary in `columns`, it is possible that all the values
of a key, or keys, may not be in the existing column with the same key(s);
as such, a union of the current index and the generated index is executed,
to ensure that all combinations are in the final dataframe.
A dataframe, with rows of missing values, if any, is returned.
"""
df, columns, column_checker, by = _data_checks_complete(df, columns, by)
dict_present = any((isinstance(entry, dict) for entry in columns))
all_strings = all(isinstance(column, str) for column in columns)
df = df.set_index(column_checker)
df_index = df.index
df_names = df_index.names
any_nulls = any(
df_index.get_level_values(name).hasnans for name in df_names
)
if not by:
df = _base_complete(df, columns, all_strings, any_nulls, dict_present)
# a better (and faster) way would be to create a dataframe
# from the groupby ...
# solution here got me thinking
# https://stackoverflow.com/a/66667034/7175713
# still thinking on how to improve speed of groupby apply
else:
df = df.groupby(by).apply(
_base_complete,
columns,
all_strings,
any_nulls,
dict_present,
)
df = df.drop(columns=by)
df = df.reset_index()
return df
def _base_complete(
df: pd.DataFrame,
columns: List[Union[List, Tuple, Dict, str]],
all_strings: bool,
any_nulls: bool,
dict_present: bool,
) -> pd.DataFrame:
df_empty = df.empty
df_index = df.index
unique_index = df_index.is_unique
columns_to_stack = None
if all_strings and (not any_nulls) and (len(columns) > 1) and unique_index:
if df_empty:
df["dummy"] = 1
columns_to_stack = columns[1:]
df = df.unstack(columns_to_stack) # noqa: PD010
df = df.stack(columns_to_stack, dropna=False) # noqa: PD013
if df_empty:
df = df.drop(columns="dummy")
columns_to_stack = None
return df
indexer = _create_indexer_for_complete(df_index, columns)
if unique_index:
if dict_present:
indexer = df_index.union(indexer, sort=None)
df = df.reindex(indexer)
else:
df = df.join(pd.DataFrame([], index=indexer), how="outer")
return df
def _create_indexer_for_complete(
df_index: pd.Index,
columns: List[Union[List, Dict, str]],
) -> pd.DataFrame:
"""
This creates the index that will be used
to expand the dataframe in the `complete` function.
A pandas Index is returned.
"""
complete_columns = (
_complete_column(column, df_index) for column in columns
)
complete_columns = (
(entry,) if not isinstance(entry, list) else entry
for entry in complete_columns
)
complete_columns = chain.from_iterable(complete_columns)
indexer = [*complete_columns]
if len(indexer) > 1:
indexer = _complete_indexer_expand_grid(indexer)
else:
indexer = indexer[0]
return indexer
def _complete_indexer_expand_grid(indexer):
"""
Generate indices to expose explicitly missing values,
using the `expand_grid` function.
Returns a pandas Index.
"""
indexers = []
mgrid_values = [slice(len(value)) for value in indexer]
mgrid_values = np.mgrid[mgrid_values]
mgrid_values = map(np.ravel, mgrid_values)
indexer = zip(indexer, mgrid_values)
indexer = (
_expand_grid(value, None, mgrid_values, mode=None)
for value, mgrid_values in indexer
)
for entry in indexer:
if isinstance(entry, pd.MultiIndex):
names = entry.names
val = (entry.get_level_values(name) for name in names)
indexers.extend(val)
else:
indexers.append(entry)
indexer = pd.MultiIndex.from_arrays(indexers)
indexers = None
return indexer
@functools.singledispatch
def _complete_column(column, index):
"""
This function processes the `columns` argument,
to create a pandas Index or a list.
Args:
column : str/list/dict
index: pandas Index
A unique pandas Index or a list of unique pandas Indices is returned.
"""
raise TypeError(
"""This type is not supported in the `complete` function."""
)
@_complete_column.register(str) # noqa: F811
def _sub_complete_column(column, index): # noqa: F811
"""
This function processes the `columns` argument,
to create a pandas Index.
Args:
column : str
index: pandas Index
Returns:
pd.Index: A pandas Index with a single level
"""
arr = index.get_level_values(column)
if not arr.is_unique:
arr = arr.drop_duplicates()
return arr
@_complete_column.register(list) # noqa: F811
def _sub_complete_column(column, index): # noqa: F811
"""
This function processes the `columns` argument,
to create a pandas Index.
Args:
column : list
index: pandas Index
Returns:
pd.MultiIndex
"""
level_to_drop = [name for name in index.names if name not in column]
arr = index.droplevel(level_to_drop)
if not arr.is_unique:
return arr.drop_duplicates()
return arr
@_complete_column.register(dict) # noqa: F811
def _sub_complete_column(column, index): # noqa: F811
"""
This function processes the `columns` argument,
to create a pandas Index or a list.
Args:
column : dict
index: pandas Index
Returns:
list: A list of unique pandas Indices.
"""
collection = []
for key, value in column.items():
arr = apply_if_callable(value, index.get_level_values(key))
if not is_list_like(arr):
raise ValueError(
"""
Input in the supplied dictionary
must be list-like.
"""
)
if (
not isinstance(
arr, (pd.DataFrame, pd.Series, np.ndarray, pd.Index)
)
) and (not is_extension_array_dtype(arr)):
arr = pd.Index([*arr], name=key)
if arr.ndim != 1:
raise ValueError(
"""
It seems the supplied pair in the supplied dictionary
cannot be converted to a 1-dimensional Pandas object.
Kindly provide data that can be converted to
a 1-dimensional Pandas object.
"""
)
if isinstance(arr, pd.MultiIndex):
raise ValueError(
"""
MultiIndex object not acceptable
in the supplied dictionary.
"""
)
if not isinstance(arr, pd.Index):
arr = pd.Index(arr, name=key)
if arr.empty:
raise ValueError(
"""
Input in the supplied dictionary
cannot be empty.
"""
)
if not arr.is_unique:
arr = arr.drop_duplicates()
if arr.name is None:
arr.name = key
collection.append(arr)
return collection
def _data_checks_pivot_longer(
df,
index,
column_names,
names_to,
values_to,
column_level,
names_sep,
names_pattern,
sort_by_appearance,
ignore_index,
):
"""
This function raises errors if the arguments have the wrong python type,
or if an unneeded argument is provided. It also raises errors for some
other scenarios(e.g if there are no matches returned for the regular
expression in `names_pattern`, or if the dataframe has MultiIndex
columns and `names_sep` or `names_pattern` is provided).
This function is executed before proceeding to the computation phase.
Type annotations are not provided because this function is where type
checking happens.
"""
if column_level is not None:
check("column_level", column_level, [int, str])
df.columns = df.columns.get_level_values(column_level)
if index is not None:
if is_list_like(index) and (not isinstance(index, tuple)):
index = list(index)
index = _select_columns(index, df)
if column_names is not None:
if is_list_like(column_names) and (
not isinstance(column_names, tuple)
):
column_names = list(column_names)
column_names = _select_columns(column_names, df)
if isinstance(names_to, str):
names_to = [names_to]
elif isinstance(names_to, tuple):
names_to = list(names_to)
check("names_to", names_to, [list])
if not all((isinstance(word, str) for word in names_to)):
raise TypeError("All entries in `names_to` argument must be strings.")
if len(names_to) > 1:
if all((names_pattern, names_sep)):
raise ValueError(
"""
Only one of `names_pattern` or `names_sep`
should be provided.
"""
)
if (".value" in names_to) and (names_to.count(".value") > 1):
raise ValueError("There can be only one `.value` in `names_to`.")
# names_sep creates more than one column
# whereas regex with names_pattern can be limited to one column
if (len(names_to) == 1) and (names_sep is not None):
raise ValueError(
"""
For a single `names_to` value,
`names_sep` is not required.
"""
)
if names_pattern is not None:
check("names_pattern", names_pattern, [str, Pattern, List, Tuple])
if isinstance(names_pattern, (list, tuple)):
if not all(
isinstance(word, (str, Pattern)) for word in names_pattern
):
raise TypeError(
"""
All entries in the ``names_pattern`` argument
must be regular expressions.
"""
)
if len(names_pattern) != len(names_to):
raise ValueError(
"""
Length of ``names_to`` does not match
number of patterns.
"""
)
if ".value" in names_to:
raise ValueError(
"""
``.value`` is not accepted
if ``names_pattern``
is a list/tuple.
"""
)
if names_sep is not None:
check("names_sep", names_sep, [str, Pattern])
check("values_to", values_to, [str])
if (values_to in df.columns) and not any(
(
".value" in names_to,
isinstance(names_pattern, (list, tuple)),
)
):
# copied from pandas' melt source code
# with a minor tweak
raise ValueError(
"""
This dataframe has a column name that matches the
'values_to' column name of the resulting Dataframe.
Kindly set the 'values_to' parameter to a unique name.
"""
)
if any((names_sep, names_pattern)) and (
isinstance(df.columns, pd.MultiIndex)
):
raise ValueError(
"""
Unpivoting a MultiIndex column dataframe
when `names_sep` or `names_pattern` is supplied
is not supported.
"""
)
if all((names_sep is None, names_pattern is None)):
# adapted from pandas' melt source code
if (
(index is not None)
and isinstance(df.columns, pd.MultiIndex)
and (not isinstance(index, list))
):
raise ValueError(
"""
index must be a list of tuples
when columns are a MultiIndex.
"""
)
if (
(column_names is not None)
and isinstance(df.columns, pd.MultiIndex)
and (not isinstance(column_names, list))
):
raise ValueError(
"""
column_names must be a list of tuples
when columns are a MultiIndex.
"""
)
check("sort_by_appearance", sort_by_appearance, [bool])
check("ignore_index", ignore_index, [bool])
return (
df,
index,
column_names,
names_to,
values_to,
column_level,
names_sep,
names_pattern,
sort_by_appearance,
ignore_index,
)
def _sort_by_appearance_for_melt(
df: pd.DataFrame, ignore_index: bool, len_index: int
) -> pd.DataFrame:
"""
This function sorts the resulting dataframe by appearance,
via the `sort_by_appearance` parameter in `computations_pivot_longer`.
An example for `sort_by_appearance`:
Say data looks like this :
id, a1, a2, a3, A1, A2, A3
1, a, b, c, A, B, C
when unpivoted into long form, it will look like this :
id instance a A
0 1 1 a A
1 1 2 b B
2 1 3 c C
where the column `a` comes before `A`, as it was in the source data,
and in column `a`, `a > b > c`, also as it was in the source data.
A dataframe that is sorted by appearance is returned.
"""
index_sorter = None
# if the height of the new dataframe
# is the same as the height of the original dataframe,
# then there is no need to sort by appearance
length_check = any((len_index == 1, len_index == len(df)))
if not length_check:
index_sorter = np.reshape(np.arange(len(df)), (-1, len_index)).ravel(
order="F"
)
df = df.take(index_sorter)
if ignore_index:
df.index = np.arange(len(df))
return df
def _pivot_longer_extractions(
df: pd.DataFrame,
index: Optional[Union[List, Tuple]] = None,
column_names: Optional[Union[List, Tuple]] = None,
names_to: Optional[List] = None,
names_sep: Optional[Union[str, Pattern]] = None,
names_pattern: Optional[
Union[
List[Union[str, Pattern]], Tuple[Union[str, Pattern]], str, Pattern
]
] = None,
) -> Tuple:
"""
This is where the labels within the column names are separated
into new columns, and is executed if `names_sep` or `names_pattern`
is not None.
A dataframe is returned.
"""
if any((names_sep, names_pattern)):
if index:
df = df.set_index(index, append=True)
if column_names:
df = df.loc[:, column_names]
mapping = None
if names_sep:
mapping = df.columns.str.split(names_sep, expand=True)
if len(mapping.names) != len(names_to):
raise ValueError(
"""
The length of ``names_to`` does not match
the number of columns extracted.
"""
)
mapping.names = names_to
elif isinstance(names_pattern, str):
mapping = df.columns.str.extract(names_pattern, expand=True)
if mapping.isna().all(axis=None):
raise ValueError(
"""
No labels in the columns
matched the regular expression
in ``names_pattern``.
Kindly provide a regular expression
that matches all labels in the columns.
"""
)
if mapping.isna().any(axis=None):
raise ValueError(
"""
Not all labels in the columns
matched the regular expression
in ``names_pattern``.
Kindly provide a regular expression
that matches all labels in the columns.
"""
)
if len(names_to) != len(mapping.columns):
raise ValueError(
"""
The length of ``names_to`` does not match
the number of columns extracted.
"""
)
if len(mapping.columns) == 1:
mapping = pd.Index(mapping.iloc[:, 0], name=names_to[0])
else:
mapping = pd.MultiIndex.from_frame(mapping, names=names_to)
elif isinstance(names_pattern, (list, tuple)):
mapping = [
df.columns.str.contains(regex, na=False) for regex in names_pattern
]
if not np.any(mapping):
raise ValueError(
"""
Not all labels in the columns
matched the regular expression
in ``names_pattern``.
Kindly provide a regular expression
that matches all labels in the columns.
"""
)
mapping = np.select(mapping, names_to, None)
mapping = pd.Index(mapping, name=".value")
if np.any(mapping.isna()):
raise ValueError(
"""
The regular expressions in ``names_pattern``
did not return all matches.
Kindly provide a regular expression that
captures all patterns.
"""
)
outcome = None
single_index_mapping = not isinstance(mapping, pd.MultiIndex)
if single_index_mapping:
outcome = pd.Series(mapping)
outcome = outcome.groupby(outcome).cumcount()
mapping = | pd.MultiIndex.from_arrays([mapping, outcome]) | pandas.MultiIndex.from_arrays |
"""
Module parse to/from Excel
"""
# ---------------------------------------------------------------------
# ExcelFile class
import abc
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
import os
from textwrap import fill
import warnings
import numpy as np
import pandas._libs.json as json
import pandas.compat as compat
from pandas.compat import (
OrderedDict, add_metaclass, lrange, map, range, string_types, u, zip)
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
is_bool, is_float, is_integer, is_list_like)
from pandas.core import config
from pandas.core.frame import DataFrame
from pandas.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
_read_excel_doc = """
Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheet_name : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed
sheet positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing
sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
sheetname : string, int, mixed list of strings/ints, or None, default 0
.. deprecated:: 0.21.0
Use `sheet_name` instead
header : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
parse_cols : int or list, default None
.. deprecated:: 0.21.0
Pass in `usecols` instead.
usecols : int, str, list-like, or callable default None
* If None, then parse all columns,
* If int, then indicates last column to be parsed
.. deprecated:: 0.24.0
Pass in a list of ints instead from 0 to `usecols` inclusive.
* If string, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of ints, then indicates list of column numbers to be parsed.
* If list of strings, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
.. versionadded:: 0.24.0
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed)
nrows : int, default None
Number of rows to parse
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
.. deprecated:: 0.23.0
Pass in `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed)
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
Examples
--------
An example DataFrame written to a local file
>>> df_out = pd.DataFrame([('string1', 1),
... ('string2', 2),
... ('string3', 3)],
... columns=['Name', 'Value'])
>>> df_out
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> df_out.to_excel('tmp.xlsx')
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx')
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> pd.read_excel(open('tmp.xlsx','rb'))
Name Value
0 string1 1
1 string2 2
2 string3 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None)
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 string3 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float})
Name Value
0 string1 1.0
1 string2 2.0
2 string3 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx',
... na_values=['string1', 'string2'])
Name Value
0 NaN 1
1 NaN 2
2 string3 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> df = pd.DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
>>> df.to_excel('tmp.xlsx', index=False)
>>> pd.read_excel('tmp.xlsx')
a b
0 1 2
1 #2 3
>>> pd.read_excel('tmp.xlsx', comment='#')
a b
0 1 2
"""
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)
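# default writer engines per extension; xlsxwriter takes precedence for xlsx when it is installed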
def _get_default_writer(ext):
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
_default_writers['xlsx'] = 'xlsxwriter'
except ImportError:
pass
return _default_writers[ext]
def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '{engine}'"
.format(engine=engine_name))
@Appender(_read_excel_doc)
@deprecate_kwarg("parse_cols", "usecols")
@deprecate_kwarg("skip_footer", "skipfooter")
def read_excel(io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
if 'sheet' in kwds:
raise TypeError("read_excel() got an unexpected keyword argument "
"`sheet`")
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
err_msg = "Install xlrd >= 1.0.0 for Excel support"
try:
import xlrd
except ImportError:
raise ImportError(err_msg)
else:
if xlrd.__VERSION__ < LooseVersion("1.0.0"):
raise ImportError(err_msg +
". Current version " + xlrd.__VERSION__)
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
self._io = _stringify_path(io)
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: {engine}".format(engine=engine))
# If io is a url, want to keep the data as bytes so can't pass
# to get_filepath_or_buffer()
if _is_url(self._io):
io = _urlopen(self._io)
elif not isinstance(self.io, (ExcelFile, xlrd.Book)):
io, _, _, _ = get_filepath_or_buffer(self._io)
if engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
if hasattr(io, 'seek'):
try:
# GH 19779
io.seek(0)
except UnsupportedOperation:
# HTTPResponse does not support seek()
# GH 20434
pass
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
elif isinstance(self._io, compat.string_types):
self.book = xlrd.open_workbook(self._io)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def __fspath__(self):
return self._io
def parse(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
"""
Parse specified sheet(s) into a DataFrame
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if | is_integer(sheet_name) | pandas.core.dtypes.common.is_integer |
import time
import os
import shutil
import sys
from sys import argv
import pickle
import csv
from collections import defaultdict
import cProfile
import pstats
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import StandardScaler
from hyperopt import Trials, STATUS_OK, tpe, fmin, hp
import hyperopt
from hyperopt.pyll.base import scope
from hyperopt.pyll.stochastic import sample
from DatasetManager import DatasetManager
import EncoderFactory
import ClassifierFactory
def create_and_evaluate_model(args):
global trial_nr, all_results
trial_nr += 1
print("Trial %s out of %s" % (trial_nr, n_iter))
start = time.time()
score = 0
args['n_estimators'] = 500
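    # n_estimators is fixed above, so hyperopt effectively tunes only the remaining hyperparameters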
for cv_iter in range(n_splits):
# read encoded data
dt_train = pd.read_csv(os.path.join(folds_dir, "fold%s_train.csv" % cv_iter), sep=";")
dt_test = pd.read_csv(os.path.join(folds_dir, "fold%s_test.csv" % cv_iter), sep=";")
with open(os.path.join(folds_dir, "fold%s_train_y.csv" % cv_iter), "rb") as fin:
train_y = np.array(pickle.load(fin))
with open(os.path.join(folds_dir, "fold%s_test_y.csv" % cv_iter), "rb") as fin:
test_y = np.array(pickle.load(fin))
# fit classifier and predict
cls = ClassifierFactory.get_classifier(cls_method, args, random_state, min_cases_for_training,
class_ratios[cv_iter])
cls.fit(dt_train, train_y)
preds = cls.predict_proba(dt_test)
if len(set(test_y)) >= 2:
score += roc_auc_score(test_y, preds)
# save current trial results
for k, v in args.items():
all_results.append((trial_nr, k, v, -1, score / n_splits))
return {'loss': -score / n_splits, 'status': STATUS_OK, 'model': cls}
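# command-line interface: dataset reference, bucketing/encoding method, classifier name, number of hyperopt iterations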
dataset_ref = argv[1]
method_name = argv[2]
cls_method = argv[3]
n_iter = int(argv[4])
train_ratio = 0.8
n_splits = 3
random_state = 22
min_cases_for_training = 1
if n_splits == 1:
PARAMS_DIR = "val_results_unstructured"
else:
PARAMS_DIR = "cv_results_revision"
# create directory
if not os.path.exists(os.path.join(PARAMS_DIR)):
os.makedirs(os.path.join(PARAMS_DIR))
if "prefix_index" in method_name:
bucket_method, cls_encoding, nr_events = method_name.split("_")
nr_events = int(nr_events)
else:
bucket_method, cls_encoding = method_name.split("_")
nr_events = None
dataset_ref_to_datasets = {
"bpic2011": ["bpic2011_f%s"%formula for formula in range(1,5)],
"bpic2015": ["bpic2015_%s_f2"%(municipality) for municipality in range(1,6)],
"insurance": ["insurance_activity", "insurance_followup"],
"sepsis_cases": ["sepsis_cases_1", "sepsis_cases_2", "sepsis_cases_4"]
}
encoding_dict = {
"laststate": ["static", "last"],
"agg": ["static", "agg"],
"index": ["static", "index"],
"combined": ["static", "last", "agg"]
}
datasets = [dataset_ref] if dataset_ref not in dataset_ref_to_datasets else dataset_ref_to_datasets[dataset_ref]
methods = encoding_dict[cls_encoding]
for dataset_name in datasets:
folds_dir = "folds_%s_%s_%s" % (dataset_name, cls_method, method_name)
if not os.path.exists(os.path.join(folds_dir)):
os.makedirs(os.path.join(folds_dir))
# read the data
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols,
'fillna': True}
# determine min and max (truncated) prefix lengths
min_prefix_length = 1
if "traffic_fines" in dataset_name:
max_prefix_length = 10
elif "bpic2017" in dataset_name:
max_prefix_length = min(20, dataset_manager.get_pos_case_length_quantile(data, 0.90))
else:
max_prefix_length = min(40, dataset_manager.get_pos_case_length_quantile(data, 0.90))
# split into training and test
train, _ = dataset_manager.split_data_strict(data, train_ratio, split="temporal")
del data
# prepare chunks for CV
class_ratios = []
cv_iter = 0
if n_splits == 1:
if dataset_ref in ["github"]:
train, _ = dataset_manager.split_data(train, train_ratio=0.15/train_ratio, split="random", seed=22)
# train will be 0.1 of original data and val 0.05
train_chunk, test_chunk = dataset_manager.split_val(train, val_ratio=0.33, split="random", seed=22)
else:
train_chunk, test_chunk = dataset_manager.split_val(train, 0.2, split="random", seed=22)
class_ratios.append(dataset_manager.get_class_ratio(train_chunk))
# generate prefixes
if nr_events is not None:
dt_train_prefixes = dataset_manager.generate_prefix_data(train_chunk, nr_events, nr_events)
dt_test_prefixes = dataset_manager.generate_prefix_data(test_chunk, nr_events, nr_events)
else:
dt_train_prefixes = dataset_manager.generate_prefix_data(train_chunk, min_prefix_length, max_prefix_length)
dt_test_prefixes = dataset_manager.generate_prefix_data(test_chunk, min_prefix_length, max_prefix_length)
# encode data for classifier
feature_combiner = FeatureUnion([(method, EncoderFactory.get_encoder(method, **cls_encoder_args)) for method in methods])
if cls_method == "svm" or cls_method == "logit":
feature_combiner = Pipeline([('encoder', feature_combiner), ('scaler', StandardScaler())])
dt_train_encoded = feature_combiner.fit_transform(dt_train_prefixes)
pd.DataFrame(dt_train_encoded).to_csv(os.path.join(folds_dir, "fold%s_train.csv" % cv_iter), sep=";", index=False)
del dt_train_encoded
dt_test_encoded = feature_combiner.transform(dt_test_prefixes)
pd.DataFrame(dt_test_encoded).to_csv(os.path.join(folds_dir, "fold%s_test.csv" % cv_iter), sep=";", index=False)
del dt_test_encoded
# labels
train_y = dataset_manager.get_label_numeric(dt_train_prefixes)
with open(os.path.join(folds_dir, "fold%s_train_y.csv" % cv_iter), "wb") as fout:
pickle.dump(train_y, fout)
test_y = dataset_manager.get_label_numeric(dt_test_prefixes)
with open(os.path.join(folds_dir, "fold%s_test_y.csv" % cv_iter), "wb") as fout:
pickle.dump(test_y, fout)
else:
for train_chunk, test_chunk in dataset_manager.get_stratified_split_generator(train, n_splits=n_splits):
class_ratios.append(dataset_manager.get_class_ratio(train_chunk))
# generate prefixes
if nr_events is not None:
dt_train_prefixes = dataset_manager.generate_prefix_data(train_chunk, nr_events, nr_events)
dt_test_prefixes = dataset_manager.generate_prefix_data(test_chunk, nr_events, nr_events)
else:
dt_train_prefixes = dataset_manager.generate_prefix_data(train_chunk, min_prefix_length, max_prefix_length)
dt_test_prefixes = dataset_manager.generate_prefix_data(test_chunk, min_prefix_length, max_prefix_length)
# encode data for classifier
feature_combiner = FeatureUnion([(method, EncoderFactory.get_encoder(method, **cls_encoder_args)) for method in methods])
if cls_method == "svm" or cls_method == "logit":
feature_combiner = Pipeline([('encoder', feature_combiner), ('scaler', StandardScaler())])
dt_train_encoded = feature_combiner.fit_transform(dt_train_prefixes)
| pd.DataFrame(dt_train_encoded) | pandas.DataFrame |
import timeit
import pandas as pd
import numpy as np
from typing import Dict,List
loops=1000
inputfile:List[List[int]] = [[1,2,3,4,5,6] for x in range(0,1000)]
# input arrives as a list of row lists
# need to make columns
#######################
# zip
# 60us
def i1() -> List[int]:
return list(map(list, zip(*inputfile))) # type:ignore
t = timeit.timeit(i1,number=loops)
print(f'i1 transpose zip {1e6*t/loops} us')
#######################
# list
# 64us
def i2() -> List[List[int]]:
return [list(i) for i in zip(*inputfile)]
t = timeit.timeit(i2,number=loops)
print(f'i2 transpose list {1e6*t/loops} us')
#######################
# append
# 64us
def i3() -> List[List[int]]:
x = []
for i in zip(*inputfile):
x.append((list(i)))
return x
t = timeit.timeit(i3,number=loops)
print(f'i3 transpose append {1e6*t/loops} us')
#######################
# list to col dict
# 50us (winner!), 318 with np.array
def i4() -> Dict[int, int]:
return {x[0]:np.array(x[1]) for x in enumerate(zip(*inputfile))} #type:ignore
t = timeit.timeit(i4,number=loops)
print(f'i4 transpose list to dict {1e6*t/loops} us')
#######################
# list to dict to df
# should be 50+375 but is 1370. 743 if i do the np.array above
# this involves type conversion from series to ndarray
def g1() -> pd.DataFrame:
return pd.DataFrame(i4()) #type:ignore
t = timeit.timeit(g1,number=loops)
print(f'g1 list to col dict to df {1e6*t/loops} us')
#######################
# dictionary of column lists
x1 = list(range(0,1000)) # skipping the np array step is cheating
y1 = {'a':x1,'b':x1,'c':x1,'d':x1,'e':x1,'f':x1}
# 375 us, 650 if i include np array
def f1() -> pd.DataFrame:
y2 = {k:np.array(v) for (k,v) in y1.items()}
return pd.DataFrame(y2)
import logging
import math
import os
import sys
import geopandas as gpd
import numpy as np
import pandas as pd
from _helpers import _sets_path_to_root
from _helpers import _to_csv_nafix
from _helpers import configure_logging
# from shapely.geometry import LineString, Point, Polygon
# from osm_data_config import AFRICA_CC
logger = logging.getLogger(__name__)
# Requirement to set path to filepath for execution
# os.chdir(os.path.dirname(os.path.abspath(__file__)))
def prepare_substation_df(df_all_substations):
"""
Prepare raw substations dataframe to the structure compatible with PyPSA-Eur
Parameters
----------
df_all_substations : dataframe
Raw substations dataframe as downloaded from OpenStreetMap
"""
# Modify the naming of the DataFrame columns to adapt to the PyPSA-Eur-like format
df_all_substations = df_all_substations.rename(
columns={
"id": "bus_id",
"tags.voltage": "voltage",
# "dc", will be added below
"tags.power": "symbol",
# "under_construction", will be added below
"tags.substation": "tag_substation",
"Country": "country", # new/different to PyPSA-Eur
"Area": "tag_area",
"lonlat": "geometry",
})
# Add longitute (lon) and latitude (lat) coordinates in the dataset
df_all_substations["lon"] = df_all_substations["geometry"].x
df_all_substations["lat"] = df_all_substations["geometry"].y
# Add NaN as default
df_all_substations["station_id"] = np.nan
df_all_substations["dc"] = np.nan
df_all_substations["under_construction"] = np.nan
# Rearrange columns
clist = [
"bus_id",
"station_id",
"voltage",
"dc",
"symbol",
"under_construction",
"tag_substation",
"tag_area",
"lon",
"lat",
"geometry",
"country",
]
df_all_substations = df_all_substations[clist]
# add the under_construction and dc
df_all_substations["under_construction"] = False
df_all_substations["dc"] = False
return df_all_substations
def add_line_endings_tosubstations(substations, lines):
# extract columns from substation df
bus_s = gpd.GeoDataFrame(columns=substations.columns)
bus_e = gpd.GeoDataFrame(columns=substations.columns)
# Read information from line.csv
bus_s[["voltage", "country"]] = lines[["voltage", "country"]].astype(str)
bus_s["geometry"] = lines.geometry.boundary.map(
lambda p: p.geoms[0] if len(p.geoms) >= 2 else None)
bus_s["lon"] = bus_s["geometry"].map(lambda p: p.x if p != None else None)
bus_s["lat"] = bus_s["geometry"].map(lambda p: p.y if p != None else None)
bus_s["bus_id"] = (
(substations["bus_id"].max() if "bus_id" in substations else 0) + 1 +
bus_s.index)
bus_e[["voltage", "country"]] = lines[["voltage", "country"]].astype(str)
bus_e["geometry"] = lines.geometry.boundary.map(
lambda p: p.geoms[1] if len(p.geoms) >= 2 else None)
bus_e["lon"] = bus_e["geometry"].map(lambda p: p.x if p != None else None)
bus_e["lat"] = bus_e["geometry"].map(lambda p: p.y if p != None else None)
bus_e["bus_id"] = bus_s["bus_id"].max() + 1 + bus_e.index
bus_all = bus_s.append(bus_e).reset_index(drop=True)
# Add NaN as default
bus_all["station_id"] = np.nan
bus_all["dc"] = False # np.nan
# Assuming substations completed for installed lines
bus_all["under_construction"] = False
bus_all["tag_area"] = 0.0 # np.nan
bus_all["symbol"] = "substation"
# TODO: this tag may be improved, maybe depending on voltage levels
bus_all["tag_substation"] = "transmission"
buses = substations.append(bus_all).reset_index(drop=True)
# Assign index to bus_id
buses.loc[:, "bus_id"] = buses.index
return buses
# # tol=0.01, around 700m at latitude 44.
# def set_substations_ids(buses, tol=0.02):
# """
# Function to set substations ids to buses, accounting for location tolerance
# The algorithm is as follows:
# 1. initialize all substation ids to -1
# 2. if the current substation has been already visited [substation_id < 0], then skip the calculation
# 3. otherwise:
# 1. identify the substations within the specified tolerance (tol)
# 2. when all the substations in tolerance have substation_id < 0, then specify a new substation_id
# 3. otherwise, if one of the substation in tolerance has a substation_id >= 0, then set that substation_id to all the others;
# in case of multiple substations with substation_ids >= 0, the first value is picked for all
# """
# buses["station_id"] = -1
# station_id = 0
# for i, row in buses.iterrows():
# if buses.loc[i, "station_id"] >= 0:
# continue
# # get substations within tolerance
# close_nodes = np.where(
# buses.apply(
# lambda x: math.dist([row["lat"], row["lon"]],
# [x["lat"], x["lon"]]) <= tol,
# axis=1,
# ))[0]
# if len(close_nodes) == 1:
# # if only one substation is in tolerance, then the substation is the current one
# # Note that the node cannot be with substation_id >= 0, given the preliminary check
# # at the beginning of the for loop
# buses.loc[buses.index[i], "station_id"] = station_id
# # update station id
# station_id += 1
# else:
# # several substations in tolerance
# # get their ids
# subset_substation_ids = buses.loc[buses.index[close_nodes],
# "station_id"]
# # check if all substation_ids are negative (<0)
# all_neg = subset_substation_ids.max() < 0
# # check if at least a substation_id is negative (<0)
# some_neg = subset_substation_ids.min() < 0
# if all_neg:
# # when all substation_ids are negative, then this is a new substation id
# # set the current station_id and increment the counter
# buses.loc[buses.index[close_nodes], "station_id"] = station_id
# station_id += 1
# elif some_neg:
# # otherwise, when at least a substation_id is non-negative, then pick the first value
# # and set it to all the other substations within tolerance
# sub_id = -1
# for substation_id in subset_substation_ids:
# if substation_id >= 0:
# sub_id = substation_id
# break
# buses.loc[buses.index[close_nodes], "station_id"] = sub_id
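# --- illustrative sketch (not part of the original file) ---------------------
# The commented-out set_substations_ids above describes grouping buses whose
# lat/lon coordinates lie within `tol` of each other. The helper below is one
# possible way to express that idea with scipy's cKDTree; the function name,
# the use of cKDTree and the plain Euclidean distance on degrees are
# assumptions for illustration only, not the project's actual implementation.
def set_substations_ids_sketch(buses, tol=0.02):
    from scipy.spatial import cKDTree

    coords = buses[["lat", "lon"]].to_numpy()
    tree = cKDTree(coords)
    station_ids = np.full(len(buses), -1, dtype=int)
    next_id = 0
    for i in range(len(buses)):
        if station_ids[i] >= 0:
            continue  # already assigned through an earlier neighbourhood
        close = tree.query_ball_point(coords[i], r=tol)
        assigned = station_ids[close]
        if (assigned >= 0).any():
            # reuse the first station id already present in the neighbourhood
            station_ids[close] = assigned[assigned >= 0][0]
        else:
            station_ids[close] = next_id
            next_id += 1
    buses["station_id"] = station_ids
    return buses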
def set_unique_id(df, col):
"""
Create unique id's, where id is specified by the column "col"
The steps below create unique bus id's without losing the original OSM bus_id.
Unique bus_id values are created by simply appending -1, -2, -3 to the original bus_id:
every id that occurs once gets a -1, while an id that occurs e.g. three times is
counted by cumcount and becomes -1, -2, -3, making every id unique.
Parameters
----------
df : dataframe
Dataframe considered for the analysis
col : str
Column name for the analyses; examples: "bus_id" for substations or "line_id" for lines
"""
# operate only if id is not already unique (nunique counts unique values)
if df[col].count() != df[col].nunique():
# create cumcount column. Cumcount counts 0,1,2,3 the number of duplicates
df["cumcount"] = df.groupby([col]).cumcount()
# avoid 0 value for better understanding
df["cumcount"] = df["cumcount"] + 1
# add cumcount to id to make id unique
df[col] = df[col].astype(str) + "-" + df["cumcount"].values.astype(str)
# remove cumcount column
df.drop(columns="cumcount", inplace=True)
return df
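# --- illustrative usage (not part of the original file) ----------------------
# Duplicated ids receive a per-duplicate suffix, e.g. for a hypothetical frame:
#   df = pd.DataFrame({"bus_id": ["10", "10", "11"]})
#   set_unique_id(df, "bus_id")["bus_id"].tolist()
#   -> ['10-1', '10-2', '11-1']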
def split_cells(df, lst_col="voltage"):
"""
Split semicolon separated cells i.e. [66000;220000] and create new identical rows
Parameters
----------
df : dataframe
Dataframe under analysis
lst_col : str
Target column over which to perform the analysis
"""
x = df.assign(**{lst_col: df[lst_col].str.split(";")})
x = pd.DataFrame({
col: np.repeat(x[col].values, x[lst_col].str.len())
for col in x.columns.difference([lst_col])
}).assign(
**{lst_col: np.concatenate(x[lst_col].values)})[x.columns.tolist()]
return x
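# --- illustrative usage (not part of the original file) ----------------------
# A semicolon-separated cell is expanded into one row per value, with all other
# columns repeated, e.g.:
#   split_cells(pd.DataFrame({"line_id": [1], "voltage": ["66000;220000"]}))
#   ->   line_id  voltage
#   0    1        66000
#   1    1        220000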
def filter_voltage(df, threshold_voltage=35000):
# Drop any row with N/A voltage
df = df.dropna(subset=["voltage"])
# Split semicolon separated cells i.e. [66000;220000] and create new identical rows
df = split_cells(df)
# Convert voltage to float, if impossible, discard row
df["voltage"] = (df["voltage"].apply(
lambda x: pd.to_numeric(x, errors="coerce")).astype(float))
df = df.dropna(subset=["voltage"]) # Drop any row with Voltage = N/A
# convert voltage to int
df.loc[:, "voltage"] = df["voltage"].astype(int)
# keep only lines with a voltage no lower than than threshold_voltage
df = df[df.voltage >= threshold_voltage]
return df
def finalize_substation_types(df_all_substations):
"""
Specify bus_id and voltage columns as integer
"""
# make float to integer
df_all_substations["bus_id"] = df_all_substations["bus_id"].astype(int)
df_all_substations.loc[:,
"voltage"] = df_all_substations["voltage"].astype(
int)
return df_all_substations
def prepare_lines_df(df_lines):
"""
This function prepares the dataframe for lines and cables
Parameters
----------
df_lines : dataframe
Raw lines or cables dataframe as downloaded from OpenStreetMap
"""
# Modification - create final dataframe layout
df_lines = df_lines.rename(
columns={
"id": "line_id",
"tags.voltage": "voltage",
"tags.circuits": "circuits",
"tags.cables": "cables",
"tags.frequency": "tag_frequency",
"tags.power": "tag_type",
"lonlat": "geometry",
"Country": "country", # new/different to PyPSA-Eur
"Length": "length",
})
# Add NaN as default
df_lines["bus0"] = np.nan
df_lines["bus1"] = np.nan
df_lines["underground"] = np.nan
df_lines["under_construction"] = np.nan
# Rearrange columns
clist = [
"line_id",
"bus0",
"bus1",
"voltage",
"circuits",
"length",
"underground",
"under_construction",
"tag_type",
"tag_frequency",
"cables",
"geometry",
"country",
]
# Check. If column is not in df create an empty one.
for c in clist:
if c not in df_lines:
df_lines[c] = np.nan
df_lines = df_lines[clist]
return df_lines
def finalize_lines_type(df_lines):
"""
This function is aimed at finalizing the type of the columns of the dataframe
"""
df_lines["line_id"] = df_lines["line_id"].astype(int)
return df_lines
def integrate_lines_df(df_all_lines):
"""
Function to add underground, under_construction, frequency and circuits
"""
# Add under construction info
# Default = False. No more information available atm
df_all_lines["under_construction"] = False
# Add underground flag to check whether the line (cable) is underground
# Simplified. If tag_type cable then underground is True
df_all_lines["underground"] = df_all_lines["tag_type"] == "cable"
# More information extractable for "underground" by looking at "tag_location".
if "tag_location" in df_all_lines: # drop column if exist
df_all_lines.drop(columns="tag_location", inplace=True)
# Add frequency column
df_all_lines["tag_frequency"] = 50
# Add circuits information
# if not int make int
if df_all_lines["cables"].dtype != int:
# HERE. "0" if cables "None", "nan" or "1"
df_all_lines.loc[(df_all_lines["cables"] < "3")
| df_all_lines["cables"].isna(), "cables"] = "0"
df_all_lines["cables"] = df_all_lines["cables"].astype("int")
# downgrade 4 and 5 cables to 3...
if df_all_lines["cables"].isin([4, 5]).any():
# Reason: 4 cables have 1 lighting protection cables, 5 cables has 2 LP cables - not transferring energy;
# see https://hackaday.com/2019/06/11/a-field-guide-to-transmission-lines/
# where circuits are "0" make "1"
df_all_lines.loc[(df_all_lines["cables"] == 4) |
(df_all_lines["cables"] == 5), "cables"] = 3
# one circuit contains 3 cable
df_all_lines.loc[df_all_lines["circuits"].isna(), "circuits"] = (
df_all_lines.loc[df_all_lines["circuits"].isna(), "cables"] / 3)
df_all_lines["circuits"] = df_all_lines["circuits"].astype(int)
# where circuits are "0" make "1"
df_all_lines.loc[(df_all_lines["circuits"] == "0") |
(df_all_lines["circuits"] == 0), "circuits"] = 1
# drop column if exist
if "cables" in df_all_lines:
df_all_lines.drop(columns="cables", inplace=True)
return df_all_lines
def prepare_generators_df(df_all_generators):
"""
Prepare the dataframe for generators
"""
# reset index
df_all_generators = df_all_generators.reset_index(drop=True)
# rename columns
df_all_generators = df_all_generators.rename(
columns={"tags.generator:output:electricity": "power_output_MW"})
# convert electricity column from string to float value
df_all_generators = df_all_generators[
df_all_generators["power_output_MW"].astype(str).str.contains("MW")]
df_all_generators["power_output_MW"] = (
df_all_generators["power_output_MW"].str.extract("(\\d+)").astype(
float))
return df_all_generators
def find_first_overlap(geom, country_geoms, default_name):
"""Return the first country whose shape intersects the geometry"""
for c_name, c_geom in country_geoms.items():
if not geom.disjoint(c_geom):
return c_name
return default_name
def set_countryname_by_shape(df,
ext_country_shapes,
names_by_shapes=True,
exclude_external=True):
"Set the country name by the name shape"
if names_by_shapes:
df["country"] = [
find_first_overlap(
row["geometry"],
ext_country_shapes,
None if exclude_external else row["country"],
) for id, row in df.iterrows()
]
df.dropna(subset=["country"], inplace=True)
return df
def create_extended_country_shapes(country_shapes, offshore_shapes):
"""Obtain the extended country shape by merging on- and off-shore shapes"""
merged_shapes = (gpd.GeoDataFrame({
"name":
list(country_shapes.index),
"geometry": [
c_geom.unary_union(offshore_shapes[c_code])
if c_code in offshore_shapes else c_geom
for c_code, c_geom in country_shapes.items()
],
}).set_index("name")["geometry"].set_crs(4326))
return merged_shapes
def clean_data(
input_files,
output_files,
ext_country_shapes=None,
names_by_shapes=True,
tag_substation="transmission",
threshold_voltage=35000,
add_line_endings=True,
):
# ----------- LINES AND CABLES -----------
# Load raw data lines
df_lines = gpd.read_file(input_files["lines"]).set_crs(epsg=4326,
inplace=True)
# prepare lines dataframe and data types
df_lines = prepare_lines_df(df_lines)
df_lines = finalize_lines_type(df_lines)
# Load raw data lines
df_cables = gpd.read_file(input_files["cables"]).set_crs(epsg=4326,
inplace=True)
# prepare cables dataframe and data types
df_cables = prepare_lines_df(df_cables)
df_cables = finalize_lines_type(df_cables)
# concatenate lines and cables in a single dataframe
df_all_lines = pd.concat([df_lines, df_cables])
#!/usr/bin/env python3
#
# Copyright, <NAME> 2020
#
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import glob
from multiprocessing import Pool
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from per_class_iou import per_class_iou
from utils import path_join
parser = argparse.ArgumentParser(
description='This script evaluates the per-class iou '
'for given predictions and ground truths.')
parser.add_argument('predictions_dir',
help='Directory containing png files '
'representing segmentation predictions')
parser.add_argument('true_labels_dir',
help='Directory containing the corresponding true labels '
'of the images for which the predictions in '
'\'predictions_dir\' were made.')
parser.add_argument('log_output',
help='Location of CSV file which will be written to contain'
' per-class IOU for each image.')
parser.add_argument('-t', '--threads', default=1, type=int,
help='Number of parallel threads to use')
parser.add_argument('-c', '--n_classes', default=3, type=int,
help='Number of classes in the data')
def mean_per_class_iou(pred_dir, truth_dir, n_classes):
pred_paths = sorted(glob.glob(path_join(pred_dir, '*.png')) + glob.glob(
path_join(pred_dir, '*.jpg')))
truth_paths = sorted(glob.glob(path_join(truth_dir, '*.png')) + glob.glob(
path_join(truth_dir, '*.jpg')))
assert len(pred_paths) == len(truth_paths), \
'Different number of images in prediction and truth directories'
pd_paths = list(zip(pred_paths, truth_paths))
assert np.all(
[Path(x_p).stem == Path(y_p).stem for (x_p, y_p) in pd_paths]), \
'Not all prediction and truth images are named correspondingly'
iou_class_names = [f'iou_class_{i}' for i in range(n_classes)]
iou_df = pd.DataFrame(columns=['truth_path', 'pred_path'] + iou_class_names)
for pred_path, truth_path in pd_paths:
pred_img = cv2.imread(pred_path, cv2.IMREAD_GRAYSCALE)
truth_img = cv2.imread(truth_path, cv2.IMREAD_GRAYSCALE)
resized_dim = (truth_img.shape[1], truth_img.shape[0])
pred_img = cv2.resize(pred_img, resized_dim,
interpolation=cv2.INTER_NEAREST)
pc_iou = per_class_iou(pred_img, truth_img, n_classes)
iou_df = iou_df.append(
    pd.Series([truth_path, pred_path] + pc_iou, index=iou_df.columns),
    ignore_index=True)
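# --- illustrative sketch (not part of the original file) ---------------------
# `per_class_iou` is imported from a sibling module that is not shown in this
# excerpt; the reference implementation below is an assumption of what it
# computes: for each class c, IoU_c = |pred==c AND truth==c| / |pred==c OR truth==c|.
def _per_class_iou_sketch(pred_img, truth_img, n_classes):
    ious = []
    for c in range(n_classes):
        pred_c = pred_img == c
        truth_c = truth_img == c
        union = np.logical_or(pred_c, truth_c).sum()
        inter = np.logical_and(pred_c, truth_c).sum()
        # convention assumed here: a class absent from both images scores 0
        ious.append(float(inter) / union if union > 0 else 0.0)
    return ious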
'''
Author: <NAME>
Utilities to summarize the outputs produced by the model i.e. the results.txt files spitted out by the
evaluation scripts.
'''
import os
from sys import path
import re
import pandas as pd
import numpy as np
from scipy.stats import ttest_rel
# res will need to be passed to the last function, as an example:
#output_path = '/scratch/geeticka/relation-extraction/output/semeval2010/CrossValidation'
#def res(path): return os.path.join(output_path, path)
## Below are the methods to gather necessary information from the results file
def read_confusion_matrix_per_line(cur_line):
if re.search(r'.*\|.*', cur_line): # only get those lines which have a pipe operator
splitted_line = cur_line.strip().split()
pipe_seen = 0 # the correct numbers are between two pipes
confusion_matrix_line = []
for val in splitted_line:
if val.startswith('|'):
pipe_seen += 1
if pipe_seen == 1 and val.strip() != '|': # keep collecting the values as you are
val = [x for x in val if x != '|'] # to handle some special cases when the pipe operator
# is stuck to the number (happens when the number is too long)
val = ''.join(val)
confusion_matrix_line.append(float(val))
return confusion_matrix_line
return None
def read_accuracy_per_line(cur_line):
if cur_line.startswith('Accuracy (calculated'):
accuracy = re.match(r'.*= (.*)%', cur_line).groups()[0]
accuracy = float(accuracy)
return accuracy
return None
def read_precision_recall_f1(cur_line): # assumes we are already past the 'Results for the individual relations:' header
match = re.match(r'.*= (.*)%.*= (.*)%.*= (.*)%$', cur_line)
if match:
precision, recall, f1 = match.groups()
return float(precision), float(recall), float(f1)
else:
return None
#if not cur_line.startswith('Micro-averaged result'): # you want to read only up to the point when the relations
# will need to double check above
# confusion matrix portion refers to which part of the file to read
# for eg, this will be <<< (9+1)-WAY EVALUATION TAKING DIRECTIONALITY INTO ACCOUNT -- OFFICIAL >>> for semeval
def get_file_metrics(num_relations, result_file, confusion_matrix_portion):
official_portion_file = False
individual_relations_f1_portion = False
micro_f1_portion = False
macro_f1_portion = False
confusion_matrix_official = [] # stores the official confusion matrix read from the file
accuracy = None
metrics_indiv_relations = [] # precision, recall and f1 for each relation
metrics_micro = [] # excluding the other relation
metrics_macro = [] # excluding the other relation
with open(result_file, 'r') as result_file:
for cur_line in result_file:
cur_line = cur_line.strip()
#if cur_line.startswith('<<< (9+1)-WAY EVALUATION TAKING DIRECTIONALITY INTO ACCOUNT -- OFFICIAL >>>'):
if official_portion_file is True and cur_line.startswith('<<<'):
break
if cur_line.startswith(confusion_matrix_portion):
official_portion_file = True
if official_portion_file is False:
continue
confusion_matrix_line = read_confusion_matrix_per_line(cur_line)
if confusion_matrix_line is not None:
confusion_matrix_official.append(confusion_matrix_line)
acc = read_accuracy_per_line(cur_line)
if acc is not None: accuracy = acc
# figure out which sub portion of the official portion we are in
if cur_line.startswith('Results for the individual relations:'):
individual_relations_f1_portion = True
elif cur_line.startswith('Micro-averaged result'):
micro_f1_portion = True
elif cur_line.startswith('MACRO-averaged result'):
macro_f1_portion = True
# populate the precision, recall and f1 for the correct respective lists
if individual_relations_f1_portion is True and micro_f1_portion is False:
vals = read_precision_recall_f1(cur_line)
if vals is not None: metrics_indiv_relations.append([vals[0], vals[1], vals[2]])
elif micro_f1_portion is True and macro_f1_portion is False:
vals = read_precision_recall_f1(cur_line)
if vals is not None: metrics_micro.append([vals[0], vals[1], vals[2]])
elif macro_f1_portion is True:
vals = read_precision_recall_f1(cur_line)
if vals is not None: metrics_macro.append([vals[0], vals[1], vals[2]])
return confusion_matrix_official, accuracy, metrics_indiv_relations, metrics_micro, metrics_macro
# Generate confusion matrix as a pandas dataframe
def get_confusion_matrix_as_df(confusion_matrix_official, relations_as_short_list):
index = pd.Index(relations_as_short_list, name='gold labels')
columns = pd.Index(relations_as_short_list, name='predicted')
confusion_matrix_df = pd.DataFrame(data=confusion_matrix_official, columns=columns,index=index)
return confusion_matrix_df
# Give the confusions across each relation, with a special interest in the "other" relation
def generate_confused_with_string(idx, row, relation_full_form_dictionary, full_form=False):
# index is the current relation that we are considering and row is all the predicted examples
confused_with_string = ""
num_of_columns = len(row.index)
for i in range(0, num_of_columns):
column_name = row.index[i]
column_value = int(row.loc[column_name])
if column_value > 0 and column_name != idx:
if full_form is True: column_name = relation_full_form_dictionary[column_name]
confused_with_string += " " + column_name + "(" + str(column_value) + ")"
return confused_with_string.strip()
#print(row.data[0])
#for val in row:
# print(val)
def generate_pretty_summary_confusion_matrix(confusion_matrix_df, relation_full_form_dictionary,
full_form=False):
data = [] # index will be 0,1,2 and so on, but columns will be
# Actual label, confused with as a string, correct predictions as a number
for index, row in confusion_matrix_df.iterrows():
actual_label = relation_full_form_dictionary[index]
confused_with = generate_confused_with_string(index, row, relation_full_form_dictionary, full_form)
# short form is the default
correct_predictions = row[index] # eg: gives the column value for C-E for an index C-E
#if index != '_O': confused_with_other = row['_O'] # this is specific to semeval and will need to be changed
#else: confused_with_other = None
data.append([actual_label, confused_with, correct_predictions])
columns = pd.Index(['Gold Relation', 'Confused With(num_examples)', 'Correct Predictions'], name='summary')
pretty_summary_confusion_matrix_df = pd.DataFrame(data=data, columns=columns)
return pretty_summary_confusion_matrix_df
# Give the individual relation metrics as a dataframe
def create_metrics_indiv_relations_df(metrics_indiv_relations, relation_full_form_dictionary, relation_as_short_list):
index_list = relation_as_short_list
index_list_verbose = [relation_full_form_dictionary[x] for x in index_list]
index = pd.Index(index_list_verbose, name='labels')
from functools import lru_cache
from pyiso import client_factory
from datetime import datetime, timedelta
from funcy import compose, identity, retry
from itertools import repeat
from urllib.error import HTTPError
import pandas as pd
import numpy as np
from app.model import RENEWABLES, NON_RENEWABLES
from app.util import full_hour_series
@retry(5, errors=HTTPError)
@lru_cache()
def raw_generation(ba_name, control_area, start, end):
entso = client_factory(ba_name)
data = entso.get_generation(
latest=True, control_area=control_area, start_at=start, end_at=end
)
return pd.DataFrame(data)
def deduplicate(raw):
"""Sum generation readings that have the same fuel_name"""
by_fuel = raw.groupby(["fuel_name", "timestamp", "freq"])
return by_fuel.agg({"gen_MW": np.sum}).reset_index()
def add_missing_megawatts(raw):
"""Add 0 generation reading for missing timestamps"""
by_fuel = raw.groupby(["fuel_name"])
return pd.concat(
[_group_without_missing_data(g) for _, g in by_fuel], ignore_index=True
)
def _group_without_missing_data(group):
source = group.iloc[0]
source_zero_mw = pd.DataFrame([source])
source_zero_mw.at[source_zero_mw.index[0], "gen_MW"] = 0
# in case first 15 minutes are missing: make sure series starts at 15 minutes past
start = source["timestamp"].replace(minute=15)
end = group.iloc[-1].timestamp
series = full_hour_series(start, end, "15min")
def filler(t):
filler = source_zero_mw.copy()
filler.at[filler.index[0], "timestamp"] = t
return filler
def df_for_timestamp(t):
df = group[group["timestamp"] == t]
return df if len(df) > 0 else filler(t)
return pd.concat([df_for_timestamp(t) for t in series], ignore_index=True)
def downsample(raw):
"""Downsample to 1 hour (original reports use 15m interval)."""
assert raw[raw["freq"] != "15m"].empty
# The last generation report of the day has a timestamp that is
# day+1 at 00:00 (each report contains data of the previous 15 minutes).
# Adjust timestamp a little to get all generation reports within the
# boundary of one day.
raw["timestamp_adjusted"] = raw["timestamp"] - | pd.Timedelta("1s") | pandas.Timedelta |
'''
File: HAPT_Dataset.py
Author: <NAME>
Date: 03/10/2019
Version: 1.0
Description:
utility functions to load the
Human Activities and Postural Transitions (HAPT) dataset
'''
import numpy as np
import pandas as pd
from os.path import expanduser
from keras.utils import to_categorical
from multiprocessing import Pool as ThreadPool
import math
import scipy.signal
home = expanduser("~")
'''
Dataset Info - Labels:
1 WALKING
2 W_UPSTAIRS
3 W_DOWNSTAIRS
4 SITTING
5 STANDING
6 LAYING
7 STAND_TO_SIT
8 SIT_TO_STAND
9 SIT_TO_LIE
10 LIE_TO_SIT
11 STAND_TO_LIE
12 LIE_TO_STAND
'''
#Modify this line with the right path.
#Dataset available at: http://archive.ics.uci.edu/ml/machine-learning-databases/00341/
ucihapt_datapath = home+"/HAPT_Dataset/"
def get_test_uuids():
test_uuids = pd.read_csv(ucihapt_datapath+"Test/subject_id_test.txt",names=["UUID"])
all_test_uuids = np.unique(test_uuids.values)
return all_test_uuids
def get_train_uuids():
train_uuids = pd.read_csv(ucihapt_datapath+"Train/subject_id_train.txt",names=["UUID"])
all_train_uuids = np.unique(train_uuids.values)
return all_train_uuids
#Get Data no resampling
def get_all_data_multi_thread_noresampling_3D(uuids, n_threads):
print("Loading data")
print("Initiating pool")
print("resampling 50 -> 40 Hz disabled. Doing 3D ")
uuids_list = [ [x] for x in uuids ]
pool = ThreadPool(n_threads)
print("Pool map")
test_points = pool.map( get_all_data_noresampling_3D,uuids_list)
print("Pool map")
pool.close()
print("Pool join")
pool.join()
#Merging data from treads
print("Merging threads' data")
X_list = []
y_list = []
for res in test_points:
#dataset_size += len(res[1])
X_list.extend(res[0])
y_list.extend(res[1])
X_es = np.zeros((len(y_list),128,8))
X_es[:,:] = [x for x in X_list ]
y_es = np.zeros(len(y_list))
y_es[:] = [y for y in y_list]
y_scaled = to_categorical(y_es, num_classes=7)
return (X_es, y_scaled)
def get_all_data_noresampling_3D(uuids):
gt_df = pd.read_csv(ucihapt_datapath+"RawData/labels.txt",sep="\s",names=['EXP_ID','USER_ID','LABEL','START','END'],engine='python')
#exclude other uuids
#print( gt_df.head() )
filtered_df = pd.DataFrame(columns=['EXP_ID','USER_ID','LABEL','START','END'])
for uuid in uuids:
data_uuid = gt_df[ gt_df['USER_ID'] == uuid ]
filtered_df = pd.concat([filtered_df, data_uuid], ignore_index=True)
import os
import pandas
import numpy
from ... import normalize
from ... import convert
def hellinger(aves, stds):
"""
Computes pairwise Hellinger distances between Gaussian distributions
from lists of the means and standard deviations.
Args:
aves (numpy array): list of means (length n)
stds (numpy array): list of standard deviations (length n)
Returns:
pairise hellinger distance (numpy array ~ (n, n))
"""
epsilon = 1e-6
variances = (epsilon + stds) ** 2
std_probs = numpy.multiply.outer(epsilon + stds, epsilon + stds)
var_sums = numpy.add.outer(variances, variances)
mean_diffs = numpy.subtract.outer(aves, aves)
return 1 - numpy.exp(-mean_diffs ** 2 / (4 * var_sums)) * \
numpy.sqrt(2 * std_probs / var_sums)
def max_hellinger(aves, stds):
"""
Computes maximum Hellinger distance from pairwise comparisions of Gaussian
distributions from lists of the means and standard deviations.
Args:
aves (numpy array): list of means (length n)
stds (numpy array): list of standard deviations (length n)
Returns:
maximum pairise hellinger distance (float)
"""
return numpy.max(hellinger(aves, stds))
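# --- illustrative usage (not part of the original file) ----------------------
# Pairwise Hellinger distances for three unit-variance Gaussians:
#   aves = numpy.array([0.0, 0.0, 5.0])
#   stds = numpy.array([1.0, 1.0, 1.0])
#   hellinger(aves, stds)      # ~0 for the identical pair (and on the diagonal)
#   max_hellinger(aves, stds)  # close to 1, driven by the means 0 and 5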
def create_tissue_stats():
"""
Uses data from the GTEx project to estimate statistics of gene expression
across tissues.
Expression is measured in Transcripts per Million (TPM).
Statistics are saved in csv format in the __file__ directory.
Args:
None
Returns:
None
"""
filepath = os.path.dirname(os.path.abspath(__file__))
samples = pandas.read_csv(os.path.join(filepath, 'SRP012682.tsv'), sep='\t')
expression = pandas.read_csv(os.path.join(filepath, 'expression_data.csv'), sep='\t')
mean = pandas.DataFrame()
median = pandas.DataFrame()
std = pandas.DataFrame()
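# --- illustrative note (not part of the original file) -----------------------
# The rest of create_tissue_stats() is not included in this excerpt. Based on
# the docstring, a plausible continuation would fill the mean/median/std frames
# with one column per tissue, e.g. (the tissue column name "smts" and the idea
# that the expression columns are sample ids are assumptions):
#   for tissue, sample_ids in samples.groupby("smts").groups.items():
#       sub = expression[expression.columns.intersection(sample_ids)]
#       mean[tissue] = sub.mean(axis=1)
#       median[tissue] = sub.median(axis=1)
#       std[tissue] = sub.std(axis=1)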
"""Tradingview model"""
__docformat__ = "numpy"
import requests
from tradingview_ta import TA_Handler
import pandas as pd
from gamestonk_terminal import config_terminal as cfg
INTERVALS = {
"1m": "1 min",
"5m": "5 min",
"15m": "15 min",
"1h": "1 hour",
"4h": "4 hours",
"1d": "1 day",
"1W": "1 week",
"1M": "1 month",
}
SCREENERS = ["crypto", "forex", "cfd"]
def get_tradingview_recommendation(
ticker: str, screener: str, exchange: str, interval: str
) -> pd.DataFrame:
"""Get tradingview recommendation based on technical indicators
Parameters
----------
ticker : str
Ticker to get the recommendation from tradingview based on technical indicators
screener : str
Screener based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
exchange: str
Exchange based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
interval: str
Interval time to check technical indicators and correspondent recommendation
Returns
-------
df_recommendation: pd.DataFrame
Dataframe of tradingview recommendations based on technical indicators
"""
if not exchange:
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
exchange = result.json()["Exchange"]
if not interval:
df_recommendation = pd.DataFrame()
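# --- illustrative note (not part of the original file) -----------------------
# The body of get_tradingview_recommendation is truncated in this excerpt. The
# usual tradingview_ta flow (shown only as an assumption about the missing
# part) builds a TA_Handler and reads its summary:
#   handler = TA_Handler(symbol=ticker, screener=screener,
#                        exchange=exchange, interval=interval)
#   summary = handler.get_analysis().summary
#   # e.g. {"RECOMMENDATION": "BUY", "BUY": 14, "SELL": 4, "NEUTRAL": 8}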
import datetime
import logging
import json
import requests
from pandas import json_normalize
import pandas as pd
from google.cloud import storage
from anyway.parsers.waze.waze_db_functions import (
insert_waze_alerts,
insert_waze_traffic_jams,
enrich_waze_alerts_ended_at_timestamp,
enrich_waze_traffic_jams_ended_at_timestamp,
)
from anyway.models import WazeAlert, WazeTrafficJams
ISRAEL_POLYGON = [
("33.717000", "32.547000"),
("34.722000", "33.004000"),
("35.793000", "33.331000"),
("35.914000", "32.953000"),
("35.750000", "32.723000"),
("35.395000", "31.084000"),
("34.931000", "29.473000"),
("33.717000", "32.547000"),
("33.717000", "32.547000"),
]
WAZE_ALERTS_API_PARAMS = {
"format": "JSON",
"tk": "ccp_partner",
"ccp_partner_name": "The Public Knowledge Workshop",
"types": "traffic,alerts,irregularities",
"polygon": ";".join([",".join(point) for point in ISRAEL_POLYGON]),
}
WAZE_ALERTS_API_URL = "https://il-georss.waze.com/rtserver/web/TGeoRSS"
logger = logging.getLogger("waze_data")
def list_blobs(bucket_name):
"""
Lists all the blobs in the bucket.
"""
storage_client = storage.Client()
blobs = storage_client.list_blobs(bucket_name)
return blobs
def parse_waze_alerts_data(waze_alerts, back_filled=False):
"""
parse waze alert json into a Dataframe.
param waze_alerts: waze raw alert json data
return: parsed Dataframe
"""
waze_df = json_normalize(waze_alerts)
waze_df["created_at"] = pd.to_datetime(waze_df["pubMillis"], unit="ms")
waze_df.rename(
{
"location.x": "longitude",
"location.y": "latitude",
"nThumbsUp": "number_thumbs_up",
"reportRating": "report_rating",
"reportDescription": "report_description",
"reportByMunicipalityUser": "report_by_municipality_user",
"jamUuid": "jam_uuid",
"type": "alert_type",
"subtype": "alert_subtype",
"roadType": "road_type",
},
axis=1,
inplace=True,
)
waze_df["geom"] = waze_df.apply(
lambda row: "POINT({} {})".format(row["longitude"], row["latitude"]), axis=1
)
waze_df["road_type"] = int(waze_df["road_type"].fillna(-1)[0])
waze_df["number_thumbs_up"] = int(waze_df.get("number_thumbs_up").fillna(0)[0])
waze_df["report_by_municipality_user"] = _convert_to_bool(
waze_df.get("report_by_municipality_user", False)
)
waze_df["back_filled"] = back_filled
waze_df.drop(["country", "pubMillis"], axis=1, inplace=True, errors="ignore")
for key in waze_df.keys():
if waze_df[key] is None or key not in [field.name for field in WazeAlert.__table__.columns]:
waze_df.drop([key], axis=1, inplace=True)
return waze_df.to_dict("records")
def parse_waze_traffic_jams_data(waze_jams, back_filled=False):
"""
parse waze traffic jams json into a Dataframe.
param waze_jams: waze raw traffic jams json data
return: parsed Dataframe
"""
waze_df = json_normalize(waze_jams)
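# --- illustrative note (not part of the original file) -----------------------
# json_normalize flattens nested JSON keys into dotted column names, which is
# why parse_waze_alerts_data above renames "location.x"/"location.y" to
# longitude/latitude. A minimal example:
#   json_normalize([{"type": "JAM", "location": {"x": 34.8, "y": 32.1}}])
#   ->   type  location.x  location.y
#   0    JAM         34.8        32.1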
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import streamlit as st
import pandas as pd
import seaborn as sns
import random
from sklearn.model_selection import RepeatedKFold, train_test_split, cross_val_score, StratifiedKFold, RepeatedStratifiedKFold, GridSearchCV
from sklearn import svm, preprocessing
from models import PCA, autoencoder, model_prep
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from models.model_prep import Model
from utils import reporter
from sklearn.pipeline import make_pipeline
from sklearn.metrics import precision_recall_curve, roc_curve, auc, f1_score
def svmachine(method, df_data, df_demog, regress, tracts, group, hemi, metric, reps):
#1 Select features
X = model_prep.select_features(method, df_data, tracts, hemi)
dis = df_demog.apply(lambda x: True if x['Group'] == group else False , axis=1)
hcs = df_demog.apply(lambda x: True if x['Group'] == 0 else False , axis=1)
# Count number of True in series and find ratio of HC/PAT for splitting
numOfdis = len(dis[dis == True].index)
numOfhcs = len(hcs[hcs == True].index)
ratio = numOfdis/(numOfhcs+numOfdis)
st.write("Ratio subjects/controls:", np.round(ratio,2))
X = X[(X['Group'] == 0) | (X['Group'] == group)]
y = X[['Group']]
X = X.drop(['Group', 'ID'], axis=1)
#scaler = MinMaxScaler(feature_range=(0, 1))
#X[X.columns] = scaler.fit_transform(X[X.columns])
param_grid = [{'kernel': ['rbf'],
'gamma': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'],
'C': [1, 10, 100, 1000]}]
grid_search = GridSearchCV(svm.SVC(class_weight={0:ratio, group:1-ratio}), param_grid, scoring = "roc_auc")
scores = []
#best_svc = svm.SVC(kernel='linear', C=1, class_weight={0:ratio, group:1-ratio})
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=reps, random_state=42)
for train_index, test_index in cv.split(X,y):
X_train, X_test, y_train, y_test = X.iloc[train_index], X.iloc[test_index], y.iloc[train_index], y.iloc[test_index]
if(regress):
if'sex' in df_demog and 'age' in df_demog:
X_train, X_test = model_prep.regress_confound(X_train,
X_test, df_demog)
else:
st.error("No age or sex information found. Skipping regression step.")
scaler, X_train, X_test = model_prep.normalize_features(X_train, X_test,method)
#best_svc.fit(X_train, y_train.values.ravel())
grid_search.fit(X_train, y_train.values.ravel())
y_pred = grid_search.predict(X_test)
fpr, tpr, thresholds = roc_curve(y_test, y_pred,group)
auc_score = auc(fpr, tpr)
#st.write(auc_score)
scores.append(auc_score)
st.success("Mean AUC: %0.2f (+/- %0.2f)" % (np.round(np.mean(scores), 2), np.round(np.std(scores),2)))
return scores
def run(method, df_data, df_demog, regress, tracts, group, hemi, metric, reps):
st.warning("Computing permutations ... estimated time: " + str(np.round(len(df_demog)*2/60,2)) + " minutes.")
if 'sex' not in df_demog and 'age' not in df_demog:
st.error("No age or sex information found. Skipping regression step.")
#1 Select features
X = model_prep.select_features(method, df_data, tracts, hemi)
# Get a bool series representing which row satisfies the condition i.e. True for
# row in which value of group == 0
#st.write(X)
dis = df_demog.apply(lambda x: True if x['Group'] == group else False , axis=1)
hcs = df_demog.apply(lambda x: True if x['Group'] == 0 else False , axis=1)
# Count number of True in series and find ratio of HC/PAT for splitting
numOfdis = len(dis[dis == True].index)
numOfhcs = len(hcs[hcs == True].index)
ratio = numOfdis/numOfhcs
#To accumulate error Distances
DISTS = np.zeros(len(X))
countInserts = np.zeros(len(X))
count=0
#Separate HC from PATIENTS
HC = X[X['Group'] == 0]
y_HC = HC[['Group', 'ID']]
PATIENTS = X[X['Group'] == group]
y_PAT = PATIENTS[['Group', 'ID']]
##2 HERE, basically split the data into train and Val (split_size*repeats times) into equal size of HC/PAT.
#split_size = int(np.ceil((float(numOfhcs)/numOfdis)))
split_size = 5
#st.write (split_size, numOfhcs, numOfdis)
repeats = reps
#random_state = 12883823
#random_state=42
#rkf = RepeatedKFold(n_splits=split_size, n_repeats=repeats, random_state=random_state)
AUC = np.zeros(repeats)
tpr = []
fpr = []
#for train_idx, test_idx in rkf.split(HC,y_HC):
for r in range(repeats):
st.write("Iteration", r+1 , "of", repeats)
X_train_split, X_test_split, y_train_split, y_test_split = train_test_split(HC, y_HC, test_size=min(0.2,ratio))
#X_train_split, X_test_split = HC.iloc[train_idx], HC.iloc[test_idx]
#y_train_split, y_test_split = y_HC.iloc[train_idx], y_HC.iloc[test_idx]
#Select subset of patients
patients_subset_ids = np.array(random.sample(list(PATIENTS.index), min(len(X_test_split), len(PATIENTS))))
#Cating the HC test with the PATIENTS
X_test_split = pd.concat([X_test_split,PATIENTS.loc[patients_subset_ids]])
y_test_split = pd.concat([y_test_split, y_PAT.loc[patients_subset_ids]])
X_train_split = X_train_split.drop(['Group', 'ID'], axis=1)
X_test_split = X_test_split.drop(['Group', 'ID'], axis=1)
#3 Linear regression of confound
if(regress):
if'sex' in df_demog and 'age' in df_demog:
X_train_split, X_test_split = model_prep.regress_confound(X_train_split,
X_test_split, df_demog)
#4 Normalize features
if method != "Z-score":
scaler, X_train, X_test = model_prep.normalize_features(X_train_split, X_test_split, method)
else:
X_train, X_test = X_train_split, X_test_split
#5 Anomaly detection method
if method == "Z-score":
model = Model(X_train, X_test, "Z-score")
elif method == "PCA":
model = Model(X_train, X_test, "PCA")
else:
model = Model(X_train, X_test, "Autoencoder")
#6 Run
d_train, d_test = model.run()
DISTS[d_test.index] = DISTS[d_test.index] + d_test.values
countInserts[d_test.index] += 1
#7 Evaluate
result = model_prep.evaluate(d_train, d_test, y_HC, y_PAT, method)
AUC[count], f, t = reporter.report_steps("ROC", result, method, group, metric, False)
tpr.append(t)
fpr.append(f)
count = count + 1
#Assemble results / Aggregate and plot mean/median distributions
DISTS /= countInserts
WW = pd.DataFrame()
#!/usr/bin/env python
"""
coding=utf-8
Code Template
"""
from emblaze import app
import logging
import os
import pandas
import textract
from emblaze.ResumeParser.bin import lib
from emblaze.ResumeParser.bin import field_extraction
import spacy
def main():
"""
Main function documentation template
:return: None
:rtype: None
"""
logging.getLogger().setLevel(logging.INFO)
# Extract data from upstream.
observations = extract()
# Spacy: Spacy NLP
# nlp = spacy.load('en_core_web_sm')
nlp = spacy.load('en')
# Transform data to have appropriate fields
observations, nlp = transform(observations, nlp)
# Load data for downstream consumption
load(observations, nlp)
pass
def text_extract_utf8(f):
try:
return str(textract.process(f), "utf-8")
except UnicodeDecodeError as e:
return ''
def extract():
logging.info('Begin extract')
# Reference variables
candidate_file_agg = list()
# # Create list of candidate files
# for root, subdirs, files in os.walk(lib.get_conf('resume_directory')):
# folder_files = [app.open_resource(os.path.join(root, x)).name for x in files]
# candidate_file_agg.extend(folder_files)
folder_files = [app.open_resource(os.path.join(lib.get_conf('resume_directory'), "resume.pdf")).name]
candidate_file_agg.extend(folder_files)
# print("folder_files:",folder_files)
logging.info("candidate_file_agg:{0}".format(candidate_file_agg))
# Convert list to a pandas DataFrame
observations = pandas.DataFrame(data=candidate_file_agg, columns=['file_path'])
import scipy.signal
import pandas as pd
import numpy as np
import peakutils
from lmfit import models
import chachifuncs as ccf
import os
import glob
################################
# OVERALL Wrapper Function
################################
def ML_generate(import_filepath):
"""Generates a dataframe containing charge and discharge
descriptors/error parameters. Also writes descriptors to an
excel spreadsheet 'describe.xlsx' import_filepath = filepath
containing cleaned separated cycles"""
# checks that the file exists
assert os.path.exists(import_filepath), 'The file does not exist'
# check that the whatever is passed to ML_generate is a string
assert isinstance(import_filepath, str), 'The input should be a string'
# creates dataframe of descriptors for the charge/discharge
# cycles of all batteries
df_ch = process.df_generate(import_filepath, 'c')
df_dc = process.df_generate(import_filepath, 'd')
# concats charge and discharge cycles
df_final = pd.concat([df_ch, df_dc], axis=1)
# drops any duplicate rows
df_final = df_final.T.drop_duplicates().T
# saves data to an excel file
writer = pd.ExcelWriter('describe.xlsx')
df_final.to_excel(writer, 'Sheet1')
writer.save()
return df_final
############################
# Sub - Wrapper Functions
############################
# data processing that calls from fitting class
class process:
# first function called by ML_generate
def df_generate(import_filepath, cd):
"""Creates a pandas dataframe for each battery's charge/
discharge cycle in the import_filepath folder.
import_filepath = filepath containing cleaned separated cycles
cd = 'c' for charge and 'd' for discharge
Output:
df_ch = pandas dataframe of descriptors for all charge/discharge
cycles of all batteries in the import_filepath folder"""
# generates a list of datafiles to analyze
rootdir = import_filepath
file_list = [f for f in glob.glob(os.path.join(rootdir, '*.xlsx'))]
# iterate through dir to get excel file
# generates a list of unique batteries
list_bats = []
for file in file_list:
# splits file paths to get battery names
name = os.path.split(file)[1].split('.')[0]
batname = name.split('-')[0]
# adds unique battery names to the list of batteries
if batname not in list_bats:
list_bats.append(batname)
else:
None
# notifies user of successful import
notice = 'Successfully extracted all battery names for ' + cd
print(notice)
# generates a blank dataframe of charge/discharge descriptors
df_ch = process.pd_create(cd)
# begins generating dataframe of descriptors
name_ch = []
for bat in list_bats:
# notifies user which battery is being fit
notice = 'Fitting battery: ' + bat + ' ' + cd
print(notice)
# generates dataframe of descriptor fits for each battery
df = process.imp_all(import_filepath, bat, cd)
# generates an iterative list of names for the 'name'
# column of the final dataframe
name_ch = name_ch + [bat] * len(df.index)
# concats dataframe from current battery with previous
# batteries
df_ch = pd.concat([df_ch, df])
# adds name column to the dataframe
df_ch['names'] = name_ch
return df_ch
def imp_all(source, battery, cd):
"""Generates a Pandas dataframe of descriptors for a single battery
source = string containing directory with the excel sheets
for individual cycle data
battery = string containing excel spreadsheet of file name
cd = either 'c' for charge or 'd' for discharge
Output:
charge_descript = pandas dataframe of charge descriptors"""
# check that the battery label is a string
# check that the source directory passed in is a string
# check that 'c' or 'd' is passed
assert isinstance(source, str), 'The input should be a string'
assert isinstance(battery, str), 'The input should be a string'
# generates list of battery files for import
file_pref = battery + '*.xlsx'
file_list = [f for f in glob.glob(os.path.join(source, file_pref))]
# sorts by cycle
cycle = []
# extracts cycle number from file name using known pattern
for file in file_list:
cyc1 = os.path.split(file)[1].split('Clean')[0]
cyc = os.path.split(cyc1)[1].split('-Cycle')[1]
cycle.append(int(cyc))
# sorts cycle numbers
cyc_sort = sorted(cycle)
# determines order of indexes that will properly sort the data
cyc_index = []
for cyc in cyc_sort:
cyc_index.append(cycle.index(cyc))
# reindexes file list using the lists of indices from above
file_sort = []
for indices in cyc_index:
file_sort.append(file_list[indices])
# this is the end of the shit that sorts by cycle
charge_descript = process.pd_create(cd)
# iterates over the file list and the cycle number
for file_val, cyc_loop in zip(file_sort, cyc_sort):
# determines dictionary of descriptors from file data
c = process.imp_one_cycle(file_val, cd, cyc_loop, battery)
if c != 'throw':
# generates list of dictionaries while rejecting any that
# return the 'throw' error
charge_descript = process.pd_update(charge_descript, c)
return charge_descript
def pd_create(cd):
"""Creates a blank dataframe containing either charge or
discharge descriptors/error parameters
cd = either 'c' for charge or 'd' for discharge
Output:
blank pandas dataframe with descriptor columns and cycle number rows"""
# number of descriptors it generates
n_desc = 19
# determines prefix string based on need for a charge or
# discharge dataframe
if cd == 'c':
prefix = 'ch_'
else:
prefix = 'dc_'
# generates list of names for the top of the descriptors dataframe
names = []
for ch in np.arange(n_desc):
names.append(prefix + str(int(ch)))
# adds names of error parameters to the end of the descriptor list
names = names + [prefix+'AIC', prefix+'BIC', prefix+'red_chi_squared']
# creates pandas dataframe with necessary heading
# print(names)
desc = pd.DataFrame(columns=names)
return desc
def pd_update(desc, charge_descript):
"""adds a list of charge descriptors to a pandas dataframe
desc = dataframe from pd_create
charge_descript = descriptor dictionaries
Output:
pandas dataframe with a row of descriptors appended on"""
# check if the inputs have the right Type
assert isinstance(
desc, pd.core.frame.DataFrame), "This input must be a pandas dataframe"
assert isinstance(
charge_descript, dict), "Stop right there, only dictionaries are allowed in these parts"
# converts the dictionary of descriptors into a list of descriptors
desc_ls = process.dict_2_list(charge_descript)
# adds zeros to the end of each descriptor list to create
# a list with 22 entries
# also appends error parameters to the end of the descriptor list
desc_app = desc_ls + \
np.zeros(19-len(desc_ls)).tolist() + charge_descript['errorParams']
# generates a dataframe of descriptors
desc_df = pd.DataFrame([desc_app], columns=desc.columns)
# combines row of a dataframe with previous dataframe
desc = pd.concat([desc, desc_df], ignore_index=True)
import json
from datetime import datetime
import pandas as pd
def get_forecast():
first_row = True
nrow = 0
with open('data/tmp/forecast_weather.json') as f:
for line in f:
print(nrow)
nrow += 1
res = json.loads(line)
timezone_offset = res['city']['timezone']
first_line = True
for forecast in res['list']:
if first_line:
forecast_time = forecast['dt'] + timezone_offset
first_line = False
dt = forecast['dt'] + timezone_offset
if first_row:
data = pd.DataFrame(forecast['main'],
index=[datetime.utcfromtimestamp(forecast_time).strftime('%Y-%m-%d %H:%M:%S')])
data['dt'] = datetime.utcfromtimestamp(dt).strftime('%Y-%m-%d %H:%M:%S')
df = pd.DataFrame.from_dict(data)
# Input arguments flag
import sys
sys.path.append('..')
_, *flag = sys.argv
# Parse arguments
import argparse
parser = argparse.ArgumentParser(prog='hs_check', description='Check phase synchronization for selected BPMs and plane.')
parser.add_argument('-p', '--plane', choices=('x', 'y'), help='data plane', default='x')
parser.add_argument('-l', '--length', type=int, help='number of turns to use', default=256)
select = parser.add_mutually_exclusive_group()
select.add_argument('--skip', metavar='BPM', nargs='+', help='space separated list of valid BPM names to skip')
select.add_argument('--only', metavar='BPM', nargs='+', help='space separated list of valid BPM names to use')
parser.add_argument('-o', '--offset', type=int, help='rise offset for all BPMs', default=0)
parser.add_argument('-r', '--rise', action='store_true', help='flag to use rise data (drop first turns)')
transform = parser.add_mutually_exclusive_group()
transform.add_argument('--mean', action='store_true', help='flag to remove mean')
transform.add_argument('--median', action='store_true', help='flag to remove median')
transform.add_argument('--normalize', action='store_true', help='flag to normalize data')
parser.add_argument('-f', '--filter', choices=('none', 'svd', 'hankel'), help='filter type', default='none')
parser.add_argument('--rank', type=int, help='rank to use for svd & hankel filter', default=8)
parser.add_argument('--type', choices=('full', 'randomized'), help='svd computation type for hankel filter', default='randomized')
parser.add_argument('--buffer', type=int, help='buffer size to use for randomized hankel filter', default=16)
parser.add_argument('--count', type=int, help='number of iterations to use for randomized hankel filter', default=16)
parser.add_argument('-w', '--window', type=float, help='window order', default=0.0)
parser.add_argument('--factor', type=float, help='threshold factor', default=5.0)
parser.add_argument('--plot', action='store_true', help='flag to plot data')
parser.add_argument('-H', '--harmonica', action='store_true', help='flag to use harmonica PV names for input')
parser.add_argument('--device', choices=('cpu', 'cuda'), help='data device', default='cpu')
parser.add_argument('--dtype', choices=('float32', 'float64'), help='data type', default='float64')
args = parser.parse_args(args=None if flag else ['--help'])
# Import
import epics
import pandas
import torch
from datetime import datetime
from harmonica.util import LIMIT, pv_make
from harmonica.window import Window
from harmonica.data import Data
from harmonica.filter import Filter
from harmonica.decomposition import Decomposition
# Time
TIME = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
# Check and set device & data type
dtype = {'float32': torch.float32, 'float64': torch.float64}[args.dtype]
device = args.device
if device == 'cuda' and not torch.cuda.is_available():
exit(f'error: CUDA is not avalible')
# Set data plane
plane = args.plane.upper()
# Load monitor data
name = epics.caget('H:MONITOR:LIST')[:epics.caget('H:MONITOR:COUNT')]
flag = epics.caget_many([f'H:{name}:FLAG' for name in name])
rise = epics.caget_many([f'H:{name}:RISE' for name in name])
# Set BPM data
bpm = {name: rise for name, flag, rise in zip(name, flag, rise) if flag == 1}
# Check & remove skipped
if args.skip:
for name in (name.upper() for name in args.skip):
if not name in bpm:
exit(f'error: {name} is not a valid BPM to skip')
bpm.pop(name)
# Check & keep selected
if args.only:
for name in (name.upper() for name in args.only):
if not name in bpm:
exit(f'error: {name} is not a valid BPM to read')
for name in bpm.copy():
if not name in (name.upper() for name in args.only):
bpm.pop(name)
# Check BPM list
if not bpm:
exit(f'error: BPM list is empty')
# Set model phase
PHASE = torch.tensor(epics.caget_many([f'H:{name}:MODEL:F{plane}' for name in bpm]), dtype=dtype)
# Set tunes
q = epics.caget(f'H:FREQUENCY:VALUE:{plane}')
Q = epics.caget(f'H:FREQUENCY:MODEL:{plane}')
# Generate PV names
pv_list = [pv_make(name, args.plane, args.harmonica) for name in bpm]
pv_rise = [*bpm.values()]
# Check length
length = args.length
if length < 0 or length > LIMIT:
exit(f'error: {length=}, expected a positive value less than {LIMIT=}')
# Check offset
offset = args.offset
if offset < 0:
exit(f'error: {offset=}, expected a positive value')
if length + offset > LIMIT:
exit(f'error: sum of {length=} and {offset=}, expected to be less than {LIMIT=}')
# Check rise
if args.rise:
rise = min(pv_rise)
if rise < 0:
exit(f'error: rise values are expected to be positive')
rise = max(pv_rise)
if length + offset + rise > LIMIT:
exit(f'error: sum of {length=}, {offset=} and max {rise=}, expected to be less than {LIMIT=}')
else:
rise = 0
# Check window order
if args.window < 0.0:
exit(f'error: window order {args.window} should be greater or equal to zero')
# Load TbT data
size = len(bpm)
count = length + offset + rise
win = Window(length, 'cosine_window', args.window, dtype=dtype, device=device)
tbt = Data.from_epics(win, pv_list, pv_rise if args.rise else None, shift=offset, count=count)
# Remove mean
if args.mean:
tbt.window_remove_mean()
# Remove median
if args.median:
tbt.work.sub_(tbt.median())
# Normalize
if args.normalize:
tbt.normalize()
# Filter (svd)
if args.filter == 'svd':
flt = Filter(tbt)
flt.filter_svd(rank=args.rank)
# Filter (hankel)
if args.filter == 'hankel':
flt = Filter(tbt)
flt.filter_svd(rank=args.rank)
flt.filter_hankel(rank=args.rank, random=args.type == 'randomized', buffer=args.buffer, count=args.count)
# Estimate phase
dec = Decomposition(tbt)
phase, _, _ = dec.harmonic_phase(q, length=args.length, order=args.window, factor=args.factor)
# Check
check, table= Decomposition.phase_check(q, Q, phase, PHASE, factor=args.factor)
# Print result
for marked in check:
index, value = check[marked]
if index != 0:
print(marked, [*bpm][marked], check[marked])
# Plot
if args.plot:
from plotly.express import scatter
flag = [-1 if key not in check else check[key][0]/2 - 1 for key in range(len(bpm))]
df = pandas.DataFrame()
"""Utilities for parsing corpus tsv files into pandas DataFrames."""
import logging
from glob import glob
from pathlib import Path
from typing import Dict, Iterable, Union
import pandas as pd
from tqdm import tqdm
import harmonic_inference.utils.corpus_utils as cu
from harmonic_inference.data.corpus_constants import (
CHORD_ONSET_BEAT,
CONVERTERS,
DTYPES,
MEASURE_OFFSET,
NOTE_ONSET_BEAT,
)
def read_dump(
file: str,
index_col: Union[int, Iterable] = (0, 1),
converters: Dict = None,
dtypes: Dict = None,
**kwargs,
) -> pd.DataFrame:
"""
Read a corpus tsv file into a pandas DataFrame.
Parameters
----------
file : string
The tsv file to parse.
index_col : int or list(int)
The index (or indices) of column(s) to use as the index. For files.tsv, use 0.
converters : dict
Converters which will be passed to the pandas read_csv function. These will
overwrite/be added to the default list of CONVERTERS.
dtypes : dict
Dtypes which will be passed to the pandas read_csv function. These will
overwrite/be added to the default list of DTYPES.
Returns
-------
df : pd.DataFrame
The pandas DataFrame, parsed from the given tsv file.
"""
conv = CONVERTERS.copy()
types = DTYPES.copy()
if dtypes is not None:
types.update(dtypes)
if converters is not None:
conv.update(converters)
return pd.read_csv(file, sep="\t", index_col=index_col, dtype=types, converters=conv, **kwargs)
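# Example usage (illustrative only; the paths are hypothetical):
#   measures_df = read_dump("corpus/measures.tsv")         # default (0, 1) multi-index
#   files_df = read_dump("corpus/files.tsv", index_col=0)  # files.tsv uses a single index column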
def load_clean_corpus_dfs(dir_path: Union[str, Path], count: int = None):
"""
Return cleaned DataFrames from the corpus data in the given directory. The DataFrames will
    be read from files: 'files.tsv', 'measures.tsv', 'chords.tsv', and 'notes.tsv'.
They will undergo the following cleaning procedure:
1. Remove repeats from measures.
    2. Drop notes and chords corresponding to removed measures.
    3. Drop chords with a numeral of '@none' or NaN.
4. Add offsets to notes.
5. Merge tied notes.
6. Add offsets to chords.
Parameters
----------
dir_path : str or Path
The path to a directory containing the files: 'files.tsv', 'measures.tsv', 'chords.tsv',
        and 'notes.tsv'.
count : int
If given, the number of pieces to read in. Else, read all of them.
Returns
-------
files_df : pd.DataFrame
The files data frame.
measures_df : pd.DataFrame
The measures data frame with repeats removed.
chords_df : pd.DataFrame
The chords data frame, cleaned as described.
notes_df : pd.DataFrame
The notes data frame, cleaned as described.
"""
files_df = read_dump(Path(dir_path, "files.tsv"), index_col=0)
measures_df = read_dump(Path(dir_path, "measures.tsv"))
notes_df = read_dump(Path(dir_path, "notes.tsv"))
try:
chords_df = read_dump(Path(dir_path, "chords.tsv"), low_memory=False)
except Exception:
# Enable loading with no annotations
chords_df = None
if count is not None:
files_df = files_df.iloc[:count]
measures_df = measures_df.loc[files_df.index]
notes_df = notes_df.loc[files_df.index]
if chords_df is not None:
chords_df = chords_df.loc[files_df.index]
# Bugfix for Couperin piece "next" error
files_df = files_df.loc[~(files_df["file_name"] == "c11n08_Rondeau.tsv")]
measures_df = measures_df.loc[files_df.index]
notes_df = notes_df.loc[files_df.index]
if chords_df is not None:
chords_df = chords_df.loc[chords_df.index.get_level_values(0).isin(files_df.index)]
# End bugfix
# Incomplete column renaming
if "offset" in measures_df.columns:
measures_df[MEASURE_OFFSET].fillna(measures_df["offset"], inplace=True)
measures_df = measures_df.drop("offset", axis=1)
if chords_df is not None:
if "onset" in chords_df.columns:
chords_df[CHORD_ONSET_BEAT].fillna(chords_df["onset"], inplace=True)
chords_df = chords_df.drop("onset", axis=1)
if "onset" in notes_df.columns:
notes_df[NOTE_ONSET_BEAT].fillna(notes_df["onset"], inplace=True)
notes_df = notes_df.drop("onset", axis=1)
# Remove measure repeats
if isinstance(measures_df.iloc[0].next, tuple):
measures_df = cu.remove_repeats(measures_df)
# Remove unmatched
notes_df = cu.remove_unmatched(notes_df, measures_df)
if chords_df is not None:
chords_df = cu.remove_unmatched(chords_df, measures_df)
chords_df = chords_df.drop(
chords_df.loc[(chords_df.numeral == "@none") | chords_df.numeral.isnull()].index
)
# Remove notes with invalid onset times
note_measures = pd.merge(
notes_df.reset_index(),
measures_df.reset_index(),
how="left",
on=["file_id", "mc"],
)
valid_onsets = (note_measures[MEASURE_OFFSET] <= note_measures[NOTE_ONSET_BEAT]) & (
note_measures[NOTE_ONSET_BEAT] < note_measures["act_dur"] + note_measures[MEASURE_OFFSET]
)
if not valid_onsets.all():
with pd.option_context("display.max_rows", None, "display.max_columns", None):
invalid_string = note_measures.loc[
~valid_onsets,
["file_id", "note_id", "mc", NOTE_ONSET_BEAT, MEASURE_OFFSET, "act_dur"],
]
logging.debug(
f"{(~valid_onsets).sum()} notes have invalid onset times:\n{invalid_string}"
)
notes_df = notes_df.loc[valid_onsets.values]
# Remove chords with invalid onset times
if chords_df is not None:
chord_measures = pd.merge(
chords_df.reset_index(),
measures_df.reset_index(),
how="left",
on=["file_id", "mc"],
)
valid_onsets = (chord_measures[MEASURE_OFFSET] <= chord_measures[CHORD_ONSET_BEAT]) & (
chord_measures[CHORD_ONSET_BEAT]
< chord_measures["act_dur"] + chord_measures[MEASURE_OFFSET]
)
if not valid_onsets.all():
with pd.option_context("display.max_rows", None, "display.max_columns", None):
invalid_string = chord_measures.loc[
~valid_onsets,
["file_id", "chord_id", "mc", CHORD_ONSET_BEAT, MEASURE_OFFSET, "act_dur"],
]
logging.debug(
f"{(~valid_onsets).sum()} chords have invalid onset times:\n{invalid_string}"
)
chords_df = chords_df.loc[valid_onsets.values]
# Add offsets
if not all([column in notes_df.columns for column in ["offset_beat", "offset_mc"]]):
notes_df = cu.add_note_offsets(notes_df, measures_df)
# Merge ties
notes_df = cu.merge_ties(notes_df)
# Add chord metrical info
if chords_df is not None:
chords_df = cu.add_chord_metrical_data(chords_df, measures_df)
# Remove chords with dur 0
if chords_df is not None:
invalid_dur = chords_df["duration"] <= 0
if invalid_dur.any():
with pd.option_context("display.max_rows", None, "display.max_columns", None):
invalid_string = chords_df.loc[
invalid_dur,
["mc", CHORD_ONSET_BEAT, "mc_next", f"{CHORD_ONSET_BEAT}_next", "duration"],
]
logging.debug(
f"{(invalid_dur).sum()} chords have invalid durations:\n{invalid_string}"
)
chords_df = chords_df.loc[~invalid_dur].copy()
return files_df, measures_df, chords_df, notes_df
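# Example usage (illustrative only; the directory name is hypothetical):
#   files_df, measures_df, chords_df, notes_df = load_clean_corpus_dfs("corpus_data", count=10)
#   # chords_df is None when no chords.tsv could be read (un-annotated corpora)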
def aggregate_annotation_dfs(
annotations_path: Union[Path, str],
out_dir: Union[Path, str],
notes_only: bool = False,
):
"""
Aggregate all annotations from a corpus directory into 4 combined tsvs in an out directory.
The resulting tsv will be: files.tsv, chords.tsv, notes.tsv, and measures.tsv.
Parameters
----------
annotations_path : Union[Path, str]
The path of the corpus annotations. Annotations should lie in directories:
annotations_path/*/{harmonies/notes/measures}/*.tsv
out_dir : Union[Path, str]
The directory to write the output tsvs into.
notes_only : bool
If True, the resulting aggregation will include pieces without annotations,
but which have a notes tsv entry. If False, only those with annotations (a harmonies
directory) will be included.
"""
if isinstance(annotations_path, str):
annotations_path = Path(annotations_path)
if isinstance(out_dir, str):
out_dir = Path(out_dir)
files_dict = {"corpus_name": [], "file_name": []}
chord_df_list = []
note_df_list = []
measure_df_list = []
chord_indexes = []
note_indexes = []
measure_indexes = []
dir_name = "notes" if notes_only else "harmonies"
for file_string in tqdm(
glob(str(Path(annotations_path, f"**/{dir_name}/*.tsv")), recursive=True)
):
file_path = Path(file_string)
base_path = file_path.parent.parent
corpus_name = base_path.name
file_name = file_path.name
try:
chord_df = pd.read_csv(Path(base_path, "harmonies", file_name), dtype=str, sep="\t")
chord_indexes.append(len(files_dict["file_name"]))
except Exception:
logging.info("Error parsing harmonies for file %s", file_name)
chord_df = None
try:
note_df = pd.read_csv(Path(base_path, "notes", file_name), dtype=str, sep="\t")
note_indexes.append(len(files_dict["file_name"]))
except Exception:
logging.info("Error parsing notes for file %s", file_name)
note_df = None
try:
measure_df = pd.read_csv(Path(base_path, "measures", file_name), dtype=str, sep="\t")
measure_indexes.append(len(files_dict["file_name"]))
except Exception:
logging.info("Error parsing measures for file %s", file_name)
measure_df = None
files_dict["corpus_name"].append(corpus_name)
files_dict["file_name"].append(file_name)
if chord_df is not None:
chord_df_list.append(chord_df)
if note_df is not None:
note_df_list.append(note_df)
if measure_df is not None:
measure_df_list.append(measure_df)
if not files_dict["file_name"]:
logging.warning("No files found to aggregate.")
return
# Write out aggregated tsvs
if not out_dir.exists():
out_dir.mkdir(parents=True, exist_ok=True)
    files_df = pd.DataFrame(files_dict)
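    # The function body is truncated here in the source; the lines below are a sketch (an
    # assumption) of the remaining write-out step, keying each aggregated frame by the
    # position of its piece in files_df.
    files_df.to_csv(Path(out_dir, "files.tsv"), sep="\t")
    for df_list, indexes, name in [
        (chord_df_list, chord_indexes, "chords.tsv"),
        (note_df_list, note_indexes, "notes.tsv"),
        (measure_df_list, measure_indexes, "measures.tsv"),
    ]:
        if df_list:
            pd.concat(df_list, keys=indexes).to_csv(Path(out_dir, name), sep="\t")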
import pysam
import pandas as pd
import numpy as np
import re
import os
import sys
import collections
import scipy
from scipy import stats
import statsmodels
from statsmodels.stats.multitest import fdrcorrection
try:
from . import global_para
except ImportError:
import global_para
try:
from .consensus_seq import *
except ImportError:
from consensus_seq import *
try:
from .math_stat import *
except ImportError:
from math_stat import *
def f_0cdna():
# if 0 cdna detected, report messages to users
global_para.logger.info("Program finished successfully")
global_para.logger.info("No cDNA detected. Exiting.")
exit(0)
def f_if_0cdna(obj):
if len(obj) == 0:
f_0cdna()
def f_warning_merge_region(df_region):
# df_region = df_region_stat_bed_merge.copy()
    df_region['diff'] = abs(df_region.start - df_region.end)
df_region_diff = df_region[df_region['diff']>10000]
del df_region_diff['diff']
if len(df_region_diff)>0:
        global_para.logger.warning("%d extremely long regions (>10 kb) are detected, please check results carefully" % len(df_region_diff))
global_para.logger.info(df_region_diff)
def read_gene_model(gtf_gene_unique_file):
# load gene model into a dataframe
print('Loading gene model table')
dict_type = {
"seqname":"str",
"start":"int64",
"end":"int64",
"gene_id":"str",
"gene_name":"str",
"transcript_id":"str",
"exon_flank_start20":"str",
"exon_flank_end20":"str",
"is_exon_boundary_start":"str",
"is_exon_boundary_end":"str",
"exon_boundary_start_nearseq20":"str",
"exon_boundary_end_nearseq20":"str"}
df_gene_exon_unique = pd.read_csv(gtf_gene_unique_file, sep = '\t',header = 0)
df_gene_exon_unique = df_gene_exon_unique.astype(dict_type)
# convert all sequences to uppercase
df_gene_exon_unique['exon_flank_start20'] = df_gene_exon_unique['exon_flank_start20'].str.upper()
df_gene_exon_unique['exon_flank_end20'] = df_gene_exon_unique['exon_flank_end20'].str.upper()
df_gene_exon_unique['exon_boundary_start_nearseq20'] = df_gene_exon_unique['exon_boundary_start_nearseq20'].str.upper()
df_gene_exon_unique['exon_boundary_end_nearseq20'] = df_gene_exon_unique['exon_boundary_end_nearseq20'].str.upper()
df_gene_exon_unique = df_gene_exon_unique.fillna('')
print('Loaded %d exons\n'%(len(df_gene_exon_unique)))
return(df_gene_exon_unique)
def check_bam_index(genome_bam_file):
## check index of bam file; if no, generate one.
print('Checking index of input bam file')
if os.path.exists(genome_bam_file + '.bai') or os.path.exists(re.sub('bam$','bai',genome_bam_file)):
print('Index file exists')
else:
print('file is not indexed, now generating index')
pysam.index(genome_bam_file)
print('Index file created\n')
return
def f_overlap_reference(genome_bam_file,df_gene_exon):
# overlap reference for input bam and gene model
bam_genome = pysam.AlignmentFile(genome_bam_file,'rb')
reference_bam = bam_genome.references
bam_genome.close()
reference_exon = df_gene_exon.seqname.unique().tolist()
overlap_reference = [x for x in reference_bam if x in reference_exon]
    if len(overlap_reference) == 0:
        global_para.logger.error('chromosome names are not matched between gene model and bam file')
        exit(1)
df_gene_exon = df_gene_exon.query('seqname in @overlap_reference')
return df_gene_exon
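# Typical call sequence (illustrative only; file names are hypothetical):
#   check_bam_index("sample.bam")
#   df_gene_exon = read_gene_model("gtf_gene_unique.tsv")
#   df_gene_exon = f_overlap_reference("sample.bam", df_gene_exon)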
def f_close_exon_merge(df_transcript_exon):
df_transcript_exon = df_transcript_exon.sort_values(['transcript_id','start'])
df_transcript_exon = df_transcript_exon.reset_index(drop = True)
df_transcript_exon['start_next'] = df_transcript_exon.groupby(['transcript_id'])['start'].shift(-1)
df_transcript_exon['dis_exon'] = abs(df_transcript_exon['end'] - df_transcript_exon['start_next'])
df_transcript_exon_close = df_transcript_exon.query('dis_exon<@global_para.exon_distance')
list_transcript = df_transcript_exon_close.transcript_id.unique().tolist()
    if len(list_transcript) > 0:
list_df_transcript_merge = []
for transcript_id in list_transcript:
sub_df = df_transcript_exon.query('transcript_id==@transcript_id')
sub_df_new = f_df_1transcript_merge(sub_df)
list_df_transcript_merge.append(sub_df_new)
        df_transcript_exon_close_new = pd.concat(list_df_transcript_merge)
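        # The snippet ends here in the source; a plausible continuation (an assumption) is to
        # swap the merged transcripts back in before returning the table.
        df_transcript_exon_other = df_transcript_exon.query('transcript_id not in @list_transcript')
        df_transcript_exon = pd.concat([df_transcript_exon_other, df_transcript_exon_close_new])
    return df_transcript_exon.drop(columns=['start_next', 'dis_exon'], errors='ignore')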