prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
from pathlib import Path
import pytest
from pytorch_quik import arg
from pytorch_quik.mlflow import QuikMlflow
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import pandas as pd
import numpy as np
import torch
import json
from os import getenv
import warnings
from collections import OrderedDict
import sys
# bd = Path("/workspaces/rdp-vscode-devcontainer/pytorch-quik")
# TESTDIR = bd.joinpath("pytorch_quik", "tests")
TESTDIR = Path(__file__).parent
SAMPLE = TESTDIR.joinpath("sample_data.json")
FINAL = TESTDIR.joinpath("final_data.json")
ENCODING = TESTDIR.joinpath("sample_encoding.pt")
AMASK = TESTDIR.joinpath("sample_amask.pt")
TRACKING_URI = getenv("TRACKING_URI", "https://localhost:5000")
ENDPOINT_URL = getenv("ENDPOINT_URL", None)
MLUSER = getenv("MLUSER", None)
IS_CI = getenv("CI", "false")
def pytest_collection_modifyitems(items):
skipif_mlflow = pytest.mark.skipif(
IS_CI == "true", reason="no mlflow server access"
)
skipif_mlflow_partial = pytest.mark.skipif(
IS_CI == "true", reason="no mlflow server access"
)
skipif_gpus = pytest.mark.skipif(
IS_CI == "true", reason="no GPU for test version"
)
for item in items:
if "skip_mlflow" in item.keywords:
item.add_marker(skipif_mlflow)
        # "[True-" in the parametrized test name means test_mlflow=True
if "skip_mlflow_partial" in item.keywords and "[True-" in item.name:
item.add_marker(skipif_mlflow_partial)
        # a trailing "0]" in the parametrized test name means gpu=0
if "skip_gpus" in item.keywords and "0]" in item.name:
item.add_marker(skipif_gpus)
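# Example of the naming convention above (hypothetical test id): a parametrized test such as
# "test_run[True-0]" has test_mlflow=True and gpu=0, so it would pick up both skip markers on CI.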
@pytest.fixture(params=[None, 0])
def gpu(request):
return request.param
@pytest.fixture(params=[True, False])
def test_mlflow(request):
return request.param
@pytest.fixture
def clean_run():
def clean_run_function(mlf, gpu):
mlf.client.delete_run(mlf.runid)
if gpu == 0:
mlf.client.delete_experiment(mlf.expid)
return clean_run_function
@pytest.fixture
def create_qml():
def create_qml_function(args):
args.experiment = "pytest"
mlf = QuikMlflow(args)
exp = mlf.client.get_experiment(mlf.expid)
if exp.lifecycle_stage == "deleted":
mlf.client.restore_experiment(mlf.expid)
return mlf
return create_qml_function
@pytest.fixture
def args(gpu):
"""sample args namespace"""
sys.argv = [""]
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
MLKWARGS = {
"user": MLUSER,
"tracking_uri": TRACKING_URI,
"endpoint_url": ENDPOINT_URL,
}
parser = arg.add_ddp_args(parser)
parser = arg.add_learn_args(parser)
parser = arg.add_mlflow_args(parser, kwargs=MLKWARGS)
parser = arg.add_ray_tune_args(parser)
args = parser.parse_args()
args.mixed_precision = False
args.bert_type = "roberta"
args.data_date = 20210101
args.gpu = gpu
if args.gpu is not None:
args.has_gpu = True
args.gpus = 1
else:
args.has_gpu = False
args.gpus = 0
        warnings.warn(
            "GPU not found, setting has_gpu to False. "
            "Some tests will be skipped"
        )
args.num_workers = 2
args.use_init_group = True
args.use_mlflow = False
args.use_ray_tune = False
args.sched_kwargs = {
"num_warmup_steps": 10,
}
return args
@pytest.fixture
def test_file(args):
fn = f"test_tensor_{args.data_date}.pt"
fn = Path(__file__).parent.joinpath("data", args.bert_type, fn)
return fn
@pytest.fixture(scope="session")
def senti_classes():
"""sentiment classes"""
dir_classes = OrderedDict(
[(0, "Negative"), (1, "Neutral"), (2, "Positive")]
)
return dir_classes
@pytest.fixture(scope="session")
def two_classes():
"""sentiment classes"""
two_classes = OrderedDict([(0, "Negative"), (1, "Positive")])
return two_classes
@pytest.fixture(scope="session")
def inv_senti_classes():
"""inverse sentiment classes"""
inv_classes = OrderedDict(
[("Negative", 0), ("Neutral", 1), ("Positive", 2)]
)
return inv_classes
@pytest.fixture(scope="session")
def sample_tensor():
"""sample tensor"""
torch.manual_seed(0)
return torch.rand(200, 64)
@pytest.fixture(scope="session")
def sample_preds():
"""labels tensor"""
torch.manual_seed(0)
p = torch.rand(200, 3)
    # https://stackoverflow.com/questions/59090533/how-do-i-add-some-gaussian-noise-to-a-tensor-in-pytorch
noise = torch.zeros(200, 3, dtype=torch.float64)
noise = noise + (0.1 ** 0.9) * torch.randn(200, 3)
return p + noise
@pytest.fixture(scope="session")
def sample_tds(sample_tensor, sample_preds):
"""sample tensor dataset"""
return torch.utils.data.TensorDataset(
sample_tensor, sample_tensor, sample_preds
)
@pytest.fixture(scope="session")
def sample_labels():
"""labels tensor"""
torch.manual_seed(0)
labels = torch.rand(200, 3)
labels = np.argmax(labels, axis=1).flatten()
# torch.manual_seed(0)
# return (torch.rand(200) < 0.5).int()
return labels
@pytest.fixture(scope="session")
def eight_labels():
"""labels list for 9 categories"""
labels = [0, 0, 0, 0, 1, 1, 1, 1]
return labels
@pytest.fixture(scope="session")
def sample_data():
"""sample user/item dataset"""
with open(SAMPLE) as f:
df = pd.DataFrame(json.load(f))
return df
@pytest.fixture(scope="session")
def sample_encoding():
"""sample encoded tensors from tokenizer"""
enc = {
"input_ids": torch.load(ENCODING),
"attention_mask": torch.load(AMASK),
}
return enc
@pytest.fixture(scope="session")
def sample_ins(sample_encoding):
"""sample encoded tensor input_ids from tokenizer"""
ins = sample_encoding["input_ids"]
return ins
@pytest.fixture(scope="session")
def sample_amask(sample_encoding):
"""sample encoded tensor attention_masks from tokenizer"""
amask = sample_encoding["attention_mask"]
return amask
@pytest.fixture(scope="session")
def final_data():
"""final user/item dataset"""
with open(FINAL) as f:
df = pd.DataFrame(json.load(f))
df.index.names = ["ui_index"]
return df
@pytest.fixture(scope="session")
def tens_labels(final_data):
"""sample tensor labels from final user/item dataset"""
tens = torch.LongTensor(final_data.label.values)
return tens
@pytest.fixture(scope="session")
def batch(sample_ins, sample_amask, tens_labels):
"""sample batch for model training"""
return [sample_ins, sample_amask, tens_labels]
@pytest.fixture(scope="session")
def final_cmdf(senti_classes):
"""final confusion matrix data frame"""
arr = np.array([[63, 8, 7], [6, 48, 4], [2, 8, 54]])
classes = senti_classes.values()
aidx = | pd.MultiIndex.from_product([["Actual"], classes]) | pandas.MultiIndex.from_product |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
'''
def TSNE_(data_zs):
from sklearn.manifold import TSNE
tsne = TSNE()
    tsne.fit_transform(data_zs)  # perform dimensionality reduction
    tsne = pd.DataFrame(tsne.embedding_, index=data_zs.index)  # convert the embedding to a DataFrame
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
d = tsne[data_zs[u'聚类类别'] == 0]
plt.plot(d[0], d[1], 'r.')
d = tsne[data_zs[u'聚类类别'] == 1]
plt.plot(d[0], d[1], 'go')
d = tsne[data_zs[u'聚类类别'] == 2]
plt.plot(d[0], d[1], 'b*')
plt.show()
'''
def draw_silhouette_score(inputfile,n):
from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans
X = pd.read_csv(inputfile)
from sklearn.preprocessing import StandardScaler
X = StandardScaler().fit_transform(X)
    Scores = []  # store the silhouette scores
for k in range(2,n):
        estimator = KMeans(n_clusters=k)  # build the clustering estimator
estimator.fit(X)
Scores.append(silhouette_score(X, estimator.labels_, metric='euclidean'))
xxx = range(2,n)
plt.xlabel('k')
plt.ylabel('Silhouette Coefficient')
plt.plot(xxx, Scores, 'o-')
plt.show()
return True
def draw_(inputfile,k):
from sklearn.cluster import KMeans
X=pd.read_csv(inputfile)
from sklearn.preprocessing import StandardScaler
X = StandardScaler().fit_transform(X)
kmeans = KMeans(n_clusters=k)
X1 = np.array(X)
kmeans.fit(X1)
    print(kmeans.labels_)  # cluster labels
    print(kmeans.cluster_centers_)  # cluster centers
markers = [['*', 'b'], ['o', 'r'],['.','c'],[',','g'],['^','y'],['<','m'],['>','g'],['s','b'],['h','r'],['+','c'],['d','r']]
for i in range(k):
members = kmeans.labels_ == i # members是布尔数组
plt.scatter(X1[members, 0], X1[members, 1], s=60, marker=markers[i][0], c=markers[i][1], alpha=0.5)
plt.title('KmeansClustering')
plt.show()
def Corr(inputfile):
import seaborn as sns
data=pd.read_csv(inputfile)
print("相关性:")
print(data.corr().round(2).T)
print("相关性热力图:")
print(sns.heatmap(data.corr()))
print("分层相关性热力图:")
print(sns.clustermap(data.corr()))
return True
def Replace_Data(inputfile):
data=pd.read_csv(inputfile)
data['Month'].replace('Jan',1,inplace=True)
data['Month'].replace('Feb', 2, inplace=True)
data['Month'].replace('Mar', 3, inplace=True)
data['Month'].replace('Apr', 4, inplace=True)
data['Month'].replace('May', 5, inplace=True)
data['Month'].replace('June', 6, inplace=True)
data['Month'].replace('Jul', 7, inplace=True)
data['Month'].replace('Aug', 8, inplace=True)
data['Month'].replace('Sep', 9, inplace=True)
data['Month'].replace('Oct', 10, inplace=True)
data['Month'].replace('Nov', 11, inplace=True)
data['Month'].replace('Dec', 12, inplace=True)
data['VisitorType'].replace('Returning_Visitor',1,inplace=True)
data['VisitorType'].replace('Other', 2, inplace=True)
data['VisitorType'].replace('New_Visitor', 3, inplace=True)
data['VisitorType'].replace('New_Visitor', 3, inplace=True)
data['Weekend'].replace(False, 0, inplace=True)
data['Weekend'].replace(True, 1, inplace=True)
return data
def K_Means_(inputfile,n):
from sklearn.cluster import KMeans
from matplotlib.font_manager import FontProperties
K = range(1, n)
X=pd.read_csv(inputfile)
from sklearn.preprocessing import StandardScaler
X = StandardScaler().fit_transform(X)
mean_distortions = []
for k in K:
kmeans = KMeans(n_clusters=k)
kmeans.fit(X)
        r1 = pd.Series(kmeans.labels_).value_counts()  # count the samples in each cluster
        r2 = pd.DataFrame(kmeans.cluster_centers_)  # cluster centers
        r = pd.concat([r2, r1], axis=1)  # concatenate horizontally; axis=0 would stack vertically
print(r)
mean_distortions.append(kmeans.inertia_)
plt.plot(K, mean_distortions, 'bx-')
plt.xlabel('k')
font = FontProperties(size=20)
    plt.ylabel(u'Mean distortion', fontproperties=font)
    plt.title(u'Determine the best K with the elbow method', fontproperties=font)
plt.show()
draw_silhouette_score(inputfile,n)
return True
def Agglo(inputfile,n):
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import MinMaxScaler
    # read the data
data = pd.read_csv(inputfile)
df = MinMaxScaler().fit_transform(data)
    # build the model
model = AgglomerativeClustering(n_clusters=n)
model.fit(df)
data['类别标签'] = model.labels_
print(data.head())
    # plot the dendrogram
ss = sch.linkage(df, method='ward')
sch.dendrogram(ss)
plt.show()
# %%
return True
def DBS_(inputfile):
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
    # initialize the sample data
data = | pd.read_csv(inputfile) | pandas.read_csv |
#!/usr/bin/env python
# encoding: utf-8
'''
cn.py
Created by <NAME>
on 2019-10-10.
Copyright (c) 2019 All rights reserved.
'''
import pandas as pd
import numpy as np
import dask.dataframe as dd
import argparse
import sys
import os
import biomarker_survival as surv
from . import ZscoreCommon
from intervaltree import IntervalTree
def get_options(argv):
parser = argparse.ArgumentParser(description='Get CN file, clinical file, optional output dir')
parser.add_argument('-d', action='store', dest='cn')
parser.add_argument('-k', action='store', dest='annotation_file')
parser.add_argument('-c', action='store', dest='tcga_cdr')
parser.add_argument('-p', action='store', dest='parallel', type=int)
parser.add_argument('-o', action='store', dest='output_directory', default='.')
parser.add_argument('--no_zscores', dest='zscores', action='store_false')
parser.add_argument('--no_from_raw', dest='from_raw', action='store_false')
parser.set_defaults(zscores=True, from_raw=True, parallel=1)
ns = parser.parse_args()
return ns.cn, ns.annotation_file, ns.tcga_cdr, ns.output_directory, ns.parallel, ns.zscores, ns.from_raw
def ctype_cleaning(df, ctype, ctype_clinical): #ctype_clinical is unused
df = df.reset_index()
df = surv.maybe_clear_non_01s(df, 'index', ctype)
df = surv.add_identifier_column(df, 'index')
if ctype == 'LAML':
PRIMARY_BLOOD_DERIVED_CANCER_PATIENT_ID_REGEX = '^.{4}-.{2}-.{4}-03.*'
non_primary_blood_barcodes = df[~df['index'].str.contains(PRIMARY_BLOOD_DERIVED_CANCER_PATIENT_ID_REGEX)]['index']
df = df.drop(non_primary_blood_barcodes.index)
df.set_index('identifier', inplace=True, drop=True)
df = df.drop('index', axis=1)
return df
def prep_annotation_file(annotation_path):
gtf = pd.read_csv(annotation_path, sep='\t', header=None, comment="#")
gtf.columns = ['chr', 'annotation_src', 'feature_type', 'start', 'end', 'score', 'genomic_strand', 'genomic_phase', 'extra']
gtf['gene_name'] = gtf['extra'].str.extract(pat='gene_name "(.*?)";')
gtf['transcript_type'] = gtf['extra'].str.extract(pat='transcript_type "(.*?)";')
gtf = gtf[gtf['feature_type'] == 'transcript']
gtf = gtf.groupby('gene_name').apply(lambda x: x[x['start'] == x['start'].min()].iloc[0])
return gtf[['chr', 'start', 'end', 'feature_type', 'transcript_type']]
# Takes a raw segment file, returns a dictionary of lists of interval trees.
# Dictionary is keyed by patient id
# List is "chromosome copy number intervals". List is 24 long, has one slot for each chromosome
# Interval tree has copy number value at each interval on a particular chromosome, for each patient.
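# Illustrative lookup (hypothetical patient id and coordinates, for orientation only):
#   patient_data['TCGA-02-0001'][17][41_200_000] -> {Interval(begin, end, segment_mean), ...}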
def prep_interval_trees(cn_data):
patient_data = {}
for index, row in cn_data.iterrows():
chromosome = int(row['Chromosome'])
# Add the new patient to the data dict, initializing the chromosome list
if not index in patient_data:
# note the length of the list is 24, so we can use chromosome number (and not worry about index 0)
patient_data[index] = [0] * 24
# print('New id: ', index)
# Initialize the interval tree for the new chromosome for this patient
if patient_data[index][chromosome] == 0:
patient_data[index][chromosome] = IntervalTree()
# Add the range and copy number in this row to the correct patient_data/chromosome location
# Note the interval tree implementation uses half close intervals, but copy number data
# uses closed intervals, so we add 1 to the end to ensure our intervaltree matches the data.
start, end, copy_number = row['Start'], row['End'], row['Segment_Mean']
patient_data[index][chromosome][start:end+1] = copy_number
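        # e.g. a closed segment [100, 200] is stored as tree[100:201], so a point query at
        # position 200 still falls inside the stored interval.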
return patient_data
def get_copy_number_for_gene_start(chr_cn, annotation_start, patient, gene):
if chr_cn == 0:
return np.nan
intervals = chr_cn[annotation_start]
if len(intervals) > 1:
print('Err: more than one interval found for patient:', patient, 'for gene:', gene)
print('Got:', intervals)
return np.nan
if len(intervals) == 0:
# print('Err: no interval found for patient:', patient, 'for gene:', gene)
return np.nan
else:
interval = intervals.pop()
return interval.data
def parse_chrom(chrom):
if not 'chr' in chrom:
return None
if chrom[3:] in ['X', 'Y']:
return 23
elif chrom[3:] in ['M']:
return 0
else:
return int(chrom[3:])
# Extra data = the annotation file
def prep_data(cn_path, extra_data=None, parallel=1, outdir=None, **kwargs):
annotations = prep_annotation_file(extra_data)
print('annotations prepped')
cn_data = pd.read_csv(cn_path, sep='\t', header=0,
na_values='???', index_col=0)
cn_interval_trees = prep_interval_trees(cn_data)
print('interval trees created')
ddata = dd.from_pandas(annotations, npartitions=parallel, sort=False)
patients = sorted(tuple(cn_interval_trees.keys()))
cols = np.append(patients, ['chr', 'start'])
meta_df = pd.DataFrame(index=annotations.index, columns=cols).astype(float)
def make_cn_row(annotation_row):
chrom = parse_chrom(annotation_row['chr'])
cn_row = | pd.Series(index=patients, name=annotation_row.name) | pandas.Series |
'''
...
'''
import os
import numpy as np
import pandas as pd
import datetime as dt
from tqdm import tqdm
import lib.utils as utils
import lib.db_utils as dutils
from datetime import timedelta
from collections import defaultdict
from dateutil.relativedelta import relativedelta
class DefineCohortSettings:
def __init__(self, vacineja2plus_df, init_cohort, final_cohort):
'''
Description.
Args:
vacineja2plus_df:
'''
self.vacineja2plus_df = vacineja2plus_df.copy()
self.init_cohort = init_cohort
self.final_cohort = final_cohort
def define_eligibility(self, partial=14, fully=14, return_=True):
'''
'''
subset = ["DATA D1(VACINADOS)", "DATA D2(VACINADOS)"]
self.vacineja2plus_df["VACINA STATUS - COORTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_when_vaccine(x, self.init_cohort, self.final_cohort), axis=1)
self.vacineja2plus_df["IMUNIZACAO MAXIMA ATE FIM DA COORTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_immunization(x, self.init_cohort, self.final_cohort, partial, fully), axis=1)
# --> Eligibility by tests
subset = ["DATA SOLICITACAO(TESTES)", "DATA COLETA(TESTES)", "RESULTADO FINAL GAL-INTEGRASUS"]
self.vacineja2plus_df["ELIGIBILIDADE TESTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_eligible_test(x, self.init_cohort, self.final_cohort), axis=1)
subset = "IMUNIZACAO MAXIMA ATE FIM DA COORTE"
aptos = ["NAO VACINADO", "PARCIALMENTE IMUNIZADO", "TOTALMENTE IMUNIZADO", "VACINADO SEM IMUNIZACAO"]
self.vacineja2plus_df["ELIGIBILIDADE COORTE GERAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x in aptos else "NAO APTO")
# --> Eligibility for cases partial
self.vacineja2plus_df["ELIGIBILIDADE EXPOSTO PARCIAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x=="PARCIALMENTE IMUNIZADO" else "NAO APTO")
# --> Eligibility for cases fully
self.vacineja2plus_df["ELIGIBILIDADE EXPOSTO TOTAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x=="TOTALMENTE IMUNIZADO" else "NAO APTO")
# --> Create column with age based on the final of cohort.
self.vacineja2plus_df["IDADE"] = self.vacineja2plus_df["DATA NASCIMENTO(VACINEJA)"].apply(lambda x: relativedelta(self.final_cohort, x.date()).years)
self.vacineja2plus_df = self.vacineja2plus_df.drop_duplicates(subset=["CPF"], keep="first")
if return_:
return self.vacineja2plus_df
def dynamical_matching(self, vaccine="CORONAVAC", return_=True, verbose=False, age_thr=18, seed=0):
'''
Description.
Args:
return_:
Return:
'''
if "ELIGIBILIDADE TESTE" not in self.vacineja2plus_df.columns:
return -1
datelst = utils.generate_date_list(self.init_cohort, self.final_cohort)
# --> Apply essential filters
# First, consider only people with age older or equal to 18 years old.
df = self.vacineja2plus_df[self.vacineja2plus_df["IDADE"]>=age_thr]
df = df[df["OBITO INCONSISTENCIA"]!="S"]
df = df[df["DATA VACINA CONSISTENCIA"]!="N"]
# Filter by eligibility
df = df[(df["ELIGIBILIDADE TESTE"]=="APTO") & (df["ELIGIBILIDADE COORTE GERAL"]=="APTO")]
# Obtain set of vaccinated and unvaccinated.
df_vaccinated = df[df["VACINA(VACINADOS)"]==vaccine]
df_vaccinated = df_vaccinated.dropna(subset=["DATA D1(VACINADOS)"], axis=0)
df_unvaccinated = df[pd.isna(df["VACINA(VACINADOS)"])]
if verbose:
print(f"Dimensão de elegíveis após aplicacão das condições: {df.shape}")
print(f"Número restante de óbitos: {df['DATA OBITO'].notnull().sum()}")
print(f"Número restante de hospitalizados: {df['DATA HOSPITALIZACAO'].notnull().sum()}")
print(f"Número restante de testes: {df['DATA SOLICITACAO(TESTES)'].notnull().sum()}")
print(f"Número de vacinados elegíveis para {vaccine}: {df_vaccinated.shape[0]}")
#condition_exposed1 = df_vaccinated["ELIGIBILIDADE TESTE"]=="APTO"
#condition_exposed2 = df_vaccinated["ELIGIBILIDADE COORTE GERAL"]=="APTO"
#df_vaccinated = df_vaccinated[(condition_exposed1) & (condition_exposed2)]
#condition_unexposed1 = df_unvaccinated["ELIGIBILIDADE TESTE"]=="APTO"
#condition_unexposed2 = df_unvaccinated["ELIGIBILIDADE COORTE GERAL"]=="APTO"
#df_unvaccinated = df_unvaccinated[(condition_unexposed1) & (condition_unexposed2)]
# -- CREATE CONTROL RESERVOIR --
control_dates = {
"D1": defaultdict(lambda:-1),
"DEATH": defaultdict(lambda:-1),
"HOSPITAL": defaultdict(lambda:-1)
}
control_reservoir = defaultdict(lambda:[])
control_used = defaultdict(lambda: False)
df_join = pd.concat([df_vaccinated, df_unvaccinated])
print("Criando reservatório de controles ...")
for j in tqdm(range(0, df_join.shape[0])):
cpf = df_join["CPF"].iat[j]
age = df_join["IDADE"].iat[j]
sex = df_join["SEXO(VACINEJA)"].iat[j]
d1 = df_join["DATA D1(VACINADOS)"].iat[j]
dt_death = df_join["DATA OBITO"].iat[j]
dt_hospt = df_join["DATA HOSPITALIZACAO"].iat[j]
control_reservoir[(age,sex)].append(cpf)
if not pd.isna(d1):
control_dates["D1"][cpf] = d1.date()
if not pd.isna(dt_death):
control_dates["DEATH"][cpf] = dt_death.date()
if not pd.isna(dt_hospt):
control_dates["HOSPITAL"][cpf] = dt_hospt.date()
if seed!=0:
np.random.seed(seed)
for key in control_reservoir.keys():
np.random.shuffle(control_reservoir[key])
matchings = defaultdict(lambda:-1)
print("Executando pareamento ...")
for cur_date in tqdm(datelst):
            # Select all people who were vaccinated at the current date
df_vaccinated["compare_date"] = df_vaccinated["DATA D1(VACINADOS)"].apply(lambda x: "TRUE" if x.date()==cur_date else "FALSE")
current_vaccinated = df_vaccinated[df_vaccinated["compare_date"]=="TRUE"]
#print(current_vaccinated.shape)
cpf_list = current_vaccinated["CPF"].tolist()
age_list = current_vaccinated["IDADE"].tolist()
sex_list = current_vaccinated["SEXO(VACINEJA)"].tolist()
date_list = current_vaccinated["DATA D1(VACINADOS)"].tolist()
            # For each person vaccinated at the current date, check whether there is a control for them.
for j in range(0, len(cpf_list)):
pair = find_pair(cur_date, age_list[j], sex_list[j], control_reservoir, control_used, control_dates)
if pair!=-1:
matchings[cpf_list[j]] = pair
items_matching = matchings.items()
pareados = pd.DataFrame({"CPF CASO": [ x[0] for x in items_matching ], "CPF CONTROLE": [ x[1] for x in items_matching ]})
events_df = self.get_intervals(pareados, df_vaccinated, df_unvaccinated)
matched = defaultdict(lambda:False)
for cpf in [ x[0] for x in items_matching ]+[ x[1] for x in items_matching ]:
matched[cpf]=True
df_join["PAREADO"] = df_join["CPF"].apply(lambda x: "SIM" if matched[x] else "NAO")
return events_df, df_join
def get_intervals(self, df_pairs, df_vac, df_unvac):
'''
Description.
Args:
df_pairs:
df_vac:
df_unvac:
'''
pareado = defaultdict(lambda: False)
matched_cpfs = df_pairs["CPF CASO"].tolist()+df_pairs["CPF CONTROLE"].tolist()
[ pareado.update({cpf:True}) for cpf in matched_cpfs ]
data_teste = defaultdict(lambda: np.nan)
data_hospitalizado = defaultdict(lambda:np.nan)
data_obito = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
df_join = pd.concat([df_vac, df_unvac])
for j in range(0, df_join.shape[0]):
cpf = df_join["CPF"].iat[j]
obito = df_join["DATA OBITO"].iat[j]
teste = df_join["DATA SOLICITACAO(TESTES)"].iat[j]
hospitalizacao = df_join["DATA HOSPITALIZACAO"].iat[j]
d1_dt = df_join["DATA D1(VACINADOS)"].iat[j]
d2_dt = df_join["DATA D2(VACINADOS)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
if not pd.isna(d1_dt):
data_d1[cpf] = d1_dt
if not pd.isna(d2_dt):
data_d2[cpf] = d2_dt
if not pd.isna(teste):
data_teste[cpf] = teste
if not | pd.isna(hospitalizacao) | pandas.isna |
import datetime
from collections import defaultdict
from itertools import count
import rdflib
from rdflib import Dataset, Graph, URIRef, Literal, XSD, Namespace, RDFS, BNode, OWL
from rdfalchemy import rdfSubject, rdfMultiple, rdfSingle
import pandas as pd
DATA = pd.read_csv('data/gedichtenGGD_STCN_Steur_stripped.csv', sep=';')
create = Namespace("https://data.create.humanities.uva.nl/")
schema = Namespace("https://schema.org/")
bio = Namespace("http://purl.org/vocab/bio/0.1/")
foaf = Namespace("http://xmlns.com/foaf/0.1/")
void = Namespace("http://rdfs.org/ns/void#")
dcterms = Namespace("http://purl.org/dc/terms/")
rdflib.graph.DATASET_DEFAULT_GRAPH_ID = create
ga = Namespace("https://data.create.humanities.uva.nl/id/datasets/huwelijksgedichten/")
class Entity(rdfSubject):
rdf_type = URIRef('urn:entity')
label = rdfMultiple(RDFS.label)
name = rdfMultiple(schema.name)
mainEntityOfPage = rdfSingle(schema.mainEntityOfPage)
sameAs = rdfMultiple(OWL.sameAs)
disambiguatingDescription = rdfSingle(schema.disambiguatingDescription)
depiction = rdfSingle(foaf.depiction)
subjectOf = rdfMultiple(schema.subjectOf)
about = rdfSingle(schema.about)
url = rdfSingle(schema.url)
class DatasetClass(Entity):
# db = ConjunctiveGraph
rdf_type = void.Dataset, schema.Dataset
title = rdfMultiple(dcterms.title)
description = rdfMultiple(dcterms.description)
creator = rdfMultiple(dcterms.creator)
publisher = rdfMultiple(dcterms.publisher)
contributor = rdfMultiple(dcterms.contributor)
source = rdfSingle(dcterms.source)
date = rdfSingle(dcterms.date)
created = rdfSingle(dcterms.created)
issued = rdfSingle(dcterms.issued)
modified = rdfSingle(dcterms.modified)
exampleResource = rdfSingle(void.exampleResource)
vocabulary = rdfMultiple(void.vocabulary)
triples = rdfSingle(void.triples)
class CreativeWork(Entity):
rdf_type = schema.CreativeWork
publication = rdfMultiple(schema.publication)
author = rdfMultiple(schema.author)
about = rdfSingle(schema.about)
class PublicationEvent(Entity):
rdf_type = schema.PublicationEvent
startDate = rdfSingle(schema.startDate)
location = rdfSingle(schema.location)
publishedBy = rdfMultiple(schema.publishedBy)
class Place(Entity):
rdf_type = schema.Place
class Marriage(Entity):
rdf_type = bio.Marriage
date = rdfSingle(bio.date)
partner = rdfMultiple(bio.partner)
place = rdfSingle(bio.place)
subjectOf = rdfMultiple(schema.subjectOf)
class Person(Entity):
rdf_type = schema.Person
def main(data, g):
personCounter = count(1)
marriageCounter = count(1)
documentCounter = count(1)
persons = dict()
marriages = dict()
marriage2records = defaultdict(list)
for r in data.to_dict(orient='records'):
groom = persons.get(r['Bruidegom'], None)
if not groom:
groom = Person(ga.term('Person/' + str(next(personCounter))),
label=[r['Bruidegom']],
name=[r['Bruidegom']])
persons[r['Bruidegom']] = groom
bride = persons.get(r['Bruid'], None)
if not bride:
bride = Person(ga.term('Person/' + str(next(personCounter))),
label=[r['Bruid']],
name=[r['Bruid']])
persons[r['Bruid']] = bride
partners = [groom, bride]
if | pd.isna(r['Plaats_huwelijk']) | pandas.isna |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset=pd.read_csv(path)
# look at the first five rows
print(dataset.head(5))
# Check if there's any column which is not useful and remove it (e.g. the Id column)
dataset.drop("Id",axis=1,inplace=True)
print(dataset.columns)
# check the statistical description
print(dataset.describe())
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols=dataset.columns
#number of attributes (exclude target)
size=dataset.drop("Cover_Type",axis=1).shape[1]
print(size)
#x-axis has target attribute to distinguish between classes
x=dataset['Cover_Type']
#y-axis shows values of an attribute
y=dataset.drop("Cover_Type",axis=1)
#Plot violin for all attributes
for i in range(size):
    sns.violinplot(x=x, y=dataset[cols[i]], palette="muted")
    plt.show()
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
# create a subset of dataframe with only the first 10 features
subset_train = dataset.iloc[:, :10]
# Calculate the Pearson correlation
data_corr = subset_train.corr()
# Plot a heatmap
f, ax = plt.subplots(figsize = (10,8))
sns.heatmap(data_corr,vmax=0.8,square=True);
# List the correlation pairs
correlation = data_corr.unstack().sort_values(kind='quicksort')
# Select the highest correlation pairs using slicing
corr_var_list = correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]
# Code ends here
# --------------
#Import libraries
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Identify the unnecessary columns and remove them
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X=dataset.iloc[:,:-1]
y=dataset.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
num_feat_cols = ['Elevation', 'Aspect', 'Slope','Horizontal_Distance_To_Hydrology','Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways','Hillshade_9am','Hillshade_Noon', 'Hillshade_3pm','Horizontal_Distance_To_Fire_Points']
cat_feat_cols = list(set(X_train.columns) - set(num_feat_cols))
scaler=StandardScaler()
X_train_temp=X_train[num_feat_cols].copy()
X_test_temp=X_test[num_feat_cols].copy()
X_train_temp[num_feat_cols]=scaler.fit_transform(X_train_temp[num_feat_cols])
X_test_temp[num_feat_cols]=scaler.transform(X_test_temp[num_feat_cols])  # only transform: the scaler was already fit on the training split
X_train1=pd.concat([X_train_temp,X_train.loc[:,cat_feat_cols]],axis=1)
print(X_train1.head())
X_test1=pd.concat([X_test_temp,X_test.loc[:,cat_feat_cols]],axis=1)
print(X_test1.head())
scaled_features_train_df=X_train1
#Standardized
#Apply transform only for non-categorical data
scaled_features_test_df=X_test1
#Concatenate non-categorical data and categorical
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
# Write your solution here:
# Code starts here
skb = SelectPercentile(score_func=f_classif,percentile=20)
predictors = skb.fit_transform(X_train1, y_train)
scores = list(skb.scores_)
Features = scaled_features_train_df.columns
dataframe = | pd.DataFrame({'Features':Features,'Scores':scores}) | pandas.DataFrame |
# Tests aimed at pandas.core.indexers
import numpy as np
import pytest
from pandas.core.indexers import is_scalar_indexer, length_of_indexer, validate_indices
def test_length_of_indexer():
arr = np.zeros(4, dtype=bool)
arr[0] = 1
result = length_of_indexer(arr)
assert result == 1
def test_is_scalar_indexer():
indexer = (0, 1)
assert is_scalar_indexer(indexer, 2)
assert not is_scalar_indexer(indexer[0], 2)
indexer = (np.array([2]), 1)
assert is_scalar_indexer(indexer, 2)
indexer = (np.array([2]), np.array([3]))
assert is_scalar_indexer(indexer, 2)
indexer = (np.array([2]), np.array([3, 4]))
assert not | is_scalar_indexer(indexer, 2) | pandas.core.indexers.is_scalar_indexer |
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
{"a": "category", "b": "category", "c": CategoricalDtype()},
],
)
def test_categorical_dtype(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["a", "a", "b"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_unsorted(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", "b", "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_missing(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", np.nan, "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers):
# see gh-18186
parser = all_parsers
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({"a": Categorical(data, ordered=True)})
actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True
)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
encoding = "latin-1"
expected = parser.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
encoding = "utf-16"
sep = "\t"
expected = parser.read_csv(pth, sep=sep, encoding=encoding)
expected = expected.apply(Categorical)
actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_infer_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
]
actuals = parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ["a", "b", "c"]
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
DataFrame(
{"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, index=[2, 3]
),
]
dtype = CategoricalDtype(cats)
actuals = parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"categories",
[["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(
["a", "b", "b", "c"], categories=categories, ordered=ordered
),
}
)
dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_category_dtype_unsorted(all_parsers):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(["c", "b", "a"])
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
}
)
result = parser.read_csv(StringIO(data), dtype={"b": dtype})
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_numeric(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_datetime(all_parsers):
parser = all_parsers
dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
dtype = {"b": CategoricalDtype(dti)}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timestamp(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([Timestamp("2014")])}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timedelta(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
data = "b\n1H\n2H\n3H"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
"b\nTrue\nFalse\nNA\nFalse",
"b\ntrue\nfalse\nNA\nfalse",
"b\nTRUE\nFALSE\nNA\nFALSE",
"b\nTrue\nFalse\nNA\nFALSE",
],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
# see gh-20498
parser = all_parsers
dtype = {"b": CategoricalDtype([False, True])}
expected = DataFrame({"b": Categorical([True, False, None, False])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)], names=["one", "two"]
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
data = ""
parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
# see gh-2631
parser = all_parsers
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
msg = (
"Integer column has NA values"
if parser.engine == "c"
else "Unable to convert column DOY"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
def test_dtype_with_converters(all_parsers):
parser = all_parsers
data = """a,b
1.1,2.2
1.2,2.3"""
    # Dtype spec ignored if converters are specified.
with tm.assert_produces_warning(ParserWarning):
result = parser.read_csv(
StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
)
expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
("category", DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[])),
(
dict(a="category", b="category"),
DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
),
("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
(
"timedelta64[ns]",
DataFrame(
{
"a": Series([], dtype="timedelta64[ns]"),
"b": Series([], dtype="timedelta64[ns]"),
},
index=[],
),
),
(
dict(a=np.int64, b=np.int32),
DataFrame(
{"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
index=[],
),
),
(
{0: np.int64, 1: np.int32},
DataFrame(
{"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
index=[],
),
),
(
{"a": np.int64, 1: np.int32},
DataFrame(
{"a": | Series([], dtype=np.int64) | pandas.Series |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2018 <NAME>,
# <NAME>,
# <NAME>,
# <NAME>,
# <NAME>,
# <NAME>
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import unittest
import numpy as np
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
from sklearn.cluster import DBSCAN, KMeans
from sklearn.linear_model import LinearRegression
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import GridSearchCV
from pipegraph.base import (PipeGraph,
add_mixins_to_step,
build_graph,
make_connections_when_not_provided_to_init,
Concatenator,
ColumnSelector,
Reshape,
Demultiplexer,
NeutralRegressor,
NeutralClassifier)
from pipegraph.adapters import (FitTransformMixin,
FitPredictMixin,
AtomicFitPredictMixin,
CustomFitPredictWithDictionaryOutputMixin)
from pipegraph.demo_blocks import CustomCombination
logging.basicConfig(level=logging.NOTSET)
logger = logging.getLogger(__name__)
class TestRootFunctions(unittest.TestCase):
def setUp(self):
self.size = 100
self.X = np.random.rand(self.size, 1)
self.y = self.X + np.random.randn(self.size, 1)
concatenator = Concatenator()
gaussian_clustering = GaussianMixture(n_components=3)
dbscan = DBSCAN(eps=0.5)
mixer = CustomCombination()
linear_model = LinearRegression()
self.steps = steps = [('Concatenate_Xy', concatenator),
('Gaussian_Mixture', gaussian_clustering),
('Dbscan', dbscan),
('Combine_Clustering', mixer),
('Regressor', linear_model),
]
self.connections = {
'Concatenate_Xy': dict(df1='X',
df2='y'),
'Gaussian_Mixture': dict(X=('Concatenate_Xy', 'Xy')),
'Dbscan': dict(X=('Concatenate_Xy', 'Xy')),
'Combine_Clustering': dict(
dominant=('Dbscan', 'predict'),
other=('Gaussian_Mixture', 'predict')),
'Regressor': dict(X='X', y='y')
}
def test_wrap_adaptee_in_process__right_classes(self):
tests_table = [
{'model': LinearRegression(),
'expected_class': FitPredictMixin},
{'model': MinMaxScaler(),
'expected_class': FitTransformMixin},
{'model': DBSCAN(),
'expected_class': AtomicFitPredictMixin},
{'model': Demultiplexer(),
'expected_class': CustomFitPredictWithDictionaryOutputMixin}
]
for test_dict in tests_table:
model = test_dict['model']
step = add_mixins_to_step(model)
self.assertEqual(isinstance(step, test_dict['expected_class']), True)
def test_wrap_adaptee_in_process__wrap_process_does_nothing(self):
lm = LinearRegression()
wrapped_lm = add_mixins_to_step(lm)
double_wrap = add_mixins_to_step(wrapped_lm)
self.assertEqual(double_wrap , lm)
def test_wrap_adaptee_in_process__raise_exception(self):
model = object()
self.assertRaises(ValueError, add_mixins_to_step, model)
def test_build_graph__connections_None(self):
graph = build_graph(None)
self.assertTrue( graph is None)
def test_build_graph__node_names(self):
graph = build_graph(self.connections)
node_list = list(graph.nodes())
self.assertEqual(sorted(node_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
'Combine_Clustering',
'Regressor',
]))
def test_build_graph__node_edges(self):
graph = build_graph(self.connections)
in_edges = {name: [origin for origin, destination in list(graph.in_edges(name))]
for name in graph}
logger.debug("in_edges: %s", in_edges)
self.assertEqual(in_edges['Gaussian_Mixture'], ['Concatenate_Xy'])
self.assertEqual(in_edges['Dbscan'], ['Concatenate_Xy'])
self.assertEqual(sorted(in_edges['Combine_Clustering']), sorted(['Gaussian_Mixture', 'Dbscan']))
def test_make_connections_when_not_provided_to_init__Many_steps(self):
steps = self.steps
connections = make_connections_when_not_provided_to_init(steps=steps)
expected = dict(Concatenate_Xy={'X': 'X'},
Gaussian_Mixture={'X': ('Concatenate_Xy', 'predict')},
Dbscan={'X': ('Gaussian_Mixture', 'predict')},
Combine_Clustering={'X': ('Dbscan', 'predict')},
Regressor={'X': ('Combine_Clustering', 'predict'),
'y':'y'}
)
self.assertEqual(connections, expected)
class TestPipegraph(unittest.TestCase):
def setUp(self):
self.size = 1000
self.X = pd.DataFrame(dict(X=np.random.rand(self.size, )))
self.y = pd.DataFrame(dict(y=(np.random.rand(self.size, ))))
concatenator = Concatenator()
gaussian_clustering = GaussianMixture(n_components=3)
dbscan = DBSCAN(eps=0.5)
mixer = CustomCombination()
linear_model = LinearRegression()
steps = [('Concatenate_Xy', concatenator),
('Gaussian_Mixture', gaussian_clustering),
('Dbscan', dbscan),
('Combine_Clustering', mixer),
('Regressor', linear_model),
]
connections = {
'Concatenate_Xy': dict(df1='X',
df2='y'),
'Gaussian_Mixture': dict(X=('Concatenate_Xy', 'predict')),
'Dbscan': dict(X=('Concatenate_Xy', 'predict')),
'Combine_Clustering': dict(
dominant=('Dbscan', 'predict'),
other=('Gaussian_Mixture', 'predict')),
'Regressor': dict(X='X', y='y')
}
self.steps_external = [('_External', concatenator),
('Gaussian_Mixture', gaussian_clustering),
('Dbscan', dbscan),
('Combine_Clustering', mixer),
('Regressor', linear_model),
]
self.connections_external = {
'_External': dict(df1='X',
df2='y'),
'Gaussian_Mixture': dict(X=('Concatenate_Xy', 'predict')),
'Dbscan': dict(X=('Concatenate_Xy', 'predict')),
'Combine_Clustering': dict(
dominant=('Dbscan', 'predict'),
other=('Gaussian_Mixture', 'predict')),
'Regressor': dict(X='X', y='y')
}
self.steps = steps
self.connections = connections
self.pgraph = PipeGraph(steps=steps, fit_connections=connections)
self.pgraph.fit(self.X, self.y)
def test_Pipegraph__External_step_name(self):
pgraph = PipeGraph(steps=self.steps_external, fit_connections=self.connections_external)
self.assertRaises(ValueError, pgraph.fit, self.X, self.y)
def test_Pipegraph__example_1_no_connections(self):
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from pipegraph import PipeGraph
X = np.random.rand(100, 1)
y = 4 * X + 0.5 * np.random.randn(100, 1)
scaler = MinMaxScaler()
linear_model = LinearRegression()
steps = [('scaler', scaler),
('linear_model', linear_model)]
pgraph = PipeGraph(steps=steps)
self.assertTrue(pgraph.fit_connections is None)
self.assertTrue(pgraph.predict_connections is None)
pgraph.fit(X, y)
y_pred = pgraph.predict(X)
self.assertEqual(y_pred.shape[0], y.shape[0])
self.assertEqual(pgraph.fit_connections, dict(scaler={'X': 'X'},
linear_model={'X':('scaler', 'predict'),
'y': 'y'}))
self.assertEqual(pgraph.predict_connections, dict(scaler={'X': 'X'},
linear_model={'X':('scaler', 'predict'),
'y': 'y'}))
def test_Pipegraph__ex_3_inject(self):
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from pipegraph.base import PipeGraph
from pipegraph.demo_blocks import CustomPower
X = pd.DataFrame(dict(X=np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
sample_weight=np.array(
[0.01, 0.95, 0.10, 0.95, 0.95, 0.10, 0.10, 0.95, 0.95, 0.95, 0.01])))
y = np.array([10, 4, 20, 16, 25, -60, 85, 64, 81, 100, 150])
scaler = MinMaxScaler()
polynomial_features = PolynomialFeatures()
linear_model = LinearRegression()
custom_power = CustomPower()
selector = ColumnSelector(mapping={'X': slice(0, 1),
'sample_weight': slice(1, 2)})
steps = [('selector', selector),
('custom_power', custom_power),
('scaler', scaler),
('polynomial_features', polynomial_features),
('linear_model', linear_model)]
pgraph = PipeGraph(steps=steps) #PipeGraphRegressor
self.assertTrue(pgraph.fit_connections is None)
self.assertTrue(pgraph.predict_connections is None)
(pgraph.inject(sink='selector', sink_var='X', source='_External', source_var='X')
.inject('custom_power', 'X', 'selector', 'sample_weight')
.inject('scaler', 'X', 'selector', 'X')
.inject('polynomial_features', 'X', 'scaler')
.inject('linear_model', 'X', 'polynomial_features')
.inject('linear_model', 'y', source_var='y')
.inject('linear_model', 'sample_weight', 'custom_power'))
self.assertTrue(pgraph.fit_connections is not None)
self.assertTrue(pgraph.predict_connections is not None)
pgraph.fit(X, y)
self.assertEqual(pgraph.fit_connections,
{'selector': {'X': ('_External', 'X')},
'custom_power': {'X': ('selector', 'sample_weight')},
'scaler': {'X': ('selector', 'X')},
'polynomial_features': {'X': ('scaler', 'predict')},
'linear_model': {'X': ('polynomial_features', 'predict'),
'y': ('_External', 'y'),
'sample_weight': ('custom_power', 'predict')}})
self.assertEqual(pgraph.predict_connections,
{'selector': {'X': ('_External', 'X')},
'custom_power': {'X': ('selector', 'sample_weight')},
'scaler': {'X': ('selector', 'X')},
'polynomial_features': {'X': ('scaler', 'predict')},
'linear_model': {'X': ('polynomial_features', 'predict'),
'y': ('_External', 'y'),
'sample_weight': ('custom_power', 'predict')}})
def test_Pipegraph__fit_connections(self):
pgraph = PipeGraph(self.steps, self.connections)
pgraph.fit(self.X, self.y)
fit_nodes_list = list(pgraph._filter_fit_nodes())
self.assertEqual(sorted(fit_nodes_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
'Combine_Clustering',
'Regressor',
]))
def test_Pipegraph__some_fit_connections(self):
some_connections = {
'Concatenate_Xy': dict(df1='X',
df2='y'),
'Gaussian_Mixture': dict(X=('Concatenate_Xy', 'predict')),
'Dbscan': dict(X=('Concatenate_Xy', 'predict')),
}
pgraph = PipeGraph(steps=self.steps, fit_connections=some_connections, predict_connections=self.connections)
pgraph.fit(self.X, self.y)
fit_nodes_list = list(pgraph._filter_fit_nodes())
self.assertEqual(sorted(fit_nodes_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
]))
def test_Pipegraph__predict_connections(self):
pgraph = PipeGraph(self.steps, self.connections)
pgraph.fit(self.X, self.y)
predict_nodes_list = list(pgraph._filter_predict_nodes())
self.assertEqual(sorted(predict_nodes_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
'Combine_Clustering',
'Regressor',
]))
def test_Pipegraph__some_predict_connections(self):
some_connections = {
'Concatenate_Xy': dict(df1='X',
df2='y'),
'Gaussian_Mixture': dict(X=('Concatenate_Xy', 'predict')),
'Dbscan': dict(X=('Concatenate_Xy', 'predict')),
}
pgraph = PipeGraph(steps=self.steps, fit_connections=self.connections, predict_connections=some_connections)
pgraph.fit(self.X, self.y)
predict_nodes_list = list(pgraph._filter_predict_nodes())
self.assertEqual(sorted(predict_nodes_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
]))
def test_Pipegraph__read_step_inputs_from_fit_data(self):
pgraph = self.pgraph
pgraph._fit_data = {('_External', 'X'): self.X,
('_External', 'y'): self.y,
('Dbscan', 'predict'): self.y * 4}
result = pgraph._fetch_signature_values(graph_data=pgraph._fit_data,
step_name='Regressor',
method='fit')
assert_array_equal(result['X'], self.X)
assert_array_equal(result['y'], self.y)
self.assertEqual(len(result), 2)
def test_Pipegraph__read_predict_signature_variables_from_graph_data(self):
pgraph = self.pgraph
pgraph._predict_data = {('_External', 'X'): self.X,
('_External', 'y'): self.y,
('Dbscan', 'predict'): self.y * 4}
result = pgraph._fetch_signature_values(graph_data=pgraph._predict_data,
step_name='Regressor',
method='predict')
assert_array_equal(result['X'], self.X)
self.assertEqual(len(result), 1)
def test_Pipegraph__step__predict_lm(self):
X = self.X
y = self.y
lm = LinearRegression()
lm_step = add_mixins_to_step(lm)
lm_step.fit(X=X, y=y)
assert_array_equal(lm.predict(X), lm_step.predict_dict(X=X)['predict'])
def test_Pipegraph__under_fit__concatenate_Xy(self):
pgraph = self.pgraph
pgraph._fit_data = {('_External', 'X'): self.X,
('_External', 'y'): self.y,
('Dbscan', 'predict'): self.y * 4,
}
expected = pd.concat([self.X, self.y], axis=1)
pgraph._fit_single('Concatenate_Xy')
self.assertEqual(expected.shape, pgraph._fit_data['Concatenate_Xy', 'predict'].shape)
assert_frame_equal(self.X, pgraph._fit_data['Concatenate_Xy', 'predict'].loc[:,['X']])
assert_frame_equal(self.y, pgraph._fit_data['Concatenate_Xy', 'predict'].loc[:,['y']])
def test_Pipegraph__predict__concatenate_Xy(self):
X = self.X
y = self.y
pgraph = self.pgraph
expected = pd.concat([X, y], axis=1)
current_step = pgraph._steps_dict['Concatenate_Xy']
current_step.fit()
result = current_step.predict_dict(df1=X, df2=y)['predict']
self.assertEqual(expected.shape, result.shape)
assert_frame_equal(self.X, result.loc[:,['X']])
assert_frame_equal(self.y, result.loc[:,['y']])
assert_frame_equal(expected, result)
def test_Pipegraph__predict__gaussian_mixture(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Gaussian_Mixture']
current_step.fit(X=X)
expected = current_step.predict(X=X)
result = current_step.predict_dict(X=X)['predict']
assert_array_equal(expected, result)
def test_Pipegraph__predict__dbscan(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Dbscan']
current_step.fit(X=X)
expected = current_step.fit_predict(X=X)
result = current_step.predict_dict(X=X)['predict']
assert_array_equal(expected, result)
def test_Pipegraph__combine_clustering_predict(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Gaussian_Mixture']
current_step.fit(X=pd.concat([X, y], axis=1))
result_gaussian = current_step.predict_dict(X=pd.concat([X, y], axis=1))['predict']
current_step = pgraph._steps_dict['Dbscan']
result_dbscan = current_step.predict_dict(X=pd.concat([X, y], axis=1))['predict']
self.assertEqual(result_dbscan.min(), 0)
current_step = pgraph._steps_dict['Combine_Clustering']
current_step.fit(dominant=result_dbscan, other=result_gaussian)
expected = current_step.predict(dominant=result_dbscan, other=result_gaussian)
result = current_step.predict_dict(dominant=result_dbscan, other=result_gaussian)['predict']
assert_array_equal(expected, result)
self.assertEqual(result.min(), 0)
def test_Pipegraph__strategy__dict_key(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Concatenate_Xy']
current_step.fit()
result = current_step.predict_dict(df1=X, df2=y)
self.assertEqual(list(result.keys()), ['predict'])
def test_Pipegraph__dbscan__dict_key(self):
X = self.X
pgraph = self.pgraph
current_step = pgraph._steps_dict['Dbscan']
current_step.fit(X=X)
result = current_step.predict_dict(X=X)
self.assertEqual(list(result.keys()), ['predict'])
def test_Pipegraph__combine_clustering__dict_key(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Combine_Clustering']
current_step.fit(dominant=X, other=y)
result = current_step.predict_dict(dominant=X, other=y)
self.assertEqual(list(result.keys()), ['predict'])
def test_Pipegraph__gaussian_mixture__dict_key(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Gaussian_Mixture']
current_step.fit(X=X)
result = current_step.predict_dict(X=X)
self.assertEqual(sorted(list(result.keys())), sorted(['predict', 'predict_proba']))
def test_Pipegraph__regressor__dict_key(self):
X = self.X
y = self.y
pgraph = self.pgraph
current_step = pgraph._steps_dict['Regressor']
current_step.fit(X=X, y=y)
result = current_step.predict_dict(X=X)
self.assertEqual(list(result.keys()), ['predict'])
def test_Pipegraph__fit_node_names(self):
pgraph = self.pgraph.fit(self.X, self.y)
node_list = list(pgraph._fit_graph.nodes())
self.assertEqual(sorted(node_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
'Combine_Clustering',
'Regressor',
]))
def test_Pipegraph__predict_node_names(self):
pgraph = self.pgraph.fit(self.X, self.y)
node_list = list(pgraph._predict_graph.nodes())
self.assertEqual(sorted(node_list), sorted(['Concatenate_Xy',
'Gaussian_Mixture',
'Dbscan',
'Combine_Clustering',
'Regressor',
]))
def test_Pipegraph__filter_nodes_fit(self):
pgraph = self.pgraph.fit(self.X, self.y)
fit_nodes = list(pgraph._filter_fit_nodes())
self.assertEqual(sorted(fit_nodes), sorted(['Concatenate_Xy',
'Dbscan',
'Gaussian_Mixture',
'Combine_Clustering',
'Regressor',
]))
def test_Pipegraph__filter_nodes_predict(self):
alternative_connections = {
'Regressor': dict(X='X', y='y')
}
pgraph = PipeGraph(steps=self.steps, fit_connections=self.connections,
predict_connections=alternative_connections)
pgraph.fit(self.X, self.y)
predict_nodes = list(pgraph._filter_predict_nodes())
self.assertEqual(predict_nodes, ['Regressor'])
def test_Pipegraph__graph_fit_using_keywords(self):
pgraph = self.pgraph
pgraph.fit(X=self.X, y=self.y)
assert_frame_equal(pgraph._fit_data['_External', 'X'], self.X)
assert_frame_equal(pgraph._fit_data['_External', 'y'], self.y)
self.assertEqual(pgraph._fit_data['Dbscan', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Dbscan', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Gaussian_Mixture', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Gaussian_Mixture', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].max(),
pgraph._fit_data['Gaussian_Mixture', 'predict'].max())
self.assertEqual(pgraph._fit_data['Regressor', 'predict'].shape[0], self.y.shape[0])
def test_Pipegraph__graph_fit_three_positional(self):
pgraph = self.pgraph
self.assertRaises(TypeError, pgraph.fit, self.X, self.y, self.y)
def test_Pipegraph__graph_fit_two_positional(self):
pgraph = self.pgraph
pgraph.fit(self.X, self.y)
assert_frame_equal(pgraph._fit_data['_External', 'X'], self.X)
assert_frame_equal(pgraph._fit_data['_External', 'y'], self.y)
self.assertEqual(pgraph._fit_data['Dbscan', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Dbscan', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Gaussian_Mixture', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Gaussian_Mixture', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].max(),
pgraph._fit_data['Gaussian_Mixture', 'predict'].max())
self.assertEqual(pgraph._fit_data['Regressor', 'predict'].shape[0], self.y.shape[0])
def test_Pipegraph__graph_fit_one_positional(self):
pgraph = self.pgraph
pgraph.fit(self.X, y=self.y)
assert_frame_equal(pgraph._fit_data['_External', 'X'], self.X)
assert_frame_equal(pgraph._fit_data['_External', 'y'], self.y)
self.assertEqual(pgraph._fit_data['Dbscan', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Dbscan', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Gaussian_Mixture', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Gaussian_Mixture', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].shape[0], self.y.shape[0])
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].min(), 0)
self.assertEqual(pgraph._fit_data['Combine_Clustering', 'predict'].max(),
pgraph._fit_data['Gaussian_Mixture', 'predict'].max())
self.assertEqual(pgraph._fit_data['Regressor', 'predict'].shape[0], self.y.shape[0])
def test_Pipegraph__graph_pg_predict_using_keywords(self):
pgraph = self.pgraph
pgraph.fit(X=self.X, y=self.y)
pgraph.predict_dict(X=self.X, y=self.y)
assert_frame_equal(pgraph._predict_data['_External', 'X'], self.X)
assert_frame_equal(pgraph._predict_data['_External', 'y'], self.y)
self.assertEqual(pgraph._predict_data['Regressor', 'predict'].shape[0], self.y.shape[0])
def test_Pipegraph__graph_predict_using_three_positional(self):
pgraph = self.pgraph
pgraph.fit(X=self.X, y=self.y)
self.assertRaises(TypeError, pgraph.predict, self.X, self.y, self.y)
def test_Pipegraph__graph_predict_using_one_positional_one_keyword(self):
pgraph = self.pgraph
pgraph.fit(X=self.X, y=self.y)
pgraph.predict_dict(self.X, y=self.y)
assert_frame_equal(pgraph._predict_data['_External', 'X'], self.X)
assert_frame_equal(pgraph._predict_data['_External', 'y'], self.y)
self.assertEqual(pgraph._predict_data['Regressor', 'predict'].shape[0], self.y.shape[0])
def test_Pipegraph__graph_pg_predict_using_one_positional(self):
pgraph = self.pgraph
pgraph.fit(X=self.X, y=self.y)
pgraph.predict_dict(self.X, y=self.y)
assert_frame_equal(pgraph._predict_data['_External', 'X'], self.X)
import pandas as pd
import pandas
import pickle
import random
random.seed(9999)
def obtainOutputFileName(outputPath, cancerType):
return outputPath + cancerType + "_model_selection_statistics.csv"
def obtainPredictionsOutputFileName(outputPath, cancerType):
return outputPath + cancerType + "_model_selection_predictions.csv"
def obtainOutputModelFileName(outputPath, cancerType, modelName, sampleId):
return outputPath + cancerType + "_" + modelName + "_s" + sampleId + ".pkl"
def getDatasetsNE(dataFolder, cancerType, available_samples):
dataframes = []
featureSets = getFeatureSetNE(cancerType, available_samples)
for inx, sample in enumerate(available_samples):
dataframe = pandas.read_csv(dataFolder + cancerType.capitalize() + "/" + cancerType + "_training_data_" + sample + ".dat", header=0, sep=",")
dataframe = dataframe[dataframe['label'] != 2]
dataframe.drop("gene", axis=1, inplace=True)
dataframePositive = dataframe[dataframe['label'] == 1]
dataframeNegative = dataframe[dataframe['label'] == 0]
positiveSize = dataframePositive.shape[0]
negativeSize = dataframeNegative.shape[0]
# Set them the same size
if(positiveSize > negativeSize):
dataframePositive = dataframePositive.head(-(positiveSize-negativeSize))
elif(negativeSize > positiveSize):
dataframeNegative = dataframeNegative.head(-(negativeSize-positiveSize))
dataframe = pd.concat([dataframePositive, dataframeNegative])
dataframeFeatureSet = list(featureSets[0])
dataframeFeatureSet.append("label")
dataframe = dataframe[dataframeFeatureSet]
dataframes.append(dataframe)
return dataframes
def getDatasets(dataFolder, cancerType, available_samples):
dataframes = []
featureSets = getFeatureSet(cancerType, available_samples)
for inx, sample in enumerate(available_samples):
dataframe = pandas.read_csv(dataFolder + cancerType.capitalize() + "/" + cancerType + "_training_data_" + sample + ".dat", header=0, sep=",")
dataframe = dataframe[dataframe['label'] != 2]
dataframe.drop("gene", axis=1, inplace=True)
dataframePositive = dataframe[dataframe['label'] == 1]
dataframeNegative = dataframe[dataframe['label'] == 0]
positiveSize = dataframePositive.shape[0]
negativeSize = dataframeNegative.shape[0]
# Set them the same size
if(positiveSize > negativeSize):
dataframePositive = dataframePositive.head(-(positiveSize-negativeSize))
elif(negativeSize > positiveSize):
dataframeNegative = dataframeNegative.head(-(negativeSize-positiveSize))
dataframe = pd.concat([dataframePositive, dataframeNegative])
dataframeFeatureSet = list(featureSets[0])
dataframeFeatureSet.append("label")
dataframe = dataframe[dataframeFeatureSet]
dataframes.append(dataframe)
return dataframes
def getFeatureSet(cancerType, available_samples):
# Read feature importance file
featureSubsets = []
dataframe = pandas.read_csv("output/feature_importance/" + cancerType + "_feature_importance.csv", header=0, sep=",")
dataframe = dataframe.loc[dataframe['z_score'] >= 0.5]
featureSubsets.append(dataframe['feature'].values)
return featureSubsets
def getFeatureSetNE(cancerType, available_samples):
# Read feature importance file
neFeatures = ["e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "e10", "e11", "e12", "e13", "e14", "e15", "e16", "e17", "e18", "e19", "e20", "e21", "e22", "e23", "e24", "e25", "e26", "e27", "e28", "e29", "e30", "e31"]
featureSubsets = []
dataframe = pandas.read_csv("output/feature_importance/" + cancerType + "_feature_importance.csv", header=0, sep=",")
dataframe = dataframe.loc[dataframe['feature'].isin(neFeatures)]
featureSubsets.append(dataframe['feature'].values)
return featureSubsets
def select_best_method(modelPerformancesFolder, dataFile, cancerType, available_samples, useAveraged=True):
modelSelectionAttribute = "auc"
# Get the data for the first feature selection approach: one feature set for all (average)
statisticsFilePath = modelPerformancesFolder + "/" + cancerType + "_model_selection_statistics.csv"
statisticsDataframe = pandas.read_csv(statisticsFilePath, header=0, sep=",")
statisticsDataframe = statisticsDataframe[statisticsDataframe['sampleid'] == "overall"]
statisticsDataframe = statisticsDataframe.loc[statisticsDataframe[modelSelectionAttribute].idxmax()]
# Get the best approach and its model name
bestModel = (statisticsDataframe["model"], statisticsDataframe[modelSelectionAttribute])
# Get the path for the models
modelsPath = modelPerformancesFolder + "/" + cancerType + "_" + bestModel[0] + "_XX.pkl"
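# "XX" acts as a per-sample placeholder in the model filename; it is replaced with the actual sample id inside the loop below.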
# Save the models in an array
models_to_return = []
feature_sets = getFeatureSet(cancerType, available_samples)
positiveGenesSet = list()
negativeGenesSet = list()
genesSetLabels = list()
for sampleInx, sample in enumerate(available_samples):
sampleModelPath = modelsPath.replace("XX", sample)
with open(sampleModelPath, 'rb') as f:
sampleModel = pickle.load(f)
# Prepare data in case of refit
dataframe = pandas.read_csv(dataFile + "/" + cancerType + "_training_data_" + sample + ".dat", header=0, sep=",")
dataframe = dataframe[dataframe['label'] != 2]
dataframePositive = dataframe[dataframe['label'] == 1]
dataframeNegative = dataframe[dataframe['label'] == 0]
positiveSize = dataframePositive.shape[0]
negativeSize = dataframeNegative.shape[0]
# Set them the same size
if(positiveSize > negativeSize):
dataframePositive = dataframePositive.head(-(positiveSize-negativeSize))
elif(negativeSize > positiveSize):
dataframeNegative = dataframeNegative.head(-(negativeSize-positiveSize))
for geneName in dataframePositive['gene'].values:
if geneName not in positiveGenesSet:
positiveGenesSet.append(geneName)
genesSetLabels.append("positive")
for geneName in dataframeNegative['gene'].values:
if geneName not in negativeGenesSet:
negativeGenesSet.append(geneName)
genesSetLabels.append("negative")
dataframe = pd.concat([dataframePositive, dataframeNegative])
#!/usr/bin/env python
# Script to be called by a parent at a constant rate
import matplotlib.pyplot as plt
import datetime as dt
import math
import numpy as np
import requests
from requests.exceptions import ConnectionError, RequestException
import pandas as pd
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
import csv
import socket
import ast
import time
import pytz
import timezonefinder
from angle_to_position import *
def convert_float(x):
try:
x = float(x)
except ValueError as e:
print("Couldn't convert {} to a float.".format(x))
return x
class RaZON():
def __init__(self, lat, lon, panel_tilt=20, razonIP="192.168.15.150"):
# Config Settings
latitude = math.radians(lat)
longitude = math.radians(lon)
self.tilt = math.radians(panel_tilt)
dzenith = latitude-panel_tilt
self.razonIP = razonIP
# Timezone from lat/lng
tzfinder = timezonefinder.TimezoneFinder()
self.tz = tzfinder.certain_timezone_at(lat=lat, lng=lon)
def get_local_datetime(self):
# Date handling for request
now = dt.datetime.now()
localtime = pytz.timezone(self.tz)
a = localtime.localize(now)
is_dst = bool(a.dst())
# RaZON doesn't adjust for DST
if is_dst:
now = now.replace(hour=now.hour-1)
print("Getting DNI data for {}...".format(now.date()))
return now
def sample_interval(self, df, start, end):
# Get appropriate time slice
# If multiple spans of days, return time slices in each day
df_datetimes = df['Datetime Local (non-DST)']
# RaZON doesn't adjust for DST
# Request for an hour earlier if date lies in DST
localtime = pytz.timezone(self.tz)
dst_datetimes = df_datetimes.apply(lambda x: bool(localtime.localize(x).dst()))
df['Datetime Local'] = df[dst_datetimes]['Datetime Local (non-DST)'] + \
dt.timedelta(hours=1)
corrected_datetimes = df['Datetime Local']
# Slice data by start and end time and return the dataframe
greaterThanStart = corrected_datetimes >= start
lessThanEnd = corrected_datetimes <= end
time.sleep(0.2)
df = df[greaterThanStart & lessThanEnd]
df = df.reset_index()
df['Datetime Local'] = df['Datetime Local'].apply(lambda x: localtime.localize(x))
return df
def sample_data(self, df, end_time, duration, time_col_name):
# Get appropriate row (current minute data)
times = df[time_col_name]
hours = duration // 60  # integer division so datetime() below receives whole hours
startMinutes = (end_time.minute - duration) % 60
if startMinutes == 59:
hours += 1
startHours = end_time.hour - hours
start = dt.datetime(year=1900,
month=1,
day=1,
hour=startHours,
minute=startMinutes,
second=end_time.second)
greaterThanStart = (pd.to_datetime(times, format=' %H:%M:%S') > pd.to_datetime(start))
end = dt.datetime(year=1900,
month=1,
day=1,
hour=end_time.hour,
minute=end_time.minute,
second=end_time.second)
lessThanEnd = (pd.to_datetime(times, format=' %H:%M:%S') <= pd.to_datetime(end))
time.sleep(0.5)
df = df[greaterThanStart & lessThanEnd]
df = df.reset_index()
return df
def request_interval(self, now, start, end):
# nowDateOutputFileString = now.strftime('%Y%m%d')
start_date = start.date()
start_date = start_date.strftime('%Y-%m-%d')
end_date = end.date()
end_date = end_date.strftime('%Y-%m-%d')
nowDateFileGETString = now.strftime('%m-%d-%y')
payload = {'beginDate' : start_date,
'endDate' : end_date,
'fileName' : str(nowDateFileGETString)+'.csv'}
# Make request to RaZON for data, handle connection error
try:
req = requests.get("http://"+str(self.razonIP)+"/loggings/exportdata.csv",data=payload)
except RequestException as e:
raise e
# Convert into readable csv and data
sio = StringIO(req.text)
reader = csv.reader(sio)
with open('results.csv', 'w+') as f:
f.write(req.text)
dni_df = pd.read_csv("results.csv", skiprows=5)
dni_df = dni_df.rename(columns={'IrrDirect (W/m2)':'Irrad. (W/m2)',
'Time Local ( hh:mm ) ':'Time (hh:mm:ss)'})
times, dates = dni_df['Time (hh:mm:ss)'], dni_df['Date Local (yyyy-mm-dd)']
df_datetimes = times+dates
df_datetimes = pd.to_datetime(df_datetimes, format=' %H:%M:%S %Y-%m-%d')
dni_df['Datetime Local (non-DST)'] = df_datetimes
dni_df = self.sample_interval(dni_df, start, end)
# Solar angles used for cosine correction
azimuth_angles = (dni_df['SolarAzimuth (Degrees)']).map(math.radians)
altitude_angles = (90. - dni_df['SolarZenith (Degrees)']).map(math.radians)
dni_df['Azimuth (rad)'] = azimuth_angles
dni_df['Altitude (rad)'] = altitude_angles
return dni_df
def request_data(self, now, end_time, duration):
nowDateOutputFileString = now.strftime('%Y%m%d')
nowDateString = now.strftime('%Y-%m-%d')
nowDateFileGETString = now.strftime('%m-%d-%y')
payload = {'beginDate':nowDateString,
'endDate': nowDateString,
'fileName':str(nowDateFileGETString)+'.csv'}
# Make request to RaZON for data, handle connection error
try:
r5 = requests.get("http://"+str(self.razonIP)+"/loggings/exportdata.csv", data=payload)
except RequestException as e:
raise e
try:
r6 = requests.get("http://"+str(self.razonIP)+"/status_trackings/lastirradiance?")
except RequestException as e:
raise e
# Convert into readable csv and data
sio = StringIO(r5.text)
reader = csv.reader(sio)
with open('results.csv', 'w+') as f:
f.write(r5.text)
dni_df = pd.read_csv("results.csv", skiprows=5)
dni_df = dni_df.rename(columns={'IrrDirect (W/m2)':'Irrad. (W/m2)',
'Time Local ( hh:mm ) ':'Time (hh:mm:ss)'})
dni_df = self.sample_data(dni_df, end_time, duration, 'Time (hh:mm:ss)')
irrad_arr = ast.literal_eval(r6.text)
if irrad_arr:
irrad_arr = irrad_arr[0]
irrad = [convert_float(i) for i in irrad_arr][-3]
# Solar angles used for cosine correction
azimuth_angles = (dni_df['SolarAzimuth (Degrees)']).map(math.radians)
altitude_angles = (90. - dni_df['SolarZenith (Degrees)']).map(math.radians)
return dni_df, azimuth_angles, altitude_angles
def cos_correct(self, dni_df, cos_correct_df):
# Apply cos correction to irradiance
illumination = dni_df['Irrad. (W/m2)']
dni_df['Cosine Corrected DNI'] = illumination*(cos_correct_df['Cos(Theta)']*
cos_correct_df['Cos(Phi)'])
# dni_df = dni_df.append(cos_correct_df)
for col in cos_correct_df.columns:
dni_df[col] = pd.Series(cos_correct_df[col], index=dni_df.index)
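# --- Example usage (a sketch, not part of the original script) ---
# The coordinates, tilt, and 10-minute window below are illustrative assumptions;
# cos_correct_df is assumed to be a DataFrame with 'Cos(Theta)' and 'Cos(Phi)' columns.
#
#   razon = RaZON(lat=37.87, lon=-122.27, panel_tilt=20, razonIP="192.168.15.150")
#   now = razon.get_local_datetime()
#   dni_df, azimuth, altitude = razon.request_data(now, end_time=now, duration=10)
#   razon.cos_correct(dni_df, cos_correct_df)  # adds a 'Cosine Corrected DNI' column to dni_df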
#!/usr/bin/env python
# encoding: utf-8
'''
Created by <NAME>
on 2018-09-08.
Multivariate cox analysis to understand liver cancer CNA usefulness from CBioportal data.
Copyright (c) 2018. All rights reserved.
'''
import pandas as pd
import numpy as np
import argparse
import sys
import os
import pdb
import rpy2
import pprint
import cbioportal_util
sys.path.append('../common/')
import utilities as util
import analysis
import tumor_stage_util
cancer_type = 'LIHC'
age_r = 'AGE'
cirrhosis_r = 'CIRRHOSIS'
grade_r = 'GRADE'
hep_r = 'HISTOLOGICAL_SUBTYPE'
def get_options():
parser = argparse.ArgumentParser(description='Tumor stage group counts')
parser.add_argument('-c', action='store', dest='LIHC_clinical')
parser.add_argument('-i', action='store', dest='clinical_variables')
parser.add_argument('-d', action='store', dest='LIHC_cna')
parser.add_argument('-o', action='store', dest='outdir', default='.')
ns = parser.parse_args()
return (ns.LIHC_clinical, ns.clinical_variables, ns.LIHC_cna,
ns.outdir)
def make_clinical_data(clinical_file, clinical_variables, outdir):
clinical = pd.read_csv(clinical_file, index_col=0, dtype=str)
clinical = clinical[[age_r, cirrhosis_r, grade_r, hep_r, 'Time', 'Censor']]
cirrhosis_groups = pd.read_csv(os.path.join(clinical_variables, 'LIHC_cirrhosis.csv'), dtype=str)
clinical = tumor_stage_util.group_discontinuous_vars(cirrhosis_r, 'cirrhosis',
cirrhosis_groups, clinical)
clinical[age_r] = pd.to_numeric(clinical[age_r], errors='coerce')
clinical['Time'] = pd.to_numeric(clinical['Time'], errors='coerce')
clinical['Censor'] = pd.to_numeric(clinical['Censor'], errors='coerce')
clinical['HBV'] = np.where(clinical[hep_r] == 'HBV', 1, 0)
clinical['HCV'] = np.where(clinical[hep_r] == 'HCV', 1, 0)
clinical.to_csv(os.path.join(outdir, cancer_type + '_clinical.csv'),
index_label='patient_id')
return clinical
def do_cox_models(clinical, cn_file, outdir):
cn = pd.read_csv(cn_file, sep='\t', index_col=0)
cn_by_patient = cn.transpose()
cn_by_patient = cn_by_patient.drop(['Entrez_Gene_Id'])
cn = cn_by_patient[['KLF6', 'FBLN1']]
data = cn.join(clinical, how='inner')
analyses = {
'CNA only': [age_r, 'cirrhosis_0', 'HBV', 'HCV'],
}
results = pd.DataFrame()
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
# self.whatever to access them in the rest of the script, runs before other scripts
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_massage(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
# the func should be able to identify this in-valid input and returns None after prints
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_metrics_findFirstTimeAboveVel_1(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [-0.000051, -0.000051, -0.000041, -0.000066, -0.000111, -0.000158, -0.000194, -0.000207, 0.000016, 0.000107, 0.000198]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_2(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_3(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_4(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeOutside_1(self):
pass
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
#result = metrics.common.findFirstTimeOutside(data_object)
#expected_result = 0
#self.assertEqual(expected_result, result)
#err: NameError: name 'pos' is not defined --------------------------------------------------------!!!!!!!!!
def test_metrics_colMean_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position')
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_colMean_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = 6.5
self.assertEqual(expected_result, result)
def test_metrics_colMean_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = np.nan
#self.assertEqual(expected_result, result)
np.testing.assert_equal(expected_result, result)
def test_metrics_colSD_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 3.1622776601683795
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position', 3)
expected_result = 2.29128784747792
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 0
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colMax_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 10
self.assertEqual(expected_result, result)
def test_metrics_colMax_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 9
self.assertEqual(expected_result, result)
def test_metrics_colMax_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_colMin_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMin(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_colMin_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMin(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_timeAboveSpeed_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, True)
expected_result = 1.002994011976048
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_timeAboveSpeed_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData(data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, False)
expected_result = 0.1675
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_timeAboveSpeed_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, False)
expected_result = 0.0
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_roadExits_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'RoadOffset': [1.7679, 1.7679, 1.5551, 1.5551, 1.5551, 1.667174, 1.667174, 1.668028, 1.668028, 1.668028, 1.786122],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExits(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_roadExits_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'RoadOffset': [7.3, 7.4, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [0, 15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExits(data_object)
expected_result = 0.034
self.assertEqual(expected_result, result)
def test_metrics_roadExits_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'RoadOffset': [-1, -1, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExits(data_object)
expected_result = 0.034
self.assertEqual(expected_result, result)
def test_metrics_roadExitsY_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'YPos': [1.7679, 1.7679, 1.5551, 1.5551, 1.5551, 1.667174, 1.667174, 1.668028, 1.668028, 1.668028, 1.786122],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExitsY(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_roadExitsY_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'YPos': [7.3, 7.4, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [0, 15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExitsY(data_object)
expected_result = 0.184
self.assertEqual(expected_result, result)
def test_metrics_roadExitsY_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'YPos': [-1, -1, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExitsY(data_object)
expected_result = 0.184
self.assertEqual(expected_result, result)
def test_metrics_steeringEntropy_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Steer': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
df = pandas.DataFrame(data=d)
from src.models.ModelManager import ModelManager
from src.models.sklearn.Naive_Bayes import Naive_Bayes
from src.models.huggingface.roberta import HuggingFaceModel
import os
import pandas as pd
def pred_if_not_pred(base_path, ModelManager, candidate):
for root_base, dirs, _ in os.walk(base_path, topdown=False):
for day_dir in dirs:
path_day = os.path.join(root_base, day_dir)
for root_day, dirs, files in os.walk(path_day, topdown=False):
for csv_candidate in files:
if csv_candidate.startswith(candidate):
path_candidate = os.path.join(root_day, csv_candidate)
if not os.path.exists(ModelManager.get_csv_out_from_csv_in(path_candidate)):
ModelManager.predict(path_candidate)
def complete_df(df, day):
df["day"] = int(day)
df["preds_total"] = df["predict_Positive"] + \
df["predict_Neutral"] + df["predict_Negative"]
df["preds_not_positive"] = df["predict_Neutral"] + \
df["predict_Negative"]
return df
def load_all_candidates_csv(result_dir, candidate):
df = None
for root_base, dirs, _ in os.walk(result_dir, topdown=False):
for day_dir in dirs:
path_day = os.path.join(root_base, day_dir)
for root_day, dirs, files in os.walk(path_day, topdown=False):
for csv_candidate in files:
if csv_candidate.startswith(candidate):
path_candidate = os.path.join(root_day, csv_candidate)
y_preds = pd.read_csv(path_candidate)
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import pandas as pd
import argparse
from common import transitions
import seaborn as sns
def main():
if args.var_name.startswith("_"): # variable to extract
var_name = args.var_name
else: # catch variables without underscore
var_name = '_'+args.var_name
print("\nvariable: %s" % var_name)
in_state = args.in_state # value of initial state
print("initial state: %d" % in_state)
print("Loading household data...\n")
# household response data - only keep required variables (files are too big to store in memory)
var_dict = {}
for wave in range(1,8):
waveletter = chr(96+wave) # 1 -> "a" etc
data = pd.read_csv('data/'+waveletter+'_hhresp.tab', sep ='\t')
data = data.loc[data[waveletter+var_name]>=0] # Drop any missing values
var_dict[wave] = data[[waveletter+'_hrpid', waveletter+var_name]].set_index(waveletter+'_hrpid')
# transitions from wave w to wave w+1
t_perc_df = transitions(var_name, in_state, var_dict)[0]
t_perc_df.index.name = 'final state'
print("\n%%hh transitions from intial state (%d) in wave w to state in w+1:" % in_state)
print(t_perc_df.round(2))
# transitions at any time from given initial state
all_waves = pd.concat([var_dict[1], var_dict[2], var_dict[3], var_dict[4], var_dict[5], var_dict[6], var_dict[7]], axis=1, join='inner')
r"""Submodule frequentist_statistics.py includes the following functions: <br>
- **normal_check():** compare the distribution of numeric variables to a normal distribution using the
Kolmogorov-Smirnov test <br>
- **correlation_analysis():** Run correlations for numerical features and return output in different formats <br>
- **correlations_as_sample_increases():** Run correlations for subparts of the data to check robustness <br>
- **multiple_univariate_OLSs():** Tmp <br>
- **potential_for_change_index():** Calculate the potential for change index based on either variants of the r-squared
(from linear regression) or the r-value (pearson correlation) <br>
- **correct_pvalues():** function to correct for multiple testing <br>
- **partial_correlation():** function to calculate the partial correlations whilst correcting for other variables <br>
"""
from itertools import combinations
from itertools import product
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from matplotlib.lines import Line2D
from scipy import stats
from sklearn.linear_model import LinearRegression
from statsmodels.stats.multitest import multipletests
from .utils import apply_scaling
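# Typical usage (a sketch based on the Examples in the function docstrings below;
# assumes seaborn is installed so its bundled example datasets can be loaded):
#   import seaborn as sns
#   from jmspack.frequentist_statistics import normal_check, correlation_analysis
#   iris = sns.load_dataset("iris")
#   normality = normal_check(iris)  # Kolmogorov-Smirnov check per numeric column
#   results = correlation_analysis(iris, method="pearson", dropna="pairwise")
#   results["summary"]  # tidy table with r-value, p-value and N per feature pair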
def normal_check(data: pd.DataFrame) -> pd.DataFrame:
r"""Compare the distribution of numeric variables to a normal distribution using the Kolmogorov-Smirnov test
Wrapper for `scipy.stats.kstest`: the empirical data is compared to a normally distributed variable with the
same mean and standard deviation. A significant result (p < 0.05) in the goodness of fit test means that the
data is not normally distributed.
Parameters
----------
data: pandas.DataFrame
Dataframe including the columns of interest
Returns
----------
df_normality_check: pd.DataFrame
Dataframe with column names, p-values and an indication of normality
Examples
----------
>>> tips = sns.load_dataset("tips")
>>> df_normality_check = normal_check(tips)
"""
# Select numeric columns only
num_features = data.select_dtypes(include="number").columns.tolist()
# Compare distribution of each feature to a normal distribution with given mean and std
df_normality_check = data[num_features].apply(
lambda x: stats.kstest(
x.dropna(), stats.norm.cdf, args=(np.nanmean(x), np.nanstd(x)), N=len(x)
)[1],
axis=0,
)
# create a label that indicates whether a feature has a normal distribution or not
df_normality_check = pd.DataFrame(df_normality_check).reset_index()
df_normality_check.columns = ["feature", "p-value"]
df_normality_check["normality"] = df_normality_check["p-value"] >= 0.05
return df_normality_check
def permute_test(a, test_type, test, **kwargs):
r"""Helper function to run tests for permutations
Parameters
----------
a : np.array
test_type: str {'correlation', 'independent_t_test'}
Type of the test to be used
test:
e.g. `scipy.stats.pearsonr` or `statsmodels.stats.weightstats.ttest_ind`
**kwargs:
Additional keywords to be added to `test`
- `a2` for the second feature if test_type = 'correlation'
Returns
----------
float:
p value for permutation
"""
if test_type == "correlation":
a2 = kwargs["a2"]
_, p = test(a, a2)
else:
raise ValueError("Unknown test_type provided")
return p
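# A minimal sketch (an illustration, not the library's exact implementation) of how
# `permute_test` can drive a correlation permutation test: shuffle one variable,
# re-run the test, and collect the results over many draws. `x` and `y` are assumed
# to be 1-D numpy arrays of equal length.
#
#   rng = np.random.default_rng(42)
#   permuted_ps = [
#       permute_test(rng.permutation(x), "correlation", stats.pearsonr, a2=y)
#       for _ in range(1000)
#   ]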
def correlation_analysis(
data: pd.DataFrame,
col_list=None,
row_list=None,
check_norm=False,
method: str = "pearson",
dropna: str = "pairwise",
permutation_test: bool = False,
n_permutations: int = 1000,
random_state=None,
):
r"""Run correlations for numerical features and return output in different formats
Different methods to compute correlations and to handle missing values are implemented.
Inspired by `researchpy.corr_case` and `researchpy.corr_pair`.
Parameters
----------
data : pandas.DataFrame
Dataframe with variables in columns, cases in rows
row_list: list or None (default: None)
List with names of columns in `data` that should be in the rows of the correlogram.
If None, all columns are used but only every unique combination.
col_list: list or None (default: None)
List with names of columns in `data` that should be in the columns of the correlogram.
If None, all columns are used and only every unique combination.
check_norm: bool (default: False)
If True, normality will be checked for columns in `data` using `normal_check`. This influences the used method
for correlations, i.e. Pearson or Spearman. Note: normality check ignores missing values.
method: {'pearson', 'kendall', 'spearman'}, default 'pearson'
Type of correlation, either Pearson's r, Spearman's rho, or Kendall's tau, implemented via respectively
`scipy.stats.pearsonr`, `scipy.stats.spearmanr`, and `scipy.stats.kendalltau`
Will be ignored if check_norm=True. Instead, Person's r is used for every combination of normally distributed
columns and Spearman's rho is used for all other combinations.
dropna : {'listwise', 'pairwise'}, default 'pairwise'
Should rows with missing values be dropped over the complete `data` ('listwise') or for every correlation
separately ('pairwise')
permutation_test: bool (default: False)
If true, a permutation test will added
n_permutations: int (default: 1000)
Number of permutations in the permutation test
random_state: None or int (default: None)
Random state for permutation_test. If not None, random_state will be updated for every permutation
plot_permutation: bool (default: False)
Whether to plot the results of the permutation test
figsize: tuple (default: (11.7, 8.27))
Width and height of the figure in inches
Returns
----------
result_dict: dict
Dictionary containing with the following keys:
info : pandas.DataFrame
Description of correlation method, missing values handling and number of observations
r-values : pandas.DataFrame
Dataframe with correlation coefficients. Indices and columns are column names from `data`. Only lower
triangle is filled.
p-values : pandas.DataFrame
Dataframe with p-values. Indices and columns are column names from `data`. Only lower triangle is filled.
N : pandas.DataFrame
Dataframe with numbers of observations. Indices and columns are column names from `data`. Only lower
triangle is filled. If dropna ='listwise', every correlation will have the same number of observations.
summary : pandas.DataFrame
Dataframe with columns ['analysis', 'feature1', 'feature2', 'r-value', 'p-value', 'N', 'stat-sign']
which indicate the type of test used for the correlation, the pair of columns, the correlation coefficient,
the p-value, the number of observations for each combination of columns in `data` and whether the r-value is
statistically significant.
Examples
----------
>>> from jmspack.frequentist_statistics import correlation_analysis
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
    >>> dict_results = correlation_analysis(iris, method='pearson', dropna='listwise', permutation_test=False,
    ...                                     n_permutations=100, check_norm=True)
>>> dict_results['summary']
References
----------
<NAME> (2018). researchpy's documentation [Revision 9ae5ed63]. Retrieved from
https://researchpy.readthedocs.io/en/latest/
"""
# Settings test
if method == "pearson":
test, test_name = stats.pearsonr, "Pearson"
elif method == "spearman":
test, test_name = stats.spearmanr, "Spearman Rank"
elif method == "kendall":
test, test_name = stats.kendalltau, "Kendall's Tau-b"
else:
raise ValueError("method not in {'pearson', 'kendall', 'spearman'}")
# Copy numerical data from the original data
data = data.copy().select_dtypes("number")
# Get correct lists
if col_list and not row_list:
row_list = data.select_dtypes("number").drop(col_list, axis=1).columns.tolist()
elif row_list and not col_list:
col_list = data.select_dtypes("number").drop(row_list, axis=1).columns.tolist()
# Initializing dataframes to store results
info = pd.DataFrame()
summary = pd.DataFrame()
if not col_list and not row_list:
r_vals = pd.DataFrame(columns=data.columns, index=data.columns)
p_vals = pd.DataFrame(columns=data.columns, index=data.columns)
n_vals = pd.DataFrame(columns=data.columns, index=data.columns)
iterator = combinations(data.columns, 2)
else:
r_vals = pd.DataFrame(columns=col_list, index=row_list)
p_vals = pd.DataFrame(columns=col_list, index=row_list)
n_vals = pd.DataFrame(columns=col_list, index=row_list)
iterator = product(col_list, row_list)
if dropna == "listwise":
# Remove rows with missing values
data = data.dropna(how="any", axis="index")
        info = pd.concat(
            [info, pd.DataFrame({f"{test_name} correlation test using {dropna} deletion": [f"Total observations used = {len(data)}"]})],
            ignore_index=True,
        )
elif dropna == "pairwise":
        info = pd.concat(
            [info, pd.DataFrame({f"{test_name} correlation test using {dropna} deletion": [f"Observations in the data = {len(data)}"]})],
            ignore_index=True,
        )
else:
raise ValueError("dropna not in {'listwise', 'pairwise'}")
if check_norm:
# Check normality of all columns in the data
df_normality = normal_check(data)
norm_names = df_normality.loc[df_normality["normality"], "feature"].tolist()
# Iterating through the Pandas series and performing the correlation
for col1, col2 in iterator:
if dropna == "pairwise":
# Remove rows with missing values in the pair of columns
test_data = data[[col1, col2]].dropna()
else:
test_data = data
if check_norm:
# Select Pearson's r only if both columns are normally distributed
if (col1 in norm_names) and (col2 in norm_names):
test, test_name = stats.pearsonr, "Pearson"
else:
test, test_name = stats.spearmanr, "Spearman Rank"
# Run correlations
r_value, p_value = test(test_data.loc[:, col1], test_data.loc[:, col2])
n_value = len(test_data)
# Store output in matrix format
try:
r_vals.loc[col2, col1] = r_value
p_vals.loc[col2, col1] = p_value
n_vals.loc[col2, col1] = n_value
except KeyError:
r_vals.loc[col1, col2] = r_value
p_vals.loc[col1, col2] = p_value
n_vals.loc[col1, col2] = n_value
# Store output in dataframe format
dict_summary = {
"analysis": test_name,
"feature1": col1,
"feature2": col2,
"r-value": r_value,
"p-value": p_value,
"stat-sign": (p_value < 0.05),
"N": n_value,
}
if permutation_test:
raise ValueError("permutation_test has yet to be implemented")
# # Copy the complete data
# col2_shuffle = np.array(test_data.loc[:, col2])
# col2_shuffle = np.repeat(
# col2_shuffle[:, np.newaxis], n_permutations, axis=1
# )
# # Shuffle within the columns
# np.random.seed(random_state)
# ix_i = np.random.sample(col2_shuffle.shape).argsort(axis=0)
# ix_j = np.tile(np.arange(col2_shuffle.shape[1]), (col2_shuffle.shape[0], 1))
# col2_shuffle = col2_shuffle[ix_i, ix_j]
# permutations = np.apply_along_axis(
# permute_test,
# axis=0,
# arr=col2_shuffle,
# test_type="correlation",
# test=test,
# a2=np.array(test_data.loc[:, col1]),
# )
#
# extreme_permutation = np.where(permutations < p_value, 1, 0)
# p_permutation = extreme_permutation.sum() / len(permutations)
# dict_summary["permutation-p-value"] = p_permutation
#
# # Reset random seed numpy
# np.random.seed(None)
summary = pd.concat(
[summary, pd.DataFrame(data=dict_summary, index=[0])],
axis=0,
ignore_index=True,
sort=False,
)
# Embed results within a dictionary
result_dict = {
"r-value": r_vals,
"p-value": p_vals,
"N": n_vals,
"info": info,
"summary": summary,
}
return result_dict
def correlations_as_sample_increases(
data: pd.DataFrame,
feature1: str,
feature2: str,
starting_N: int = 10,
step: int = 1,
method="pearson",
random_state=42,
bootstrap: bool = False,
bootstrap_per_N: int = 2,
plot: bool = True,
addition_to_title: str = "",
figsize: Tuple[float, float] = (9.0, 4.0),
alpha: float = 0.05,
):
r"""Plot changes in r-value and p-value from correlation between two features when sample size increases.
Different methods to compute correlations are implemented. Data is shuffled first, to prevent any order effects.
Parameters
----------
data : pandas.DataFrame
Dataframe with variables in columns, cases in rows
feature1: str
Name of column with first feature to be included in correlation
feature2: str
Name of column with second feature to be included in correlation
starting_N: int (default: 10)
Number of cases that should be used for first correlation
step: int (default: 1)
Step for increasing the number of cases for the correlations
method: {'pearson', 'kendall', 'spearman'}, default 'pearson'
Type of correlation, either Pearson's r, Spearman's rho, or Kendall's tau, implemented via respectively
`scipy.stats.pearsonr`, `scipy.stats.spearmanr`, and `scipy.stats.kendalltau`.
random_state: int (default: 42)
Random state for reordering the data
bootstrap: bool
Whether to bootstrap the data at each N
bootstrap_per_N: int
If bootstrap is True then how many bootstraps per each sample size should be performed i.e if bootstrap_per_N
is 2 then at sample size N=20, 2 bootstraps will be performed. This will continue until starting_N == N.
plot: bool (default: True)
Whether to plot the results
addition_to_title: str (default: '')
The title of the plot will be "The absolute r-value between {feature1} and {feature2} as N increases" and
followed by the addition (e.g. to describe a dataset).
    figsize: Tuple[float, float] (default: (9.0, 4.0))
        Width and height of the figure in inches
    alpha: float (default: 0.05)
        Threshold for p-value that should be shown in the plot
Returns
----------
cor_results: pd.DataFrame
Dataframe with the results for all ran analyses
fig: Figure
Figure will be returned if plot=True, otherwise None. This allows you to change properties of the figure
afterwards, e.g. fig.axes[0].set_title('This is my new title')
Examples
----------
>>> import seaborn as sns
>>> from jmspack.frequentist_statistics import correlations_as_sample_increases
>>> iris = sns.load_dataset('iris')
    >>> summary, fig = correlations_as_sample_increases(data=iris,feature1='petal_width',feature2='sepal_length',
    ...                                                 starting_N=20)
"""
data = (
data[[feature1, feature2]].copy()
# Remove rows with np.nans
.dropna()
# Randomize order of the data
.sample(frac=1, random_state=random_state)
)
if data.shape[0] < starting_N:
raise ValueError("Number of valid cases is smaller than the starting_N")
if data.shape[0] < starting_N + step:
raise ValueError(
"Number of valid cases is smaller than the starting_N + step (only one correlation possible)"
)
# Initiate data frame for results
corr_results = pd.DataFrame()
# Loop through all possible number of rows from starting N till number of rows
for i in range(starting_N, data.shape[0] + 1, step):
boot_corr_results = pd.DataFrame()
if bootstrap:
for boot_num in range(0, bootstrap_per_N):
boot_data = data.sample(frac=1, random_state=boot_num)
current_boot_corr = correlation_analysis(
boot_data.iloc[0:i],
method=method,
check_norm=False,
permutation_test=False,
)["summary"][["r-value", "p-value", "N"]]
boot_corr_results = pd.concat(
[boot_corr_results, current_boot_corr], ignore_index=True
)
corr_results = pd.concat(
[corr_results, boot_corr_results], ignore_index=True
)
else:
# Run correlation with all data from first row until row i
current_corr = correlation_analysis(
data.iloc[0:i], method=method, check_norm=False, permutation_test=False
)["summary"][["r-value", "p-value", "N"]]
corr_results = pd.concat([corr_results, current_corr], ignore_index=True)
fig = None
if plot:
fig, ax = plt.subplots(figsize=figsize)
# Add r-value and p-value
_ = sns.lineplot(
x=corr_results["N"],
y=abs(corr_results["r-value"]),
label="absolute r-value",
ax=ax,
).set_title(
f"The absolute r-value between {feature1} and {feature2}\nas N increases {addition_to_title}"
)
_ = sns.lineplot(
x=corr_results["N"], y=corr_results["p-value"], label="p-value", ax=ax
)
# Add alpha level (threshold for p-value)
_ = ax.axhline(
y=alpha, color="black", alpha=0.5, linestyle="--", label=f">= {alpha}"
)
_ = ax.set_ylabel("")
_ = ax.set_ylim(0, 1)
_ = plt.legend()
return corr_results, fig
def multiple_univariate_OLSs(
X: pd.DataFrame,
y: pd.Series,
features_list: list,
):
all_coefs_df = pd.DataFrame()
for feature in features_list:
mod = sm.OLS(endog=y, exog=sm.add_constant(X[[feature]]))
res = mod.fit()
coef_df = pd.read_html(
res.summary().tables[1].as_html(), header=0, index_col=0
)[0].drop("const")
coef_df = coef_df.assign(
**{"rsquared": res.rsquared, "rsquared_adj": res.rsquared_adj}
)
all_coefs_df = pd.concat([all_coefs_df, coef_df])
return all_coefs_df
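# Example usage of `multiple_univariate_OLSs` (a hedged sketch; `iris` and the column choices
# below are illustrative placeholders, not taken from this module):
#   >>> import seaborn as sns
#   >>> iris = sns.load_dataset("iris")
#   >>> X = iris.drop(columns=["sepal_length", "species"])
#   >>> y = iris["sepal_length"]
#   >>> coefs = multiple_univariate_OLSs(X=X, y=y, features_list=X.columns.tolist())
#   >>> coefs[["coef", "P>|t|", "rsquared", "rsquared_adj"]]
# Each row holds the slope of a separate one-predictor OLS of `y` on that feature (taken from
# `res.summary()` table 1), plus that single model's R^2 and adjusted R^2.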
def potential_for_change_index(
data: pd.DataFrame,
features_list: list,
target: str,
minimum_measure: str = "min",
centrality_measure: str = "mean",
maximum_measure: str = "max",
weight_measure: str = "rsquared_adj",
scale_data: bool = True,
pci_heatmap: bool = True,
pci_heatmap_figsize: Tuple[float, float] = (1.0, 4.0),
):
if scale_data:
data = data.pipe(apply_scaling)
if weight_measure == "rsquared_adj" or weight_measure == "rsquared":
tmp_X = data[features_list]
tmp_y = data[target]
weight_df = multiple_univariate_OLSs(
X=tmp_X, y=tmp_y, features_list=features_list
)
negative_list = weight_df[weight_df["coef"] < 0].index.tolist()
else:
output_dict = correlation_analysis(
data=data,
col_list=features_list,
row_list=[target],
method="pearson",
check_norm=False,
dropna="pairwise",
permutation_test=False,
n_permutations=10,
random_state=69420,
)
weight_df = output_dict["summary"].set_index("feature1")
negative_list = weight_df[weight_df["r-value"] < 0].index.tolist()
    if len(negative_list) == 0:
pci_df = (
# room for improvement calculation (series)
(
data[features_list].agg(centrality_measure)
- (data[features_list].agg(maximum_measure))
).abs()
* weight_df[weight_measure] # weight (based on weight_measure series)
).to_frame("PCI")
else:
neg_pci_df = (
# room for improvement calculation (series)
(
data[negative_list].agg(centrality_measure)
- (data[negative_list].agg(minimum_measure))
).abs()
* weight_df.loc[
negative_list, weight_measure
] # weight (based on weight_measure series)
).to_frame("PCI")
pos_pci_df = (
# room for improvement calculation (series)
(
data[features_list].drop(negative_list, axis=1).agg(centrality_measure)
- (data[features_list].drop(negative_list, axis=1).agg(maximum_measure))
).abs()
* weight_df[weight_measure].drop(
negative_list, axis=0
) # weight (based on weight_measure series)
).to_frame("PCI")
        pci_df = pd.concat([pos_pci_df, neg_pci_df])
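    # Assumed completion: the original function body is truncated at this point in the source dump.
    # The unused `pci_heatmap` / `pci_heatmap_figsize` arguments suggest an optional heatmap of
    # `pci_df` was drawn here, but that code is not recoverable, so only the return is added.
    return pci_df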
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter.filedialog import askopenfilename,asksaveasfilename
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
from readlif.reader import LifFile
import pylab
import matplotlib  # needed for the matplotlib.use(...) calls in the plotting helpers below
import pandas as pd
import numpy as np
import seaborn as sns
import PIL
from PIL import Image
import read_lif
import skimage # Library for image manipulation
from skimage import io
from skimage.io import imread # sublibrary from skimage
import trackpy as tp # Library for particle tracking
from scipy.ndimage import gaussian_filter
from collections import Counter, OrderedDict
from pywt import wavedecn, waverecn
import tifffile as tif
from joblib import Parallel, delayed
import matplotlib.backends.backend_pdf
from matplotlib.backends.backend_pdf import PdfPages
import PyPDF2
from fpdf import FPDF
from PyPDF2 import PdfFileMerger
from reportlab.pdfgen import canvas
from datetime import datetime
import multiprocessing
import tkinter as tk
from cellpose import models
from cellpose import plot
#---------------------------------------------------------------------------------------------------------------------------------------------------
# GLOBAL LISTS
#---------------------------------------------------------------------------------------------------------------------------------------------------
sizes=[]
particle_sizes=[]
image=[]
number=[]
fp2=[]
number_of_parti=[]
number_of_part_in_Green=[]
number_of_part_in_Blue=[]
inpu_cell_size=[]
inpu_part_size=[]
negative_cells=[]
cells=[]
clean_div=[]
use_GPU = models.use_gpu()
model = models.Cellpose(gpu=use_GPU, model_type='cyto')
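# Note (illustrative, not from this file's visible code): a Cellpose model created this way is
# typically applied later with something like
#   masks, flows, styles, diams = model.eval(img, diameter=cell_diameter, channels=[0, 0])
# where `img` is a 2D image, `diameter` the expected object size in pixels, and `masks` the
# labeled segmentation array; the names used here are placeholders.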
pdf_files=[]
noise_l=[]
gaus_f=[]
persentil=[]
persentil2=[]
#---------------------------------------------------------------------------------------------------------------------------------------------------
# FORMAT GUI
#---------------------------------------------------------------------------------------------------------------------------------------------------
# contrast border thumbnail
root = Tk()
root.title(" Channel Colocalization Tool")
root.geometry("1040x1040+0+0")#
root.configure(background='#4B637F')
root.attributes('-alpha',0.98)
my_img = tk.PhotoImage(file = r"C:\Users\Alexia\Desktop\bio2.png")
root.iconphoto(False, my_img)
root2 = Tk()
root2.title("Viewer")
root2.geometry("200x250+1050+0")#
root2.configure(background='#4B637F')
root2.attributes('-alpha',0.95)
root3 = Toplevel()
root3.title("Channel selection")
root3.geometry("200x150+1050+305")#
root3.configure(background='#4B637F')
root3.attributes('-alpha',0.92)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Channels ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold ", 10))
l.place(x=106,y=3)
T =Text(root3, height = 5, width = 5)
l = Label(root3, text = "Nucleus ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=30)
var_n = IntVar()
var_c2= IntVar()
var_c3= IntVar()
var_c4= IntVar()
def test1():
global nucleus_channel
nucleus_channel=var_n.get()
c1=Checkbutton(root3, text="1",variable=var_n,onvalue = 1,bg='#4B637F',command = test1).place(x=85, y=30)
c2=Checkbutton(root3, text="2",variable=var_n,onvalue = 2,bg='#4B637F',command = test1).place(x=123, y=30)
c3=Checkbutton(root3, text="3",variable=var_n,onvalue = 3,bg='#4B637F',command = test1).place(x=160, y=30)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Cutoplasm ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=55)
var_c1= IntVar()
def test2():
global channel1
channel1=var_c1.get()
d1=Checkbutton(root3, text="1",variable=var_c1,onvalue = 1,bg='#4B637F',command = test2).place(x=85, y=55)
d2=Checkbutton(root3, text="2",variable=var_c1,onvalue = 2,bg='#4B637F',command = test2).place(x=123, y=55)
d3=Checkbutton(root3, text="3",variable=var_c1,onvalue = 3,bg='#4B637F',command = test2).place(x=160, y=55)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Colocalized ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=80)
def test3():
global channel2
channel2=var_c2.get()
def test4():
global channel3
channel3=var_c3.get()
def test5():
global channel4
channel4=var_c4.get()
da1=Checkbutton(root3, text="1",variable=var_c2,onvalue = 1,bg='#4B637F',command = test3).place(x=85, y=80)
da2=Checkbutton(root3, text="2",variable=var_c3,onvalue = 2,bg='#4B637F',command = test4).place(x=123, y=80)
da3=Checkbutton(root3, text="3",variable=var_c4,onvalue = 3,bg='#4B637F',command = test5).place(x=160, y=80)
def on():
line = Frame(root3, height=3, width=454, relief='groove',background='#4B637F')
line.place(x=0, y=1)
line = Frame(root3, height=3, width=454, relief='groove',background='#4B637F')
line.place(x=0, y=146)
line = Frame(root3, height=165, width=3, relief='groove',background='#4B637F')
line.place(x=1, y=1)
line = Frame(root3, height=165, width=3, relief='groove',background='#4B637F')
line.place(x=196, y=1)
btn33 = Button(root3, text="Done", width=20, bg='#142841', fg='white', font=('ariel 11 bold'), relief=GROOVE, command=on)
btn33.place(x=5, y=110)
def impo(event=None):
import matplotlib
import matplotlib.pyplot as plt
def plotting_tocheck():
matplotlib.use('TkAgg') # <-- THIS MAKES IT FAST!
plt.figure(figsize=(10,10))
plt.imshow(z_0,cmap='Reds')
plt.show()
impo()
def plotting_tocheck2():
matplotlib.use('TkAgg') # <-- THIS MAKES IT FAST!
plt.figure(figsize=(10,10))
plt.imshow(z_1,cmap='Blues')
plt.show()
impo()
def plotting_tocheck3():
matplotlib.use('TkAgg') # <-- THIS MAKES IT FAST!
plt.figure(figsize=(10,10))
plt.imshow(z_2,cmap='Greens')
plt.show()
impo()
root4 = Tk()
root4.title("Channel selection")
root4.geometry("200x138+1050+510")
root4.configure(background='#4B637F')
root4.attributes('-alpha',0.92)
line = Frame(root4, height=3, width=20, relief='groove',background='#142841')
line.place(x=14, y=25)
btn1 = Button(root4, text="View Channel1", width=18,height=1,bg='#57CAC8', fg='WHITE',font=('ariel 9 bold'), relief=GROOVE, command=plotting_tocheck)
btn1.place(x=50, y=12)
line = Frame(root4, height=3, width=20, relief='groove',background='#142841')
line.place(x=14, y=62)
line = Frame(root4, height=3, width=20, relief='groove',background='#142841')
line.place(x=14, y=68)
btn1 = Button(root4, text="View Channel2", width=18,height=1,bg='#57CAC8', fg='WHITE',font=('ariel 9 bold'), relief=GROOVE, command=plotting_tocheck2)
btn1.place(x=50, y=55)
line = Frame(root4, height=3, width=20, relief='groove',background='#142841')
line.place(x=14, y=103)
line = Frame(root4, height=3, width=20, relief='groove',background='#142841')
line.place(x=14, y=109)
line = Frame(root4, height=3, width=20, relief='groove',background='#142841')
line.place(x=14, y=115)
btn1 = Button(root4, text="View Channel3", width=18,height=1,bg='#57CAC8', fg='WHITE',font=('ariel 9 bold'), relief=GROOVE, command=plotting_tocheck3)
btn1.place(x=50, y=98)
#---------------------------------------------------------------------------------------------------------------------------------------------------
# CANVASES
#---------------------------------------------------------------------------------------------------------------------------------------------------
canvas22 = Canvas(root2, width="165", height= "150", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas22.place(x=10, y=28)
canvas2 = Canvas(root, width="447", height= "490", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas2.place(x=540, y=80)
canvas3 = Canvas(root, width="395", height= "490" , bg='#263A55',highlightbackground='#263A55',highlightthickness=5)
canvas3.place(x=50, y=80)
canvas4 = Canvas(root, width="395", height= "120" , bg='#142841',highlightbackground='#142841',highlightthickness=5)
canvas4.place(x=50, y=450)
canvas5 = Canvas(root, width="395", height= "130" , bg='#263A55',highlightbackground='#263A55',highlightthickness=5)
canvas5.place(x=50, y=310)
#---------------------------------------------------------------------------------------------------------------------------------------------------
# LINES
#---------------------------------------------------------------------------------------------------------------------------------------------------
line = Frame(root, height=140, width=1, relief='groove')
line.place(x=302, y=310)
line = Frame(root, height=3, width=454, relief='groove',background='#1A507B')
line.place(x=540, y=414)
line = Frame(root, height=165, width=4, relief='groove',background='#1A507B')
line.place(x=770, y=414)
line = Frame(root, height=16, width=3, relief='groove')
line.place(x=62, y=344)
line =Frame(root, height=16, width=3, relief='groove')
line.place(x=62, y=392)
line = Frame(root, height=633, width=1, relief='groove')
line.place(x=490, y=7)
line = Frame(root, height=1, width=405, relief='groove')
line.place(x=50, y=450)
line = Frame(root, height=1, width=405, relief='groove')
line.place(x=50, y=250)
line = Frame(root, height=1, width=405, relief='groove')
line.place(x=50, y=50)
line = Frame(root3, height=1, width=100, relief='groove',background='#142841')
line.place(x=88, y=25)
line = Frame(root, height=140, width=4, relief='groove',background='#142841')
line.place(x=50, y=310)
line = Frame(root, height=140, width=4, relief='groove',background='#142841')
line.place(x=452, y=310)
line = Frame(root, height=137, width=4, relief='groove',background='#142841')
line.place(x=50, y=110)
line = Frame(root, height=137, width=4, relief='groove',background='#142841')
line.place(x=450, y=110)
line = Frame(root, height=4, width=400, relief='groove',background='#142841')
line.place(x=52, y=242)
line = Frame(root2, height=400, width=4, relief='groove',background='#142841')
line.place(x=0, y=0)
line = Frame(root3, height=400, width=4, relief='groove',background='#142841')
line.place(x=0, y=0)
line = Frame(root4, height=400, width=4, relief='groove',background='#142841')
line.place(x=0, y=0)
#---------------------------------------------------------------------------------------------------------------------------------------------------
# TEXT
#---------------------------------------------------------------------------------------------------------------------------------------------------
T = Text(root, height = 5, width = 52)
l = Label(root, text = "IMAGE PREVIEW",bg = "WHITE",fg='#666A68',width=45,height=2)
l.config(font =("helvetica", 12,'bold'))
l.place(x=540,y=20)
l = Label(root, text = "SELECT YOUR FILE",bg = "WHITE",fg='#666A68',width=40,height=2)
l.config(font =("helvetica ", 12, 'bold'))
l.place(x=50,y=20)
l = Label(root, text = " ",bg = "#57CAC8",fg='#152635',width=4,height=2)
l.config(font =("helvetica ", 12, 'bold'))
l.place(x=50,y=20)
line =Frame(l, height=3, width=23, relief='flat')
line.place(x=9, y=11)
line = Frame(l, height=3, width=23, relief='flat')
line.place(x=9, y=19)
line = Frame(l, height=3, width=23, relief='flat')
line.place(x=9, y=27)
l = Label(root, text = " ",bg = '#4B637F',fg='WHITE',width=40,height=1)
l.config(font =("helvetica ", 12, 'bold'))
l.place(x=50,y=246)
l = Label(root, text = "SETTING PARAMETERS",bg = "WHITE",fg='#666A68',width=40,height=2)
l.config(font =("helvetica ", 12, 'bold'))
l.place(x=50,y=271)
T = Text(root, height = 5, width = 52)
l = Label(root, text = "What model would you like: ",bg = '#263A55',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=70,y=140)
btt1 = Button(root, text="Cyto", width=6, bg='#57CAC8', fg='white', font=('ariel 9 bold'), relief=GROOVE)
btt1.place(x=310, y=140)
btt2 = Button(root, text="Nuclei", width=6, bg='#57CAC8', fg='white', font=('ariel 9 bold'), relief=GROOVE, command=None)
btt2.place(x=380, y=140)
fp=IntVar()
lp=IntVar()
T = Text(root, height = 5, width = 52)
l = Label(root, text = "Select images From: ",bg = '#263A55',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=70,y=190)
T = Text(root, height = 5, width = 52)
l = Label(root, text = "To ",bg = '#263A55',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=360,y=190)
ei = Entry(root, bd =1, bg="#C3CBCE",textvariable=fp,width=4,highlightbackground='#1A507B',highlightthickness=5)
ei.place(x=312, y=185)
eii = Entry(root, bd =1, bg="#C3CBCE",textvariable=lp,width=4,highlightbackground='#1A507B',highlightthickness=5)
eii.place(x=392, y=185)
ns = IntVar()
T = Text(root, height = 5, width = 52)
l = Label(root, text = "Approxiamte nucleus diameter: ",bg = '#263A55',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=70,y=340)
l = Label(root, text = " ",bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=343)
e1 =Entry(root, bd =1, bg='white',textvariable=ns,width=6,highlightbackground='#1A507B',highlightthickness=5)
e1.place(x=320, y=340)
ts = IntVar()
T = Text(root, height = 5, width = 52)
l = Label(root, text = "Define the particle diameter: ",bg = '#263A55',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=70,y=390)
l = Label(root, text = " ",bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=394)
e2 = Entry(root, bd =1, bg='white',textvariable=ts,width=6,highlightbackground='#1A507B',highlightthickness=5)
e2.place(x=320, y=390)
ps = IntVar()
T = Text(root, height = 5, width = 52)
l = Label(root, text = "Define 1st persentile: ",bg = '#142841',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=70,y=465)
l = Label(root, text = " ",bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=468)
e3 = Entry(root, bd =1, bg='white',textvariable=ps,width=15,highlightbackground='#1A507B',highlightthickness=5)
e3.place(x=245, y=465)
ps2 = IntVar()
T = Text(root, height = 5, width = 52)
l = Label(root, text = "Define 2nd persentile: ",bg = '#142841',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=70,y=505)
l = Label(root, text = " ",bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=508)
e4 = Entry(root, bd =1, bg='white',textvariable=ps2,width=15,highlightbackground='#1A507B',highlightthickness=5)
e4.place(x=245, y=505)
def process3(event=None):
content = e3.get()
l = Label(root, text = content ,bg = "#585E63",fg='#152635',width=4)
global Ps
Ps=ps.get()
print(ps.get())
l.config(font =("arial bold", 10))
l.place(x=392,y=340)
e3.bind('<Return>', process3)
def process4(event=None):
content = e4.get()
l = Label(root, text = content ,bg = "#585E63",fg='#152635',width=4)
    global Ps2
    Ps2=ps2.get()
    print(ps2.get())
l.config(font =("arial bold", 10))
l.place(x=392,y=440)
e4.bind('<Return>', process4)
T = Text(root2, height = 5, width = 52)
l = Label(root2, text = "Previous Image: ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold", 10))
l.place(x=11,y=5)
#-----------------------------------------------------------------------------------------------------------------------------------------------
#
# FUNCTIONS
#-----------------------------------------------------------------------------------------------------------------------------------------------
def model_selection(event=None):
global img_path,var_model
pdf_files.append('Intro.pdf')
line = Frame(root, height=16, width=3, relief='groove')
line.place(x=62, y=144)
img_path = filedialog.askopenfilename(initialdir=os.getcwd())
line = Frame(root, height=16, width=3, relief='groove',background='#EA6060')
line.place(x=62, y=144)
var_model = tk.IntVar()
btt1 = Button(root, text="Cyto", width=6, bg='#57CAC8', fg='white', font=('ariel 9 bold'), relief=GROOVE, command=lambda: var_model.set(1))
btt1.place(x=310, y=140)
btt2 = Button(root, text="Nuclei", width=6, bg='#57CAC8', fg='white', font=('ariel 9 bold'), relief=GROOVE, command=lambda: var_model.set(2))
btt2.place(x=380, y=140)
root.wait_variable(var_model)
if var_model.get()==1:
btt1 = Button(root, text="Cyto", width=6, bg='#263A55', fg='white', font=('ariel 9 bold'), relief=GROOVE, command=lambda: var_model.set(1))
btt1.place(x=310, y=140)
else :
btt2 = Button(root, text="Nuclei", width=6, bg='#263A55', fg='white', font=('ariel 9 bold'), relief=GROOVE, command=lambda: var_model.set(2))
btt2.place(x=380, y=140)
line = Frame(root, height=19, width=7, relief='flat',background='#263A55')
line.place(x=60, y=142)
if var_model.get()==1:
root3 = Toplevel()
root3.title("Channel selection")
root3.geometry("200x150+1050+305")#
root3.configure(background='#4B637F')
root3.attributes('-alpha',0.99)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Channels ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold ", 10))
l.place(x=106,y=3)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Nucleus ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=30)
line = Frame(root3, height=1, width=100, relief='groove',background='#142841')
line.place(x=88, y=25)
line = Frame(root3, height=3, width=454, relief='groove',background='#EA6060')
line.place(x=0, y=1)
line = Frame(root3, height=3, width=454, relief='groove',background='#EA6060')
line.place(x=0, y=146)
line = Frame(root3, height=165, width=3, relief='groove',background='#EA6060')
line.place(x=1, y=1)
line = Frame(root3, height=165, width=3, relief='groove',background='#EA6060')
line.place(x=196, y=1)
var_n = IntVar()
def test1():
global nucleus_channel
nucleus_channel=var_n.get()
c1=Checkbutton(root3, text="1",variable=var_n,onvalue = 1,bg='#4B637F',command = test1).place(x=85, y=30)
c2=Checkbutton(root3, text="2",variable=var_n,onvalue = 2,bg='#4B637F',command = test1).place(x=123, y=30)
c3=Checkbutton(root3, text="3",variable=var_n,onvalue = 3,bg='#4B637F',command = test1).place(x=160, y=30)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Cutoplasm ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=55)
var_c1= IntVar()
def test2():
global channel1
channel1=var_c1.get()
d1=Checkbutton(root3, text="1",variable=var_c1,onvalue = 1,bg='#4B637F',command = test2).place(x=85, y=55)
d2=Checkbutton(root3, text="2",variable=var_c1,onvalue = 2,bg='#4B637F',command = test2).place(x=123, y=55)
d3=Checkbutton(root3, text="3",variable=var_c1,onvalue = 3,bg='#4B637F',command = test2).place(x=160, y=55)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Colocalized ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=80)
var_c2= IntVar()
var_c3= IntVar()
var_c4= IntVar()
global channel2
global channel3
global channel4
channel2=0
channel3=0
channel4=0
def test3():
global channel2
channel2=0
channel2=var_c2.get()
def test4():
global channel3
channel3=0
channel3=var_c3.get()
def test5():
global channel4
channel4=0
channel4=var_c4.get()
da1=Checkbutton(root3, text="1",variable=var_c2,onvalue = 1,bg='#4B637F',command = test3).place(x=85, y=80)
da2=Checkbutton(root3, text="2",variable=var_c3,onvalue = 2,bg='#4B637F',command = test4).place(x=123, y=80)
da3=Checkbutton(root3, text="3",variable=var_c4,onvalue = 3,bg='#4B637F',command = test5).place(x=160, y=80)
cha=[]
def on():
channel4a=int(channel4)-1
channel3a=int(channel3)-1
channel2a=int(channel2)-1
ch=[channel4a,channel3a,channel2a]
for x in ch :
if x>=0:
cha.append(x)
global ch_1
global ch_2
ch_1=cha[0]
ch_2=cha[1]
line = Frame(root3, height=3, width=454, relief='groove',background='#4B637F')
line.place(x=0, y=1)
line = Frame(root3, height=3, width=454, relief='groove',background='#4B637F')
line.place(x=0, y=146)
line = Frame(root3, height=165, width=3, relief='groove',background='#4B637F')
line.place(x=1, y=1)
line = Frame(root3, height=165, width=3, relief='groove',background='#4B637F')
line.place(x=196, y=1)
image_params()
btn33 = Button(root3, text="Done", width=20, bg='#142841', fg='white', font=('ariel 11 bold'), relief=GROOVE, command=on)
btn33.place(x=5, y=106)
root3.mainloop()
else :
root3 = Toplevel()
root3.title("Channel selectioooon")
root3.geometry("200x150+1050+305")#
root3.configure(background='#4B637F')
root3.attributes('-alpha',0.99)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Channels ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold ", 10))
l.place(x=106,y=5)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Nucleus ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=40)
var_c2= IntVar()
var_c3= IntVar()
var_c4= IntVar()
line = Frame(root3, height=1, width=100, relief='groove',background='#142841')
line.place(x=88, y=30)
line = Frame(root3, height=3, width=454, relief='groove',background='#EA6060')
line.place(x=0, y=1)
line = Frame(root3, height=3, width=454, relief='groove',background='#EA6060')
line.place(x=0, y=146)
line = Frame(root3, height=165, width=3, relief='groove',background='#EA6060')
line.place(x=1, y=1)
line = Frame(root3, height=165, width=3, relief='groove',background='#EA6060')
line.place(x=196, y=1)
var_n = IntVar()
def test1():
global nucleus_channel
nucleus_channel=var_n.get()
c1=Checkbutton(root3, text="1",variable=var_n,onvalue = 1,bg='#4B637F',command = test1).place(x=85, y=40)
c2=Checkbutton(root3, text="2",variable=var_n,onvalue = 2,bg='#4B637F',command = test1).place(x=123, y=40)
c3=Checkbutton(root3, text="3",variable=var_n,onvalue = 3,bg='#4B637F',command = test1).place(x=160, y=40)
T = Text(root3, height = 5, width = 5)
l = Label(root3, text = "Colocalized ",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold underline", 10))
l.place(x=5,y=70)
global channel22
global channel32
global channel42
channel22=0
channel32=0
channel42=0
def test3():
global channel22
channel22=0
channel22=var_c2.get()
def test4():
global channel32
channel32=0
channel32=var_c3.get()
def test5():
global channel42
channel42=0
channel42=var_c4.get()
da1=Checkbutton(root3, text="1",variable=var_c2,onvalue = 1,bg='#4B637F',command = test3).place(x=85, y=70)
da2=Checkbutton(root3, text="2",variable=var_c3,onvalue = 2,bg='#4B637F',command = test4).place(x=123, y=70)
da3=Checkbutton(root3, text="3",variable=var_c4,onvalue = 3,bg='#4B637F',command = test5).place(x=160, y=70)
cha=[]
def on():
channel4a=int(channel42)-1
channel3a=int(channel32)-1
channel2a=int(channel22)-1
ch=[channel4a,channel3a,channel2a]
for x in ch :
if x>=0:
cha.append(x)
global ch_1
global ch_2
ch_1=cha[0]
ch_2=cha[1]
line = Frame(root3, height=3, width=454, relief='groove',background='#4B637F')
line.place(x=0, y=1)
line = Frame(root3, height=3, width=454, relief='groove',background='#4B637F')
line.place(x=0, y=146)
line = Frame(root3, height=165, width=3, relief='groove',background='#4B637F')
line.place(x=1, y=1)
line = Frame(root3, height=165, width=3, relief='groove',background='#4B637F')
line.place(x=196, y=1)
image_params2()
btn33 = Button(root3, text="Done", width=20, bg='#142841', fg='white', font=('ariel 11 bold'), relief=GROOVE, command=on)
btn33.place(x=5, y=106)
root3.mainloop()
def pdf_output(img):
pdfs = ['GFG.pdf', '1st Channel stacked.pdf', '2nd Channel stacked.pdf', '3rd Channel stacked.pdf', 'Masked nucleus.pdf','Masked cytoplasm.pdf','Calculation area.pdf','Filtered channel 2 image.pdf','Filtered channel 3 image.pdf','Identified particles in 2nd channel.pdf','Identified particles in 3rd channel.pdf']
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(pdf)
merger.write(str(img)+'.pdf')
pdfs_names=str(img)+'.pdf'
pdf_files.append(str(pdfs_names))
merger.close()
    # Clean up the per-step PDFs that were merged above
    for f in pdfs:
        os.remove(os.path.join('./', f))
def pdf_output2(img):
pdfs = ['GFG.pdf', '1st Channel stacked.pdf', '2nd Channel stacked.pdf', '3rd Channel stacked.pdf', 'Masked nucleus.pdf','Calculation area.pdf','Filtered channel 2 image.pdf','Filtered channel 3 image.pdf','Identified particles in 2nd channel.pdf','Identified particles in 3rd channel.pdf']
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(pdf)
merger.write(str(img)+'.pdf')
pdfs_names=str(img)+'.pdf'
pdf_files.append(str(pdfs_names))
merger.close()
    # Clean up the per-step PDFs that were merged above (this pipeline produces no 'Masked cytoplasm.pdf')
    for f in pdfs:
        os.remove(os.path.join('./', f))
def final_merging(counter):
merger = PdfFileMerger()
dateTimeObj = datetime.now()
date='This file was created the: '+str(dateTimeObj.year)+'/'+str(dateTimeObj.month)+'/'+str(dateTimeObj.day)
title='IMAGE ANALYSIS REPORT'
    per='Percentile: '+str(persentil)+','+str(persentil2)
filt='Sigma Gaussian filter: '+str(gaus_f[0])
num='Number of analyzed images: '+str(counter)
c = canvas.Canvas("Intro.pdf")
c.setFont("Courier", 9) #choose your font type and font size
c.drawString(390, 820, date)
c.setFont("Helvetica", 26) #choose your font type and font size
c.drawString(130, 620, title)
c.setFont("Courier", 9) #choose your font type and font size
c.drawString(30, 80, per)
c.drawString(30, 50, filt)
c.save()
os.remove(os.path.join('./', 'Saturated image'))
os.remove(os.path.join('./', 'Channel 2 Image'))
os.remove(os.path.join('./', 'Channel 3 Image'))
for pdf in pdf_files:
merger.append(pdf)
merger.write(str('Report')+'.pdf')
merger.close()
for x in pdf_files:
os.remove(os.path.join('./', x))
popupmsg('Analysis completed','Terminal')
def get_going():
if var_model.get()==1:
image_analysis()
else:
image_analysis2()
def popupmsg1(msg, title):
root = tk.Tk()
root.title(title)
root.geometry("220x80+550+300")
root.attributes('-alpha',0.80)
root.configure(background='#581845')
label = Label(root, text=msg,background='#581845',fg='white')
label.pack(side="top", fill="x", pady=10)
B1 = tk.Button(root, text="START",activebackground="#142841", command = lambda:[get_going(),root.destroy()])
B1.pack()
root.mainloop()
def popupmsg(msg, title):
root = tk.Tk()
root.title(title)
root.geometry("140x80+550+300")#
root.attributes('-alpha',0.80)
root.configure(background='#581845')
label = Label(root, text=msg,background='#581845',fg='white')
label.pack(side="top", fill="x", pady=10)
B1 = tk.Button(root, text="Okay", command = root.destroy)
B1.pack()
def plot(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(0.8,0.8))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.show()
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.savefig(plot_title+'.pdf')
def ploting(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
canvas22 = Canvas(root2, width="165", height= "150", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas22.place(x=15, y=37)
fig=plt.figure(figsize=(0.8,0.8))
plt.imshow(image,cmap=color)
plt.show()
canvas = FigureCanvasTkAgg(fig, master=canvas22)
canvas.get_tk_widget().grid(row=0, column=0, ipadx=55, ipady=40)
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.savefig(plot_title+'.pdf')
def plotting(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
canvas2 = Canvas(root, width="450", height= "490", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas2.place(x=550, y=86)
fig=plt.figure(figsize=(5,4))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.show()
canvas = FigureCanvasTkAgg(fig, master=canvas2)
canvas.get_tk_widget().grid(row=0, column=0, ipadx=40, ipady=20)
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.savefig(plot_title+'.pdf')
def plotting2(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
canvas2 = Canvas(root, width="450", height= "490", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas2.place(x=549, y=425)
fig=plt.figure(figsize=(1.5,1.5))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.show()
canvas = FigureCanvasTkAgg(fig, master=canvas2)
canvas.get_tk_widget().grid(row=0, column=0, ipadx=30, ipady=20)
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.savefig(plot_title+'.pdf')
def plotting3(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
canvas2 = Canvas(root, width="450", height= "490", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas2.place(x=575, y=425)
fig=plt.figure(figsize=(1.5,1.5))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.show()
canvas = FigureCanvasTkAgg(fig, master=canvas2)
canvas.get_tk_widget().grid(row=0, column=0, ipadx=30, ipady=20)
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.savefig(plot_title+'.pdf')
def plotting4(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
canvas2 = Canvas(root, width="450", height= "490", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas2.place(x=801, y=425)
fig=plt.figure(figsize=(1.5,1.5))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.show()
canvas = FigureCanvasTkAgg(fig, master=canvas2)
canvas.get_tk_widget().grid(row=0, column=0, ipadx=30, ipady=20)
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.savefig(plot_title+'.pdf')
def plotting5(image,color,plot_title):
matplotlib.use('agg')
import matplotlib.pyplot as plt
canvas2 = Canvas(root, width="450", height= "490", relief=RIDGE, bd=1, bg='white',highlightbackground='#1A507B',highlightthickness=5)
canvas2.place(x=801, y=425)
fig=plt.figure(figsize=(1.5,1.5))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.show()
canvas = FigureCanvasTkAgg(fig, master=canvas2)
canvas.get_tk_widget().grid(row=0, column=0, ipadx=30, ipady=20)
plt.figure(figsize=(10,10))
plt.imshow(image,cmap=color)
plt.title(plot_title)
plt.savefig(plot_title+'.pdf')
def stacking_step(z_list_0,z_list_1,z_list_2):
    # Channel 1: per-pixel maximum across all z-slices (maximum-intensity projection)
    global z_0
    z_st_0 = [np.array(z_list_0[x]) for x in range(len(z_list_0))]
    z_0 = np.maximum.reduce(z_st_0)
plot(z_0,'Reds','1st Channel stacked')
    # Channel 2: per-pixel maximum across all z-slices (maximum-intensity projection)
    global z_1
    z_st_1 = [np.array(z_list_1[x]) for x in range(len(z_list_1))]
    z_1 = np.maximum.reduce(z_st_1)
plot(z_1,'Greens','2nd Channel stacked')
z_st_2=[]
for x in range(len(z_list_2)):
pic=np.array(z_list_2[x])
z_st_2.append(pic)
global z_2
#element-wise maximum across all collected z-slices; np.maximum.reduce handles any stack depth, replacing the per-length branches
z_2=np.maximum.reduce(z_st_2)
plot(z_2,'Blues','3rd Channel stacked')
return z_0,z_1,z_2
def saturated_images(z_list_0,z_list_2):
#Build "saturated" projections by summing all z-slices (used for segmentation and preview plots)
z_st_0=[]
for x in range(len(z_list_0)):
pic=np.array(z_list_0[x])
z_st_0.append(pic)
global z_3
#element-wise sum of every z-slice; Python's sum() chains + exactly like the per-length branches did, for any stack depth
z_3=sum(z_st_0)
z_st_2=[]
for x in range(len(z_list_2)):
pic=np.array(z_list_2[x])
z_st_2.append(pic)
global z_4
z_4=sum(z_st_2) #same element-wise sum over all z-slices, collapsed from the per-length branches
return z_3,z_4
def wavelet_based_BG_subtraction(image,num_levels,noise_lvl):
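#Estimate the smooth background from the coarse wavelet levels and the pixel noise from the finest detail level(s), in the spirit of WBNS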
coeffs = wavedecn(image, 'db1', level=None) #decomposition
coeffs2 = coeffs.copy()
for BGlvl in range(1, num_levels):
coeffs[-BGlvl] = {k: np.zeros_like(v) for k, v in coeffs[-BGlvl].items()} #set lvl 1 details to zero
Background = waverecn(coeffs, 'db1') #reconstruction
del coeffs
BG_unfiltered = Background
Background = gaussian_filter(Background, sigma=2**num_levels) #gaussian filter sigma = 2^#lvls
coeffs2[0] = np.ones_like(coeffs2[0]) #set approx to one (constant)
for lvl in range(1, np.size(coeffs2)-noise_lvl):
coeffs2[lvl] = {k: np.zeros_like(v) for k, v in coeffs2[lvl].items()} #keep first detail lvl only
Noise = waverecn(coeffs2, 'db1') #reconstruction
del coeffs2
return Background, Noise, BG_unfiltered
def WBNS(input_image):
# Wavelet-based background and noise subtraction for fluorescence microscopy images
resolution_px = 6
#noise level: number of fine wavelet detail levels treated as noise. If resolution_px > 6 then noise_lvl = 2 may be better
noise_lvl = int(3) #default = 1
#number of levels for background estimate
num_levels = np.uint16(np.ceil(np.log2(resolution_px)))
#read image file, adjust shape if necessary (padding), and plot
#padding here means extending odd-sized dimensions by one edge pixel so the wavelet transform gets even dimensions
image = input_image
img_type = image.dtype
image = np.array(image,dtype = 'float32')
#image = np.array(io.imread(os.path.join(data_dir, file)),dtype = 'float32')
if np.ndim(image) == 2:
shape = np.shape(image)
image = np.reshape(image, [1, shape[0], shape[1]])
shape = np.shape(image)
if shape[1] % 2 != 0:
image = np.pad(image,((0,0), (0,1), (0, 0)), 'edge')
pad_1 = True
else:
pad_1 = False
if shape[2] % 2 != 0:
image = np.pad(image,((0,0), (0,0), (0, 1)), 'edge')
pad_2 = True
else:
pad_2 = False
#extract background and noise
num_cores = multiprocessing.cpu_count() #number of cores on your CPU
res = Parallel(n_jobs=num_cores,max_nbytes=None)(delayed(wavelet_based_BG_subtraction)(image[slice],num_levels, noise_lvl) for slice in range(np.size(image,0)))
Background, Noise, BG_unfiltered = zip(*res)
#convert to float32 numpy arrays
Noise = np.asarray(Noise,dtype = 'float32')
Background = np.asarray(Background,dtype = 'float32')
BG_unfiltered = np.asarray(BG_unfiltered,dtype = 'float32')
#undo padding
if pad_1:
image = image[:,:-1,:]
Noise = Noise[:,:-1,:]
Background = Background[:,:-1,:]
BG_unfiltered = BG_unfiltered[:,:-1,:]
if pad_2:
image = image[:,:,:-1]
Noise = Noise[:,:,:-1]
Background = Background[:,:,:-1]
BG_unfiltered = BG_unfiltered[:,:,:-1]
#save unfiltered BG
BG_unfiltered = np.asarray(BG_unfiltered,dtype=img_type.name)
#save and plot filtered BG
Background = np.asarray(Background,dtype=img_type.name)
#plt.figure()
#imgplot = plt.imshow(np.amax(Background,0), cmap='Blues')
#subtract BG only
result = image - Background
result[result<0] = 0 #positivity constraint
#save and plot noisy signal
result = np.asarray(result,dtype=img_type.name)
noisy_sig = result
#correct noise
Noise[Noise<0] = 0 #positivity constraint
noise_threshold = np.mean(Noise)+2*np.std(Noise)
Noise[Noise>noise_threshold] = noise_threshold #2 sigma threshold reduces artifacts
#subtract Noise
result = image - Background
result = result - Noise
result[result<0] = 0 #positivity constraint
#save noise
Noise = np.asarray(Noise,dtype=img_type.name)
#save result
result = np.asarray(result,dtype=img_type.name)
#Plot result
#plt.figure(figsize=(10,10))
#imgplot = plt.imshow(np.amax(result,0), cmap='Blues')
return result
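#Example usage sketch (hypothetical names, not part of the pipeline): denoise one stacked channel before spot detection.
#frame = np.array(z_list_2[0], dtype='float32') #assumed 2D slice from one channel
#clean = WBNS(frame)[0] #WBNS returns a (1, H, W) array for 2D input, so [0] recovers the 2D image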
def masking(nucleus,cytoplasm,image):
import matplotlib.pyplot as plt
cell_s=sizes[int(image)]
# model = models.Cellpose(gpu=use_GPU, model_type='nuclei')
masks, flows, styles, diams = model.eval(nucleus, diameter=int(cell_s), flow_threshold=None, channels=[0,0])
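#Cellpose returns an integer label image: 0 is background and each detected nucleus gets its own label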
masks= gaussian_filter(masks, sigma=int(gaus_f[0]))
plotting3(masks,'Reds','Masked nucleus')
# model = models.Cellpose(gpu=use_GPU, model_type='cyto')
masksc, flowsc, stylesc, diamsc = model.eval(cytoplasm, diameter=int(cell_s)*2, flow_threshold=None, channels=[0,0])
masksc= gaussian_filter(masksc, sigma=int(gaus_f[0]))
plotting4(masksc,'Blues','Masked cytoplasm')
masksc_copy=masksc.copy()
masksc[masksc>1]=1
masks_copy = masks.copy()
masks[masks==0]=266
masks[masks<266]=0
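#masks is now inverted: 266 outside the nuclei and 0 inside, so multiplying with the cytoplasm mask removes the nuclear area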
masking_preview = np.einsum('jk,jk->jk',masksc_copy, masks) # remove the nucleus for noise
plotting4(masking_preview, 'Oranges', 'Calculation area')
return masks,masksc_copy,masks_copy,masksc
def masking2(nucleus,cytoplasm,image):
# model = models.Cellpose(gpu=use_GPU, model_type='nuclei')
cell_s=sizes[int(image)]
masks, flows, styles, diams = model.eval(nucleus, diameter=int(cell_s), flow_threshold=None, channels=[0,0])
masks_copy = masks.copy()
masks= gaussian_filter(masks, sigma=int(gaus_f[0]))
plotting(masks,'Oranges','Masked nucleus')
masksc, flowsc, stylesc, diamsc = model.eval(cytoplasm, diameter=int(cell_s)*1.5, flow_threshold=None, channels=[0,0])
masksc= gaussian_filter(masksc, sigma=int(gaus_f[0]))
plotting(masksc,'Oranges','Masked cytoplasm')
masks[masks>1]=1
plotting(masks,'Oranges','Calculation area')
return masks,masksc,masks_copy
def quantitative_analysis(nucl_image):
#find how many cell identifiers exist in my array and see how many times each identifier occurs
counts=[]
for x in range(len(np.unique(nucl_image))):
if x == 0:
continue
count = np.count_nonzero(nucl_image == int(x))
counts.append(count)
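#counts holds the pixel area of each labelled cell (label 0 = background is skipped)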
# Find the biggest cell and see how far away the other ones are, in order to estimate the exact number of cells so that I can divide by it at the end
cells=0
if len(counts)>5:
avg=sum(counts)/len(counts)
for x in counts:
percentage=int(x)/int(avg)*100
if percentage > 50:
cells+=100
else:
cells+=int(percentage)
else:
for x in counts:
percentage=int(x)/int(max(counts))*100
if percentage > 49:
cells+=100
else:
cells+=int(percentage)
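#cells accumulates 100 per roughly full-sized cell and the size percentage for smaller fragments, so /100 gives an effective cell count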
number_of_cells=cells/100
return(counts,number_of_cells)
def tracking(img1,img2,number_of_cells,mask_cyto,image,img):
import matplotlib.pyplot as plt
particle_size=particle_sizes[int(image)]
ts=particle_sizes[int(image)]
f1 = tp.locate(img1,int(particle_size),percentile=int((persentil)[0]))
plt.figure(figsize=(50,50))
plt.title('Identified particles in 2nd channel')
f=tp.annotate(f1, img1)
f=f.figure
f.savefig('Identified particles in 2nd channel.pdf')
frame = pd.Series(np.zeros(len(f1['y'])))
f1=f1.assign(frame=frame.values)
green_filter_peaks=len(f1.index)/number_of_cells
f2 = tp.locate(img2,int(particle_size),percentile=int((persentil2)[0]))
plt.figure(figsize=(50,50))
plt.title('Identified particles in 3rd channel')
f=tp.annotate(f2,img2)
f=f.figure
f.savefig('Identified particles in 3rd channel.pdf')
frame = pd.Series(np.ones(len(f2['y'])))
f2=f2.assign(frame=frame.values)
blue_filter_peaks=len(f2.index)/number_of_cells
frames=[f1,f2]
f = pd.concat(frames)
tracking_space=int(ts)
t = tp.link_df(f, int(tracking_space), memory=2)
t1=tp.filter_stubs(t,2)
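#the two channels are treated as two trackpy "frames"; linking them and keeping only trajectories of length 2 leaves the colocalized particles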
number_of_part=t1['particle'].nunique()
##########################################################################################################################################################################################################################
# Cell by cell analysis
##########################################################################################################################################################################################################################
zero=0
for cells in range(len(np.unique(mask_cyto))):
if cells!=0:
selected_masks= mask_cyto==int(cells) # boolean mask that keeps only the pixels of the current cell
removed_mask1 = np.einsum('jk,jk->jk',img1, selected_masks)
removed_mask2 = np.einsum('jk,jk->jk',img2, selected_masks)
f1 = tp.locate(removed_mask1,int(particle_size),percentile=int((persentil)[0]))
frame = pd.Series(np.zeros(len(f1['y'])))
f1=f1.assign(frame=frame.values)
f2 = tp.locate(removed_mask2,int(particle_size),percentile=int((persentil2)[0]))
frame = pd.Series(np.ones(len(f2['y'])))
f2=f2.assign(frame=frame.values)
if len(f1)==0 or len(f2)==0:
zero+=1
else:
frames=[f1,f2]
f = pd.concat(frames)
tracking_space=int(ts)
t = tp.link_df(f, int(tracking_space), memory=2)
t1=tp.filter_stubs(t,2)
if t1['particle'].nunique()==0:
zero+=1
global fp
clean=number_of_part/number_of_cells
fp=number_of_cells-zero
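#fp counts the "positive" cells, i.e. cells that showed colocalized signal in both channels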
if int(fp)==0 or int(fp)<0:
global part
part=0
else:
part=number_of_part/fp
if fp<0:
fp=0
img=img+1
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size = 25)
text='IMAGE '+str(img)
pdf.cell(200, 10, txt = text, ln = 140, align = 'C')
pdf.output("GFG.pdf")
pdf_output(img)
number.append(number_of_cells)
fp2.append(fp)
number_of_parti.append(part)
number_of_part_in_Green.append(green_filter_peaks)
number_of_part_in_Blue.append(blue_filter_peaks)
inpu_cell_size.append(sizes[image])
inpu_part_size.append(particle_sizes[image])
negative_cells.append(zero)
clean_div.append(clean)
return (number_of_cells,part,green_filter_peaks,blue_filter_peaks,zero,clean,fp)
def tracking2(img1,img2,number_of_cells,mask_cyto,image,img):
particle_size=particle_sizes[int(image)]
ts=particle_sizes[int(image)]
f1 = tp.locate(img1,int(particle_size),percentile=int((persentil)[0]))
plt.figure(figsize=(50,50))
plt.title('Identified particles in 2nd channel')
f=tp.annotate(f1, img1)
f=f.figure
f.savefig('Identified particles in 2nd channel.pdf')
frame = pd.Series(np.zeros(len(f1['y'])))
f1=f1.assign(frame=frame.values)
green_filter_peaks=len(f1.index)/number_of_cells
f2 = tp.locate(img2,int(particle_size),percentile=int((persentil2)[0]))
plt.figure(figsize=(50,50))
plt.title('Identified particles in 3rd channel')
f=tp.annotate(f2,img2)
f=f.figure
f.savefig('Identified particles in 3rd channel.pdf')
frame = pd.Series(np.ones(len(f2['y'])))
f2=f2.assign(frame=frame.values)
blue_filter_peaks=len(f2.index)/number_of_cells
frames=[f1,f2]
f = pd.concat(frames)
tracking_space=int(ts)
t = tp.link_df(f, int(tracking_space), memory=2)
t1=tp.filter_stubs(t,2)
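#as in tracking(): particles linked across both channel "frames" are the colocalized ones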
number_of_part=t1['particle'].nunique()
##########################################################################################################################################################################################################################
# Cell by cell analysis
##########################################################################################################################################################################################################################
zero=0
for cells in range(len(np.unique(mask_cyto))):
if cells!=0:
selected_masks= mask_cyto==int(cells) # boolean mask that keeps only the pixels of the current cell
removed_mask1 = np.einsum('jk,jk->jk',img1, selected_masks)
removed_mask2 = np.einsum('jk,jk->jk',img2, selected_masks)
f1 = tp.locate(removed_mask1,int(particle_size),percentile=int((persentil)[0]))
frame = pd.Series(np.zeros(len(f1['y'])))
f1=f1.assign(frame=frame.values)
plt.figure(figsize=(10,10))
f=tp.annotate(f1, removed_mask1)
f2 = tp.locate(removed_mask2,int(particle_size),percentile=int((persentil2)[0]))
frame = pd.Series(np.ones(len(f2['y'])))
f2=f2.assign(frame=frame.values)
plt.figure(figsize=(10,10))
f=tp.annotate(f2, removed_mask2)
if len(f1)==0 or len(f2)==0:
zero+=1
else:
frames=[f1,f2]
f = pd.concat(frames)
tracking_space=int(ts)
t = tp.link_df(f, int(tracking_space), memory=2)
t1=tp.filter_stubs(t,2)
if t1['particle'].nunique()==0:
zero+=1
global fp
clean=number_of_part/number_of_cells
fp=number_of_cells-zero
if int(fp)==0 or int(fp)<0:
global part
part=0
else:
part=number_of_part/fp
if fp<0:
fp=0
img=img+1
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size = 25)
text='IMAGE '+str(img)
pdf.cell(200, 10, txt = text, ln = 140, align = 'C')
pdf.output("GFG.pdf")
pdf_output2(img)
number.append(number_of_cells)
fp2.append(fp)
number_of_parti.append(part)
number_of_part_in_Green.append(green_filter_peaks)
number_of_part_in_Blue.append(blue_filter_peaks)
inpu_cell_size.append(sizes[image])
inpu_part_size.append(particle_sizes[image])
negative_cells.append(zero)
clean_div.append(clean)
print(number_of_cells,part,green_filter_peaks,blue_filter_peaks,zero,clean,fp)
return (number_of_cells,part,green_filter_peaks,blue_filter_peaks,zero,clean,fp)
def image_analysis(event=None):
global i
counter=0
for i in range(len(img_list)):
import matplotlib.pyplot as plt
if i > int(first_page)-1 and i<int(last_page)+1:
img=int(i)-1
image.append(img+1)
img_0 = file.get_image(int(img))
img_0.get_frame(z=0, t=0, c=0)
c1=int(nucleus_channel)
c2=int(channel1)
#print(int(c1),print(c2),print(c3))
frame_list = [i for i in img_0.get_iter_t(c=0, z=0)]
channel_list = [i for i in img_0.get_iter_c(t=0, z=0)]
z_list_0 = [i for i in img_0.get_iter_z(t=0, c=int(nucleus_channel)-1)]
z_list_c = [i for i in img_0.get_iter_z(t=0, c=int(channel1)-1)]
z_list_2 = [i for i in img_0.get_iter_z(t=0, c=int(ch_1))]
z_list_3 = [i for i in img_0.get_iter_z(t=0, c=int(ch_2))]
images=stacking_step(z_list_0,z_list_2,z_list_3)
mask=masking(saturated_images(z_list_0,z_list_c)[0],saturated_images(z_list_0,z_list_c)[1],counter)
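#images holds the per-channel z-projections; mask holds the Cellpose nucleus/cytoplasm masks built from the saturated projections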
image1=WBNS(images[1])
image2=WBNS(images[2])
removed_mask1 = np.einsum('jk,jk->jk',image1[0], mask[0]) # remove all the background of the cells
removed_mask2 = np.einsum('jk,jk->jk',image2[0], mask[0])
removed_mask_1 = np.einsum('jk,jk->jk',removed_mask1, mask[3]) # remove the nucleus for noise
removed_mask_2 = np.einsum('jk,jk->jk',removed_mask2, mask[3])
img_copy1 = removed_mask_1.copy() # making a copy of our img
img_gaussian_filter_simga_1 = gaussian_filter(img_copy1, sigma=int((gaus_f)[0]))
import matplotlib.pyplot as plt
plotting3(img_gaussian_filter_simga_1,'Greens','Filtered channel 2 image')
img_copy2 = removed_mask_2.copy() # making a copy of our img
img_gaussian_filter_simga_2 = gaussian_filter(img_copy2, sigma=int((gaus_f)[0]))
import matplotlib.pyplot as plt
plotting4(img_gaussian_filter_simga_2,'Blues','Filtered channel 3 image')
number_of_cells,part,green_filter_peaks,blue_filter_peaks,zero,clean,fp=tracking(img_gaussian_filter_simga_1,img_gaussian_filter_simga_2,quantitative_analysis(mask[2])[1],mask[1],counter,img)
counter+=1
import matplotlib.pyplot as plt
excel_output(image,number,number_of_parti,number_of_part_in_Green,number_of_part_in_Blue,inpu_cell_size,inpu_part_size, negative_cells, clean_div,fp2)
def image_analysis2(event=None):
global i
counter=0
for i in range(len(img_list)):
if i > int(first_page)-1 and i<int(last_page)+1:
img=int(i)-1
image.append(img+1)
img_0 = file.get_image(int(img))
img_0.get_frame(z=0, t=0, c=0)
c1=int(nucleus_channel)-1
frame_list = [i for i in img_0.get_iter_t(c=0, z=0)]
channel_list = [i for i in img_0.get_iter_c(t=0, z=0)]
z_list_0 = [i for i in img_0.get_iter_z(t=0, c=int(nucleus_channel)-1)]
z_list_1 = [i for i in img_0.get_iter_z(t=0, c=int(ch_1))]
z_list_2 = [i for i in img_0.get_iter_z(t=0, c=int(ch_2))]
images=stacking_step(z_list_0,z_list_1,z_list_2)
mask=masking2(saturated_images(z_list_0,z_list_0)[0],saturated_images(z_list_0,z_list_0)[1],counter)
image1=WBNS(images[1])
image2=WBNS(images[2])
removed_mask1 = np.einsum('jk,jk->jk',image1[0], mask[0]) # remove all the background of the cells
removed_mask2 = np.einsum('jk,jk->jk',image2[0], mask[0])
img_copy1 = removed_mask1.copy() # making a copy of our img
img_gaussian_filter_simga_1 = gaussian_filter(img_copy1, sigma=int((gaus_f)[0]))
plotting(img_gaussian_filter_simga_1,'Greens','Filtered channel 2 image')
img_copy2 = removed_mask2.copy() # making a copy of our img
img_gaussian_filter_simga_2 = gaussian_filter(img_copy2, sigma=int((gaus_f)[0]))
plotting(img_gaussian_filter_simga_2,'Blues','Filtered channel 3 image')
number_of_cells,part,green_filter_peaks,blue_filter_peaks,zero,clean,fp=tracking2(img_gaussian_filter_simga_1,img_gaussian_filter_simga_2,quantitative_analysis(mask[2])[1],mask[2],counter,img)
counter+=1
excel_output(image,number,number_of_parti,number_of_part_in_Green,number_of_part_in_Blue,inpu_cell_size,inpu_part_size, negative_cells, clean_div,fp2)
def secondary_param(event=None):
line = Frame(root, height=3, width=406, relief='groove',background='#EA6060')
line.place(x=50, y=450)
line = Frame(root, height=3, width=406, relief='groove',background='#EA6060')
line.place(x=50, y=580)
line = Frame(root, height=127, width=3, relief='groove',background='#EA6060')
line.place(x=50, y=453)
line = Frame(root, height=127, width=3, relief='groove',background='#EA6060')
line.place(x=453, y=453)
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=468)
e3 = Entry(root, bd =1, bg='white',textvariable=ps,width=15,highlightbackground='#1A507B',highlightthickness=5)
e3.place(x=245, y=465)
intvar3 = IntVar()
intvar33= IntVar()
def process3(event=None):
content = e3.get()
Ps=ps.get()
persentil.append(Ps)
intvar3.set(100)
line = Frame(root, height=16, width=5, relief='flat',bg='#142841')
line.place(x=61, y=468)
l = Label(root, text = content ,bg = "#585E63",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=468)
e3.bind('<Return>', process3)
e3.wait_variable(intvar3)
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=508)
e4 = Entry(root, bd =1, bg='white',textvariable=ps2,width=15,highlightbackground='#1A507B',highlightthickness=5)
e4.place(x=245, y=505)
def process4(event=None):
content2 = e4.get()
Ps2=ps2.get()
persentil2.append(Ps2)
intvar33.set(100)
line = Frame(root, height=16, width=3, relief='groove',bg='#142841')
line.place(x=62, y=508)
line = Frame(root, height=16, width=5, relief='flat',bg='#142841')
line.place(x=61, y=474)
l = Label(root, text = content2 ,bg = "#585E63",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=508)
popupmsg('Suggested sigma level: 2', 'Information')
e4.bind('<Return>', process4)
e4.wait_variable(intvar33)
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=543)
intvar4 = IntVar()
def test1():
s=var_s.get()
gaus_f.append(s)
intvar4.set(100)
line = Frame(root, height=16, width=3, relief='flat',bg='#142841')
line.place(x=62, y=514)
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=543)
line = Frame(root, height=16, width=3, relief='groove',bg='#142841')
line.place(x=62, y=543)
popupmsg1('Ready to start, this might take a while!', 'Initiate')
c1=Checkbutton(root, text="1",variable=var_s,onvalue = 1, offvalue = 0,bg='#142841',command = test1)
c1.place(x=130, y=540)
c1=Checkbutton(root, text="2",variable=var_s,onvalue = 2, offvalue = 0,bg='#142841',command = test1)
c1.place(x=178, y=540)
c1=Checkbutton(root, text="3",variable=var_s,onvalue = 3, offvalue = 0,bg='#142841',command = test1)
c1.place(x=223, y=540)
c1=Checkbutton(root, text="4",variable=var_s,onvalue = 4, offvalue = 0,bg='#142841',command = test1)
c1.place(x=268, y=540)
c1=Checkbutton(root, text="5",variable=var_s,onvalue = 5, offvalue = 0,bg='#142841',command = test1)
c1.place(x=313, y=540)
c1=Checkbutton(root, text="6",variable=var_s,onvalue = 6, offvalue = 0,bg='#142841',command = test1)
c1.place(x=359, y=540)
c1=Checkbutton(root, text="7",variable=var_s,onvalue = 7, offvalue = 0,bg='#142841',command = test1)
c1.place(x=410, y=540)
noise_l.append(int(3))
def image_params():
global img ,img_list,file,first_page,last_page
#img_path = filedialog.askopenfilename(initialdir=os.getcwd())
file = LifFile(str(img_path))
img_list = [i for i in file.get_iter_image()]
enum=[]
# fp=IntVar()
ei = Entry(root, bd =1, bg="#C3CBCE",textvariable=fp,width=4,highlightbackground='#EA6060',highlightthickness=5)
ei.place(x=312, y=185)
intvar22 = IntVar()
line = Frame(root, height=19, width=7, relief='flat',background='#263A55')
line.place(x=60, y=142)
def process4(event=None):
global first_page
first_page=fp.get()
ei = Entry(root, bd =2, bg='#263A55',fg='white',relief='flat',textvariable=fp,width=4,highlightbackground='#263A55',highlightthickness=5,highlightcolor='#263A55')
ei.place(x=312, y=185)
eii = Entry(root, bd =1, bg="#C3CBCE",textvariable=lp,width=4,highlightbackground='#EA6060',highlightthickness=5)
eii.place(x=392, y=185)
intvar22.set(100)
ei.bind('<Return>', process4)
ei.wait_variable(intvar22)
# lp=IntVar()
eii = Entry(root, bd =1, bg="#C3CBCE",textvariable=lp,width=4,highlightbackground='#EA6060',highlightthickness=5)
eii.place(x=392, y=185)
intvar33 = IntVar()
def process5(event=None):
global last_page
last_page=lp.get()
eii = Entry(root, bd =2, bg='#263A55',fg='white',relief='flat',textvariable=lp,width=4,highlightbackground='#263A55',highlightthickness=5,highlightcolor='#263A55')
eii.place(x=392, y=185)
intvar33.set(100)
eii.bind('<Return>', process5)
eii.wait_variable(intvar33)
for i in range(len(img_list)):
if i > int(first_page)-1 and i<int(last_page)+1:
img=int(i)-1
enum.append(i)
img_0 = file.get_image(int(img))
img_0.get_frame(z=0, t=0, c=0)
c1=int(nucleus_channel)
c2=int(channel1)
c3=int(channel2)
#print(int(c1),print(c2),print(c3))
frame_list = [i for i in img_0.get_iter_t(c=0, z=0)]
channel_list = [i for i in img_0.get_iter_c(t=0, z=0)]
z_list_0 = [i for i in img_0.get_iter_z(t=0, c=int(nucleus_channel)-1)]
z_list_c = [i for i in img_0.get_iter_z(t=0, c=int(channel1)-1)]
z_list_2 = [i for i in img_0.get_iter_z(t=0, c=int(ch_1))]
z_list_3 = [i for i in img_0.get_iter_z(t=0, c=int(ch_2))]
ns = IntVar()
ts = IntVar()
plotting(saturated_images(z_list_0, z_list_c)[0],'Reds','Saturated image: '+ str(img+1))
plotting3(stacking_step(z_list_0,z_list_2,z_list_3)[1],'Greens','Channel 2 Image: '+ str(img+1))
plotting4(stacking_step(z_list_0,z_list_2,z_list_3)[2],'Blues','Channel 3 Image: '+ str(img+1))
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=344)
e1 =Entry(root, bd =1, bg='white',textvariable=ns,width=6,highlightbackground='#1A507B',highlightthickness=5)
e1.place(x=320, y=340)
intvar = IntVar()
def process1(event=None):
content = e1.get()
Ns=ns.get()
intvar.set(100)
l = Label(root, text = content,bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
sizes.append(Ns)
l.place(x=392,y=343)
e1.bind('<Return>', process1)
e1.wait_variable(intvar)
line = Frame(root, height=16, width=3, relief='groove',bg='white')
line.place(x=62, y=344)
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=392)
e2 = Entry(root, bd =1, bg='white',textvariable=ts,width=6,highlightbackground='#1A507B',highlightthickness=5)
e2.place(x=320, y=390)
intvar2 = IntVar()
def process2(event=None):
content = e2.get()
Ts=ts.get()
if Ts % 2:
intvar2.set(100)
particle_sizes.append(Ts)
l = Label(root, text = content,bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=394)
import matplotlib.pyplot as plt
ploting(saturated_images(z_list_0, z_list_2)[0],'Reds','Saturated image: '+ str(img+1))
T = Text(root2, height = 5, width = 52)
l = Label(root2, text = "Diamater input: "+str(ns.get())+" pixels",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold", 8))
l.place(x=11,y=202)
l = Label(root2, text = "Particle input: "+str(ts.get())+" pixels",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold", 8))
l.place(x=11,y=222)
else:
messagebox.showinfo("Task failed succesfully!", "Please instert only odd numbers (ex:1,3)")
e2.bind('<Return>', process2)
e2.wait_variable(intvar2)
line = Frame(root, height=16, width=3, relief='groove',bg='white')
line.place(x=62, y=392)
secondary_param()
def image_params2():
global img ,img_list,file,first_page,last_page
#img_path = filedialog.askopenfilename(initialdir=os.getcwd())
file = LifFile(str(img_path))
img_list = [i for i in file.get_iter_image()]
enum=[]
# fp=IntVar()
ei = Entry(root, bd =1, bg="#C3CBCE",textvariable=fp,width=4,highlightbackground='#EA6060',highlightthickness=5)
ei.place(x=312, y=185)
intvar22 = IntVar()
line = Frame(root, height=19, width=7, relief='flat',background='#263A55')
line.place(x=60, y=142)
def process4(event=None):
global first_page
first_page=fp.get()
ei = Entry(root, bd =2, bg='#263A55',fg='white',relief='flat',textvariable=fp,width=4,highlightbackground='#263A55',highlightthickness=5,highlightcolor='#263A55')
ei.place(x=312, y=185)
eii = Entry(root, bd =1, bg="#C3CBCE",textvariable=lp,width=4,highlightbackground='#EA6060',highlightthickness=5)
eii.place(x=392, y=185)
intvar22.set(100)
ei.bind('<Return>', process4)
ei.wait_variable(intvar22)
# lp=IntVar()
eii = Entry(root, bd =1, bg="#C3CBCE",textvariable=lp,width=4,highlightbackground='#EA6060',highlightthickness=5)
eii.place(x=392, y=185)
intvar33 = IntVar()
def process5(event=None):
global last_page
last_page=lp.get()
eii = Entry(root, bd =2, bg='#263A55',fg='white',relief='flat',textvariable=lp,width=4,highlightbackground='#263A55',highlightthickness=5,highlightcolor='#263A55')
eii.place(x=392, y=185)
intvar33.set(100)
eii.bind('<Return>', process5)
eii.wait_variable(intvar33)
for i in range(len(img_list)):
if i > int(first_page)-1 and i<int(last_page)+1:
img=int(i)-1
enum.append(i)
img_0 = file.get_image(int(img))
img_0.get_frame(z=0, t=0, c=0)
c1=int(nucleus_channel)
#print(int(c1),print(c2),print(c3))
frame_list = [i for i in img_0.get_iter_t(c=0, z=0)]
channel_list = [i for i in img_0.get_iter_c(t=0, z=0)]
z_list_0 = [i for i in img_0.get_iter_z(t=0, c=int(nucleus_channel)-1)]
z_list_2 = [i for i in img_0.get_iter_z(t=0, c=int(ch_1))]
z_list_3 = [i for i in img_0.get_iter_z(t=0, c=int(ch_2))]
ns = IntVar()
ts = IntVar()
plotting(saturated_images(z_list_0, z_list_0)[0],'Reds','Saturated image: '+ str(img+1))
plotting3(stacking_step(z_list_0,z_list_2,z_list_3)[1],'Greens','Channel 2 Image: '+ str(img+1))
plotting4(stacking_step(z_list_0,z_list_2,z_list_3)[2],'Blues','Channel 3 Image: '+ str(img+1))
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=344)
e1 =Entry(root, bd =1, bg='white',textvariable=ns,width=6,highlightbackground='#1A507B',highlightthickness=5)
e1.place(x=320, y=340)
intvar = IntVar()
def process1(event=None):
content = e1.get()
Ns=ns.get()
intvar.set(100)
l = Label(root, text = content,bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
sizes.append(Ns)
l.place(x=392,y=343)
e1.bind('<Return>', process1)
e1.wait_variable(intvar)
line = Frame(root, height=16, width=3, relief='groove',bg='white')
line.place(x=62, y=344)
line = Frame(root, height=16, width=3, relief='groove',bg='#EA6060')
line.place(x=62, y=392)
e2 = Entry(root, bd =1, bg='white',textvariable=ts,width=6,highlightbackground='#1A507B',highlightthickness=5)
e2.place(x=320, y=390)
intvar2 = IntVar()
def process2(event=None):
content = e2.get()
Ts=ts.get()
if Ts % 2:
intvar2.set(100)
particle_sizes.append(Ts)
l = Label(root, text = content,bg = "#C3CBCE",fg='#152635',width=4)
l.config(font =("arial bold", 10))
l.place(x=392,y=394)
import matplotlib.pyplot as plt
ploting(saturated_images(z_list_0, z_list_2)[0],'Reds','Saturated image: '+ str(img+1))
T = Text(root2, height = 5, width = 52)
l = Label(root2, text = "Diamater input: "+str(ns.get())+" pixels",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold", 8))
l.place(x=11,y=202)
l = Label(root2, text = "Particle input: "+str(ts.get())+" pixels",bg = '#4B637F',fg='#AFB9BF')
l.config(font =("arial bold", 8))
l.place(x=11,y=222)
else:
messagebox.showinfo("Task failed succesfully!", "Please instert only odd numbers (ex:1,3)")
e2.bind('<Return>', process2)
e2.wait_variable(intvar2)
line = Frame(root, height=16, width=3, relief='groove',bg='white')
line.place(x=62, y=392)
secondary_param()
def excel_output(image,number,number_of_part,number_of_part_in_Green,number_of_part_in_Blue,inpu_cell_size,inpu_part_size,zero_cell,clean_cell,fp2):
global name
t='output.xlsx'
image=pd.DataFrame(image,columns=['Image'])
number=pd.DataFrame(number,columns=['Number of Totally identified cells'])
fp2=pd.DataFrame(fp2,columns=['Number of positive cells'])
number_of_part=pd.DataFrame(number_of_part,columns=['Particles per positive cells with colocalization '])
number_of_part_in_Green=pd.DataFrame(number_of_part_in_Green,columns=['Particles per cell in the 2nd Channel'])
number_of_part_in_Blue=pd.DataFrame(number_of_part_in_Blue,columns=['Particles per cell in the 3rd Channel'])
inpu_cell_size=pd.DataFrame(inpu_cell_size,columns=['Selected nucleus size (px)'])
inpu_part_size=pd.DataFrame(inpu_part_size,columns=['Selected particle size (px)'])
zero_cell=pd.DataFrame(negative_cells,columns=['Cells with 0 signal'])
clean_cell=pd.DataFrame(clean_div,columns=['Particles with colocalization per entirety of cells'])
result = | pd.concat([image,inpu_cell_size,inpu_part_size,number,fp2,zero_cell,number_of_part_in_Green,number_of_part_in_Blue,number_of_part,clean_cell], axis=1, sort=False) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
| tm.assert_numpy_array_equal(left == pd.NaT, expected) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python3
# (c) 2017-2020 L.Spiegelberg
# validates output of flights query
import pandas as pd
import os
import glob
import numpy as np
import json
import re
from tqdm import tqdm
root_path = '.'
def compare_dfs(dfA, dfB):
if len(dfA) != len(dfB):
print('not equal, lengths do not coincide {} != {}'.format(len(dfA), len(dfB)))
return False
if len(dfA.columns) != len(dfB.columns):
print('number of columns do not coincide')
return False
str_cols = list(dfA.select_dtypes([object]).columns)
numeric_cols = list(dfA.select_dtypes([bool, int, float]).columns)
# print(numeric_cols)
# print(str_cols)
if len(str_cols) + len(numeric_cols) != len(dfA.columns):
print('column separation wrong')
return False
# go over each single row (will take a lot of time)
for i in tqdm(range(len(dfA))):
rowA = dfA.iloc[i].copy()
rowB = dfB.iloc[i].copy()
num_valsA = rowA[numeric_cols].astype(np.float64)
num_valsB = rowB[numeric_cols].astype(np.float64)
if str(rowA[str_cols].values) != str(rowB[str_cols].values):
print('{} != {}'.format(str(rowA[str_cols].values), str(rowB[str_cols].values)))
print(i)
return False
if not np.allclose(num_valsA, num_valsB, rtol=1e-3, atol=1e-3, equal_nan=True):
print('{} != {}'.format(num_valsA, num_valsB))
print(i)
return False
return True
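# Typical use (assuming both outputs were written in the same row order): compare_dfs(df_spark, df_dask)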
def main():
spark_folder = 'pyspark_output'
dask_folder = 'dask_output'
root_path = '.'
paths = os.listdir(root_path)
paths_to_verify = []
spark_paths = []
dask_paths = []
if spark_folder in paths:
spark_paths = glob.glob(os.path.join(root_path, spark_folder, '*.csv'))
paths_to_verify += spark_paths
if dask_folder in paths:
dask_paths = glob.glob(os.path.join(root_path, dask_folder, '*part*'))
dask_paths = sorted(dask_paths, key=lambda p: int(re.sub('[^0-9]', '', os.path.basename(p))))
paths_to_verify += dask_paths
print('>>> loading dask files ({} found)'.format(len(dask_paths)))
df_dask = pd.DataFrame()
for path in dask_paths:
df_dask = pd.concat((df_dask, pd.read_csv(path, low_memory=False)))
print('>>> loading spark files ({} found)'.format(len(spark_paths)))
df_spark = pd.DataFrame()
for path in spark_paths:
df = | pd.read_csv(path, low_memory=False) | pandas.read_csv |
import pandas as pd
import json
import pickle as p
#------------------------------------- PART 1 -----------------------------------#
# ------- Step 1 ---------#
# load file 100506.json into a Python dictionary of dictionaries
with open('100506.json') as input_file:
jsondat=json.load(input_file)
ratings_list = list()
new_dict = {}
categories = ['Author','Date','Ratings']
for review in jsondat['Reviews']:
for key,value in review.iteritems():
if str(key) in categories:
new_dict[key] = value
ratings_list.append(new_dict)
new_dict = {}
# len(ratings_list[0]['Ratings']
new_dictionary = {}
Ratings = ['Business service (e.g., internet access)','Check in / front desk','Cleanliness','Location','Overall','Rooms','Service','Sleep Quality','Value']
current_ratings=list()
# rating_list = ratings_list[4]['Ratings']
i=0
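# pad each review's Ratings dict with 'NaN' for any category it is missing, so every row ends up with the same nine rating columns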
for review in ratings_list:
if len(review['Ratings']) < 9:
for key,value in review['Ratings'].iteritems():
current_ratings.append(str(key))
for ratings in Ratings:
if ratings not in current_ratings:
new_dictionary[ratings] = 'NaN'
review['Ratings'].update(new_dictionary)
current_ratings=list()
new_dictionary = {}
# df_reviews = pd.DataFrame(ratings_list)
# df_reviews.to_csv('review_check')
#------------- Step 2 ------------#
#df_reviews_test1 = pd.DataFrame.from_dict(ratings_list[0]['Ratings'], orient = 'index')
#df_reviews_test2 = pd.DataFrame.from_dict(ratings_list[1]['Ratings'], orient = 'index')
#df_reviews_test1 = df_reviews_test1.T # transpose the DataFrame
#df_reviews_test2.T
# first create a DataFrame just f0r the 48 ratings
df_ratings_workhorse= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
    Pandas seems to stop evaluating the groupby expression if the dataframe after the first column split
    has length 1. This seems to be an optimization which should, however, still raise a KeyError.
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
    # First partition is empty, test this edge case
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": | pd.Series([2], dtype=np.int16) | pandas.Series |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
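    # Editor note: worked check of the first expected value from the docstring formula,
    # conc_0 = app_rate * frac_act_ing * food_multiplier = 0.34 * 0.34 * 110. = 12.716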
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
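    # Editor note: the expected values above are consistent with one day of first-order
    # decay, conc = conc_0 * 0.5 ** (1.0 / foliar_diss_hlife) (assumed form, not taken
    # from trex_exe), e.g. 0.001 * 0.5 ** (1. / 0.25) = 0.001 * 0.0625 = 6.25e-5.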
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
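    # Editor note: worked check of the first expected value from the docstring formula,
    # 100. * (15. / 175.) ** (1.15 - 1) ~= 100. * 0.0857143 ** 0.15 ~= 69.176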
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
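    # Editor note: worked check of the first expected value from the docstring formula,
    # (0.648 * 15. ** 0.651) / (1 - 0.1) ~= 3.7775 / 0.9 ~= 4.197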
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
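    # Editor note: worked check of the first expected value from the docstring formula,
    # ((0.34 * 0.15) / 128) * 8.33 * 10000 / 5. ~= 6.638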
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = | pd.Series([15., 20., 30.], dtype='float') | pandas.Series |
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import grouprule
class TestGroupRule(unittest.TestCase):
def test_empty(self):
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (C) 2018 GuQiangJs. https://github.com/GuQiangJS
# Licensed under Apache License 2.0 <see LICENSE file>
import datetime
import unittest
import numpy as np
import pandas as pd
from finance_datareader_py.sina import SinaQuoteReader
from finance_datareader_py.sina import get_cpi
from finance_datareader_py.sina import get_dividends
from finance_datareader_py.sina import get_gold_and_foreign_exchange_reserves
from finance_datareader_py.sina import get_measure_of_money_supply
from finance_datareader_py.sina import get_ppi
from finance_datareader_py.sina import get_required_reserve_ratio
class sina_TestCase(unittest.TestCase):
def test_get_dividends(self):
df1, df2 = get_dividends('000541')
self.assertIsNotNone(df1)
self.assertFalse(df1.empty)
self.assertIsNotNone(df2)
self.assertFalse(df2.empty)
print(df1)
print('------------')
print(df2)
dt = datetime.date(2018, 5, 5)
df1 = df1.loc[df1['公告日期'] == dt]
self.assertEqual(np.float64(3.29), df1.at[0, '派息(税前)(元)'])
self.assertTrue(pd.isna(df1.at[0, '红股上市日']))
self.assertEqual(pd.Timestamp(2018, 5, 10), df1.at[0, '股权登记日'])
self.assertEqual(np.float64(1), df1.at[0, '转增(股)'])
self.assertEqual(np.float64(0), df1.at[0, '送股(股)'])
self.assertEqual(pd.Timestamp(2018, 5, 11), df1.at[0, '除权除息日'])
dt = datetime.date(1994, 12, 24)
df2 = df2.loc[df2['公告日期'] == dt]
self.assertEqual(np.float64(2), df2.at[0, '配股方案(每10股配股股数)'])
self.assertEqual(np.float64(8), df2.at[0, '配股价格(元)'])
self.assertEqual(np.float64(115755000), df2.at[0, '基准股本(万股)'])
self.assertEqual(pd.Timestamp(1995, 1, 4), df2.at[0, '除权日'])
self.assertEqual( | pd.Timestamp(1995, 1, 3) | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 02:17:55 2020
Preprocessing for multiclass classification
@author: <NAME>
"""
# --------------------------------------------------------------------------- #
# Import the necessary packages
# numpy for linear algebra, cv2 for image processing
# glob and os to navigate directories
import numpy as np
import glob
import os
import sys
import pandas as pd
import random
# matplotlib for plotting
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['DejaVu Sans']})
rc('text', usetex=True)
params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params);
plt.close('all');
# --------------------------------------------------------------------------- #
# Sort out utilities for file naming
# get the name of this script
file_name = os.path.basename(sys.argv[0]);
if file_name[-3:] == '.py':
script_name = file_name[:-3];
elif file_name[-6:] == '.ipynb':
script_name = file_name[:-6];
else:
script_name = 'main_xx';
full_name = script_name+'_'+ "undersampl";
# --------------------------------------------------------------------------- #
path_base = 'TCC_dataset/'
print('Available classes in the dataset are: ');
classes_list = os.listdir(path_base)
print(classes_list);
# --------------------------------------------------------------------------- #
# Load the dataset
file_extension = "jpg";
classes_count = np.zeros([len(classes_list)],dtype=int);
for i in range(len(classes_list)):
classes_count[i] = len(glob.glob1(path_base + classes_list[i]+"/","*."+file_extension));
classes_count_total = np.sum(classes_count);
print('Our dataset comprises of %d images.' %classes_count_total);
classes_prob = classes_count*(1/np.sum(classes_count));
classes_mean = np.mean(classes_count);
classes_std = np.std(classes_count);
print("The mean number of examples is %.3f \n" %classes_mean);
print("The standard deviation is %.3f examples. \n" %classes_std);
chosen_classes = ['Audi','BMW','Lexus','Mercedes-Benz'];
print('We will classify images between the following classes:');
print(chosen_classes);
chosen_classes_num = np.zeros([len(chosen_classes)],dtype=int);
for i in range(len(chosen_classes)):
chosen_classes_num[i] = classes_count[classes_list.index(chosen_classes[i])];
chosen_classes_total = np.sum(chosen_classes_num);
print('This subset consists of %d images.' %chosen_classes_total);
# --------------------------------------------------------------------------- #
fig = plt.figure(1);
pos = np.arange(len(classes_list));
color_list = ['limegreen','indianred','teal','darkorange','cornflowerblue','lightsalmon'];
for index in pos:
plt.bar(index,classes_count[index],color=color_list[index],edgecolor='dimgray',label=r"%.3f" %(classes_prob[index]));
plt.xticks(pos,classes_list);
plt.title(r"\textbf{Distribution of classes in the} \textit{TCC dataset}",fontsize=12)
plt.xlabel(r"\textbf{Classes}")
plt.ylabel(r"\textbf{Count}")
plt.legend(loc='upper left');
plt.savefig(full_name+'_full_dataset.png');
#plt.savefig(full_name+'full_dataset.pdf');
plt.show();
# --------------------------------------------------------------------------- #
smallest_count_chosen = np.min(chosen_classes_num);
smallest_count_chosen_index = np.argmin(chosen_classes_num);
smallest_count_chosen_id = chosen_classes[smallest_count_chosen_index];
print('The least represented class is %s which has %d examples.' %(smallest_count_chosen_id,smallest_count_chosen));
print('We will undersample the other classes so that we end up with a balanced dataset')
# --------------------------------------------------------------------------- #
# Create list of file names for each class to undersample
# Choose randomly in this list to obtain the required number of examples
overall_files_list = [];
for i in range(0,len(chosen_classes)):
files_list = [];
for file in glob.glob(path_base+"/"+chosen_classes[i]+"/*."+file_extension):
index_for_filename = file.index('\\');
files_list.append(file[index_for_filename+1:]);
random.shuffle(files_list);
overall_files_list.extend(files_list[:smallest_count_chosen]);
df_list = | pd.DataFrame(overall_files_list) | pandas.DataFrame |
import os
import gc
import re
import json
import random
import numpy as np
import pandas as pd
import scipy.io as sio
from tqdm import tqdm
import matplotlib.pyplot as plt
from daisy.utils.data import incorporate_in_ml100k
from scipy.sparse import csr_matrix
from collections import defaultdict
from IPython import embed
def convert_unique_idx(df, col):
column_dict = {x: i for i, x in enumerate(df[col].unique())}
df[col] = df[col].apply(column_dict.get)
assert df[col].min() == 0
assert df[col].max() == len(column_dict) - 1
return df
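# Editor example (hypothetical data): convert_unique_idx(pd.DataFrame({'user': ['b', 'a', 'b']}), 'user')
# remaps the column to dense ids in order of first appearance, i.e. 'b' -> 0, 'a' -> 1,
# giving [0, 1, 0]; the asserts then guarantee the ids cover 0..n_unique-1.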
def cut_down_data_half(df):
cut_df = pd.DataFrame([])
for u in np.unique(df.user):
aux = df[df['user'] == u].copy()
        cut_df = cut_df.append(aux.sample(int(len(aux) / 2)))  # sample half of this user's interactions
return cut_df
def filter_users_and_items(df, num_users=None, freq_items=None, top_items=None, keys=['user', 'item']):
'''
Reduces the dataframe to a number of users = num_users and it filters the items by frequency
'''
if num_users is not None:
# df = df[df['user_id'].isin(np.unique(df.user_id)[:num_users])]
df = df[df[keys[0]].isin(np.unique(df[keys[0]])[:num_users])]
# Get top5k books
if top_items is not None:
top5k_books = df[keys[1]].value_counts()[:top_items].index
df = df[df[keys[1]].isin(top5k_books)]
if freq_items is not None:
frequent_items = df['item'].value_counts()[df['item'].value_counts() > freq_items].index
df = df[df[keys[1]].isin(frequent_items)]
return df
def run_statistics(df, src):
path = f'histograms/{src}'
bins = 30
os.makedirs(path, exist_ok=True)
f = open(os.path.join(path, "information.txt"), "w+")
f.write("Information:\n")
f.write("==========================\n")
f.write(f"Interactions: {len(df)}\n")
f.write(f"#users = {df['user'].nunique()}\n")
f.write(f"#items = {df['item'].nunique()}\n")
f.close()
for key in ['user', 'item']:
        # OPTION A: HISTOGRAM
a = pd.DataFrame(df.groupby([key])[key].count())
a.columns = ['value_counts']
a.reset_index(level=[0], inplace=True)
dims = (15, 5)
fig, ax = plt.subplots(figsize=dims)
a["value_counts"].hist(bins=200)
# fig.savefig('hist.jpg')
fig.savefig(os.path.join(path, f'{src}_histogram_{key}_bins={bins}.png'))
fig.clf()
        # OPTION: BAR PLOT
# a = pd.DataFrame(df_year.groupby(['user'])['user'].count())
# a.columns = ['value_counts']
# a.reset_index(level=[0], inplace=True)
# dims = (15, 5)
# fig, ax = plt.subplots(figsize=dims)
# sns.set_style("darkgrid")
# sns.barplot(ax=ax, x="user", y="value_counts", data=a, palette="Blues_d")
# ax.set(xlabel="User", ylabel="Value Counts")
# plt.xticks(rotation=45)
# plt.show()
# fig.savefig('data.jpg')
def load_rate(src='ml-100k', prepro='origin', binary=True, pos_threshold=None, level='ui', context=False,
gce_flag=False, cut_down_data=False, side_info=False, context_type='', context_as_userfeat=False,
flag_run_statistics=False, remove_top_users=0, remove_on='item'):
"""
Method of loading certain raw data
Parameters
----------
src : str, the name of dataset
prepro : str, way to pre-process raw data input, expect 'origin', f'{N}core', f'{N}filter', N is integer value
binary : boolean, whether to transform rating to binary label as CTR or not as Regression
pos_threshold : float, if not None, treat rating larger than this threshold as positive sample
level : str, which level to do with f'{N}core' or f'{N}filter' operation (it only works when prepro contains 'core' or 'filter')
Returns
-------
df : pd.DataFrame, rating information with columns: user, item, rating, (options: timestamp)
user_num : int, the number of users
item_num : int, the number of items
"""
df = pd.DataFrame()
# import mat73
# a = mat73.loadmat('data/gen-disease/genes_phenes.mat')
    # which dataset to use
if src == 'ml-100k':
df = pd.read_csv(f'./data/{src}/u.data', sep='\t', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
if cut_down_data:
df = cut_down_data_half(df) # from 100k to 49.760 interactions
elif src == 'drugs':
union = False
if union == True:
df = pd.read_csv(f'./data/{src}/train_data_contextUNION_sideeffect.csv', engine='python', index_col=0)
df.drop(columns=['context'], inplace=True)
df.rename(columns={'drug': 'user', 'disease': 'item',
'context_union': 'context',
'proteins': 'item-feat', 'side_effect': 'user-feat'}, inplace=True)
else:
df = pd.read_csv(f'./data/{src}/train_data_allcontext_sideeffect.csv', engine='python', index_col=0)
df.rename(columns={'drug': 'user', 'disease': 'item',
# 'proteins_drug': 'user-feat',
'proteins': 'item-feat', 'side_effect': 'user-feat'}, inplace=True)
if not context:
df = df[['user', 'item']]
else:
if context_as_userfeat:
df = df[['user', 'item', 'user-feat', 'item-feat']]
else:
df = df[['user', 'item', 'context', 'user-feat']]
df['array_context_flag'] = True
df['timestamp'] = 1
df['rating'] = 1
elif src == 'ml-1m':
df = pd.read_csv(f'./data/{src}/ratings.dat', sep='::', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
# only consider rating >=4 for data density
# df = df.query('rating >= 4').reset_index(drop=True).copy()
elif src == 'ml-10m':
df = pd.read_csv(f'./data/{src}/ratings.dat', sep='::', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
# df = df.query('rating >= 4').reset_index(drop=True).copy()
elif src == 'ml-20m':
df = pd.read_csv(f'./data/{src}/ratings.csv')
df.rename(columns={'userId': 'user', 'movieId': 'item'}, inplace=True)
# df = df.query('rating >= 4').reset_index(drop=True)
elif src == 'books':
if not os.path.exists(f'./data/{src}/preprocessed_books_complete_timestamp.csv'):
df = pd.read_csv(f'./data/{src}/preprocessed_books_complete.csv', sep=',', engine='python')
df.rename(columns={'user_id': 'user', 'book_id': 'item', 'date_added': 'timestamp'}, inplace=True)
df = convert_unique_idx(df, 'user')
df = convert_unique_idx(df, 'item')
df['rating'] = 1.0
# if type(df['timestamp'][0]) == 'str':
df['date'] = pd.to_datetime(df['timestamp'])
df['timestamp'] = pd.to_datetime(df['date'], utc=True)
df['timestamp'] = df.timestamp.astype('int64') // 10 ** 9
df.to_csv(f'./data/{src}/preprocessed_books_complete_timestamp.csv', sep=',', index=False)
else:
df = pd.read_csv(f'./data/{src}/preprocessed_books_complete_timestamp.csv', sep=',', engine='python')
del df['date']
# reduce users to 3000 and filter items by clicked_frequency > 10
df = filter_users_and_items(df, num_users=4000, freq_items=50, top_items=5000, keys=['user', 'item']) # 35422 books
elif src == 'music':
df = pd.read_csv(f'./data/{src}-context/train.csv')
if side_info:
# ['user_id', 'track_id', 'hashtag', 'created_at', 'score', 'lang', 'tweet_lang', 'time_zone',
# 'instrumentalness', 'liveness', 'speechiness', 'danceability', 'valence', 'loudness', 'tempo',
# 'acousticness', 'energy', 'mode', 'key', 'rating']
df.rename(columns={'user_id': 'user', 'track_id': 'item', 'created_at': 'timestamp', 'speechiness': 'side_info'},
inplace=True)
df = df[['user', 'item', 'timestamp', 'side_info']]
# PREPROCESS SPEECHINESS # VALUE 10 FOR NON EXISTING FEATURE
df['side_info'] = df['side_info'].round(1)
df['side_info'] = df['side_info']*10
df['side_info'] = df['side_info'].fillna(10)
df['side_info'] = df['side_info'].astype(int)
else:
df.rename(columns={'user_id': 'user', 'track_id': 'item', 'created_at': 'timestamp'}, inplace=True)
df = df[['user', 'item', 'timestamp']]
# df = df.query('rating >= 4').reset_index(drop=True)
df = convert_unique_idx(df, 'user')
df = convert_unique_idx(df, 'item')
df = filter_users_and_items(df, num_users=3000, freq_items=20, keys=['user', 'item']) # 18508 songs - 3981 users
        # FILTER OUT USERS WITH FEWER THAN 3 INTERACTIONS
df_aux = df.groupby('user').count().reset_index()[['user', 'item']]
indexes = df_aux[df_aux['item'] >= 3]['user'].index
df = df[df['user'].isin(indexes)]
df['rating'] = 1.0
df['timestamp'] = pd.to_datetime(df['timestamp'], utc=True)
df['timestamp'] = df.timestamp.astype('int64') // 10 ** 9
prepro = 'origin'
elif src == 'frappe':
df1 = pd.read_csv(f'./data/{src}/{src}_xin/train.csv', sep=',', header=None)
df2 = | pd.read_csv(f'./data/{src}/{src}_xin/test.csv', sep=',', header=None) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
from aif360.datasets import AdultDataset
from aif360.datasets import GermanDataset
from aif360.datasets import MEPSDataset19
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
def Adult_dataset(name_prot = 'sex'):
dataset_orig = AdultDataset(protected_attribute_names=['sex'],
privileged_classes= [['Male']],
features_to_keep=['age', 'education-num'])
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.rename(columns={'income-per-year':'labels'}, inplace = True)
data.reset_index(inplace = True, drop = True)
sensitive = data[name_prot]
output = dataset_orig.labels
atribute = data.drop('labels', axis = 1, inplace = False)
atribute.drop(name_prot, axis = 1, inplace = True)
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def german_dataset_age(name_prot=['age']):
dataset_orig = GermanDataset(
protected_attribute_names = name_prot,
privileged_classes=[lambda x: x >= 25],
features_to_drop=['personal_status', 'sex']
)
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.rename(columns={'credit':'labels'}, inplace = True)
sensitive = data[name_prot]
output = data['labels']
output.replace((1,2),(0,1),inplace = True)
atribute = data.drop('labels', axis = 1, inplace = False)
atribute.drop(name_prot, axis = 1, inplace = True)
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def german_dataset_sex(name_prot=['sex']):
dataset_orig = GermanDataset(
protected_attribute_names = name_prot,
features_to_drop=['personal_status', 'age']
)
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.rename(columns={'credit':'labels'}, inplace = True)
sensitive = data[name_prot]
output = data['labels']
output.replace((1,2),(0,1),inplace = True)
atribute = data.drop('labels', axis = 1, inplace = False)
atribute.drop(name_prot, axis = 1, inplace = True)
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def medical_dataset(name_prot = 'RACE'):
dataset_orig = MEPSDataset19()
privileged_groups = [{'RACE': 1}]
unprivileged_groups = [{'RACE': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.reset_index(inplace = True, drop = True)
data.rename(columns={'UTILIZATION':'labels'}, inplace = True)
sensitive = data[name_prot]
atribute = data.drop(name_prot, axis = 1, inplace = False)
atribute.drop(['labels'], axis =1, inplace =True)
output = data['labels']
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def Readmission_dataset():
folder_name = os.path.join('datasets_raw','readmission.csv')
data = pd.read_csv(folder_name)
data.drop(['ID','readmitDAYS'], axis = 1, inplace = True)
data.rename(columns={'readmitBIN':'labels'}, inplace = True)
sensitive = data['FEMALE']
output = data['labels']
atribute = data.drop(['labels','FEMALE'], axis = 1)
pr_gr = [{'FEMALE': 0}]
un_gr = [{'FEMALE': 1}]
return data, atribute, sensitive, output, pr_gr, un_gr
def format_datasets(data, atribute, sensitive, output, out_name = "labels", sens_name = "sex", test_s = 0.15, val_s = 0.15):
data_train, data_test_all = train_test_split(data, test_size = test_s + val_s, random_state = 30)
data_val, data_test = train_test_split(data_test_all, test_size = test_s/(test_s + val_s), random_state = 30)
sensitive_train = data_train[sens_name]
sensitive_val = data_val[sens_name]
sensitive_test = data_test[sens_name]
output_train = data_train[out_name]
output_val = data_val[out_name]
output_test = data_test[out_name]
atribute_train = data_train.drop([out_name, sens_name], axis = 1, inplace=False)
atribute_val = data_val.drop([out_name, sens_name], axis = 1, inplace=False)
atribute_test = data_test.drop([out_name, sens_name], axis = 1, inplace=False)
return data_train, data_test, data_val, atribute_train, atribute_val, atribute_test, sensitive_train, sensitive_val, sensitive_test, output_train, output_val, output_test
def test(dataset_val, dataset_test,
model, y_val, y_test, A_val, A_test, thresh,
model_AIF, k, dataloader_val, dataloader_test, protected, unprivileged_groups, privileged_groups):
protected = [protected]
bld_val = BinaryLabelDataset(df = dataset_val, label_names = ['labels'],
protected_attribute_names=protected)
bld_test = BinaryLabelDataset(df = dataset_test, label_names = ['labels'],
protected_attribute_names=protected)
if np.isin(k ,model_AIF):
y_val_pred_prob_val = model.predict_proba(bld_val)
A_prob_val = 0
y_val_pred_prob_test = model.predict_proba(bld_test)
A_prob_test = 0
else:
y_val_pred_prob_val, A_prob_val = model.predict_proba(dataloader_val)
y_val_pred_prob_test, A_prob_test = model.predict_proba(dataloader_test)
def metrics_form(y_val_pred_prob, y_test, A_prob, A_test, bld, dataset):
metric_arrs = np.empty([0,8])
if np.isin(k ,model_AIF):
y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
else:
y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
# A_pred = (A_prob > thresh).astype(np.float64)
metric_arrs = np.append(metric_arrs, roc_auc_score(y_test, y_val_pred_prob))
# print("y {}".format(roc_auc_score(y_test, y_val_pred_prob)))
metric_arrs = np.append(metric_arrs, accuracy_score(y_test, y_val_pred))
if np.isin(k,model_AIF):
metric_arrs = np.append(metric_arrs, 0)
else:
# metric_arrs = np.append(metric_arrs, roc_auc_score(A_test, A_prob))
metric_arrs = np.append(metric_arrs, 0)
# print("A {}".format(roc_auc_score(A_test, A_prob)))
dataset_pred = dataset.copy()
dataset_pred.labels = y_val_pred
bld2 = BinaryLabelDataset(df = dataset_pred, label_names = ['labels'], protected_attribute_names = protected)
metric = ClassificationMetric(
bld, bld2,
unprivileged_groups = unprivileged_groups,
privileged_groups = privileged_groups)
metric_arrs = np.append(metric_arrs, ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
metric_arrs = np.append(metric_arrs, np.abs(metric.average_odds_difference()))
metric_arrs = np.append(metric_arrs, metric.disparate_impact())
metric_arrs = np.append(metric_arrs, np.abs(metric.statistical_parity_difference()))
metric_arrs = np.append(metric_arrs, np.abs(metric.equal_opportunity_difference()))
return metric_arrs
metric_val = metrics_form(y_val_pred_prob_val, y_val, A_prob_val, A_val, bld_val, dataset_val)
metric_test = metrics_form(y_val_pred_prob_test, y_test, A_prob_test, A_test, bld_test, dataset_test)
return metric_val, metric_test
class Dataset_format(Dataset):
def __init__(self, atribute, sensitive, output):
self.atribute = atribute.values
self.sensitive = sensitive.values
self.output = output.values
def __len__(self):
return len(self.atribute)
def __getitem__(self, idx):
return self.atribute[idx], self.output[idx], self.sensitive[idx]
def Pareto_optimal(dataset, FAIR = True):
def identify_pareto(scores):
# Count number of items
population_size = scores.shape[0]
# Create a NumPy index for scores on the pareto front (zero indexed)
population_ids = np.arange(population_size)
# Create a starting list of items on the Pareto front
        # All items start off as being labelled as on the Pareto front
pareto_front = np.ones(population_size, dtype=bool)
# Loop through each item. This will then be compared with all other items
for i in range(population_size):
# Loop through all other items
for j in range(population_size):
# Check if our 'i' pint is dominated by out 'j' point
if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
# j dominates i. Label 'i' point as not on Pareto front
pareto_front[i] = 0
# Stop further comparisons with 'i' (no more comparisons needed)
break
# Return ids of scenarios on pareto front
return population_ids[pareto_front]
points = pd.DataFrame()
for i in dataset.index.unique():
score = dataset[dataset.index == i].values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
population_ids = identify_pareto(score)
points = points.append(dataset[dataset.index == i].iloc[population_ids,[2,3]])
score = points.values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
population_ids = identify_pareto(score)
pareto_optimal = points.iloc[population_ids,:]
return pareto_optimal, points
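# Editor example (toy scores, both objectives to be maximised): for
#   scores = np.array([[1.0, 1.0], [2.0, 0.5], [0.5, 2.0], [0.5, 0.5]])
# the inner identify_pareto keeps ids [0, 1, 2]; only [0.5, 0.5] is dominated
# (by [1.0, 1.0] on both objectives). Note that the FAIR=True branch flips the fairness
# column (100 - score) so that lower unfairness counts as better before this test.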
def Pareto_optimal_total(dataset, FAIR = True, name = "proba"):
def identify_pareto(scores):
# Count number of items
population_size = scores.shape[0]
# Create a NumPy index for scores on the pareto front (zero indexed)
population_ids = np.arange(population_size)
# Create a starting list of items on the Pareto front
# All items start off as being labelled as on the Parteo front
pareto_front = np.ones(population_size, dtype=bool)
# Loop through each item. This will then be compared with all other items
for i in range(population_size):
# Loop through all other items
for j in range(population_size):
                # Check if our 'i' point is dominated by our 'j' point
if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
# j dominates i. Label 'i' point as not on Pareto front
pareto_front[i] = 0
# Stop further comparisons with 'i' (no more comparisons needed)
break
# Return ids of scenarios on pareto front
return population_ids[pareto_front]
points = pd.DataFrame()
for i in dataset.index.unique():
score = dataset[dataset.index == i].values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
score[:,2] = 100 - score[:,2]
score[:,3] = 100 - score[:,3]
population_ids = identify_pareto(score)
points = points.append(dataset[dataset.index == i].iloc[population_ids,[4,5,6,7]])
score = points.values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
score[:,2] = 100 - score[:,2]
score[:,3] = 100 - score[:,3]
population_ids = identify_pareto(score)
pareto_optimal = points.iloc[population_ids,:]
pareto_optimal.to_excel("{}.xlsx".format(name))
return pareto_optimal
def plot_Pareto_fronts(PO_points_AOD, PO_points_ASPD, PO_points_AEOD, upper_bound = 0.1, lower_bound = -0.002, name = "Readmission"):
dict_marker = {"PR":'o', "DI-NN":'v', "DI-RF":'^', "Reweighing-NN":'>', "Reweighing-RF":'<', "FAD":'8',
'FAD-prob':'s', "FAIR-scalar":'p', 'FAIR-betaREP':'P', "FAIR-Bernoulli":"*", "FAIR-betaSF":"h"}
dict_color = {"PR":'b', "DI-NN":'g', "DI-RF":'r', "Reweighing-NN":'c', "Reweighing-RF":'m', "FAD":'y',
'FAD-prob':'k', "FAIR-scalar":'brown', 'FAIR-betaREP':'teal', "FAIR-Bernoulli":"blueviolet", "FAIR-betaSF":"crimson"}
size = 100
figure1 = plt.figure(figsize=(9, 12))
PO_points_AOD['labels'] = PO_points_AOD.index
ax1 = plt.subplot(311)
for k,d in PO_points_AOD.groupby('labels'):
if k == "FAD-prob":
continue
ax1.scatter(d.iloc[:,1], d.iloc[:,0], label=k, c=dict_color[k], marker = dict_marker[k], s=size)
# ax1.set_ylim(0.5,1)
ax1.set_xlim(lower_bound, upper_bound)
# ax1.set_xlim(0,model1.time_control[-1])
ax1.set_ylabel('AUC$_y$', fontweight="bold")
ax1.set_xlabel("AOD", fontweight="bold")
ax1.grid()
ax1.legend(loc = 'lower right')
PO_points_ASPD['labels'] = PO_points_ASPD.index
ax2 = plt.subplot(312)
for k,d in PO_points_ASPD.groupby('labels'):
if k == "FAD-prob":
continue
ax2.scatter(d.iloc[:,1], d.iloc[:,0], label=k, c=dict_color[k], marker = dict_marker[k], s=size)
# ax2.set_ylim(0.5,1)
ax2.set_xlim(lower_bound, upper_bound)
# ax1.set_xlim(0,model1.time_control[-1])
ax2.set_ylabel('AUC$_y$', fontweight="bold")
ax2.set_xlabel("ASD", fontweight="bold")
ax2.grid()
ax2.legend(loc = 'lower right')
PO_points_AEOD['labels'] = PO_points_AEOD.index
ax3 = plt.subplot(313)
for k,d in PO_points_AEOD.groupby('labels'):
if k == "FAD-prob":
continue
ax3.scatter(d.iloc[:,1], d.iloc[:,0], label=k, c=dict_color[k], marker = dict_marker[k], s=size)
# ax3.set_ylim(0.5,1)
ax3.set_xlim(lower_bound, upper_bound)
# ax1.set_xlim(0,model1.time_control[-1])
ax3.set_ylabel('AUC$_y$', fontweight="bold")
ax3.set_xlabel("AEOD", fontweight="bold")
ax3.grid()
ax3.legend(loc = 'lower right')
plt.setp([a.get_xticklabels() for a in [ax1, ax2]], visible=False)
plt.savefig('{}.png'.format(name))
def plot_AUC_Y_AUC_A(name):
figure2 = plt.figure(figsize=(9, 8))
points = pd.read_excel("Results/Ger_age.xls", index_col=0)
ax1 = plt.subplot(211)
ax1.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,0], label = "AUC$_y$")
ax1.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,2], label = "AUC$_s$")
plt.xscale("log")
ax1.set_ylabel('AUC', fontweight="bold")
ax1.set_title("German age", fontweight="bold")
# ax1.set_xlabel("alpha", fontweight="bold")
ax1.grid()
ax1.set_xlim(0, 1000)
ax1.legend()
points = pd.read_excel("Results/ger_sex.xlsx", index_col=0)
ax2 = plt.subplot(212)
ax2.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,0], label = "AUC$_y$")
ax2.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,2], label = "AUC$_s$")
plt.xscale("log")
ax2.set_ylabel('AUC', fontweight="bold")
ax2.set_title("German sex", fontweight="bold")
ax2.set_xlabel(r'$\alpha$', fontweight="bold")
ax2.grid()
ax2.set_xlim(0, 1000)
ax2.legend()
plt.setp([a.get_xticklabels() for a in [ax1]], visible=False)
plt.savefig('{}.png'.format(name))
if __name__ == "__main__":
col_AUC_y_val = 0
col_AUC_A_val = 7
add = 4
aaa = np.logspace(-2, np.log10(5), num = 8)
points = pd.read_excel("Results/readmission.xls", index_col=0)
po_Read_AOD, po_Read_AOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
po_Read_total = Pareto_optimal_total(points.iloc[:,[0, 4, 5, 6, 7, 11, 12, 13]], FAIR=True, name = "Results/Readmission_PO")
points = | pd.read_excel("Results/Adult.xls", index_col=0) | pandas.read_excel |
import os
import re
import sys
import numpy as np
import pandas as pd
import pathlib
from biopandas.pdb import PandasPdb
import glob
import json
complexes = []
hdir = "/media/hdd1/roproQ3drew"
dirs = os.listdir(hdir)
master_blaster = | pd.DataFrame(columns=['key', 'complex_name', 'complex', 'complex_relaxed', 'directory']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = | algos.factorize(x, sort=True) | pandas.core.algorithms.factorize |
import os
import numpy as np
import pandas as pd
from scipy.io import loadmat, savemat
import matplotlib.pyplot as plt
import seaborn as sns
def match_strings(strings, path, any_or_all='any'):
if any_or_all == 'any':
return any([string in path for string in strings])
elif any_or_all == 'all':
return all([string in path for string in strings])
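# Quick illustration (hypothetical path): match_strings(["Conv2d", "ReLU"],
# "layer_3-Conv2d.mat") is True with any_or_all='any' because one of the
# substrings occurs in the path; with any_or_all='all' it would be False.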
def fix(datum, path):
## hopefully vestigial code, previously used in load_data to fix a massive diagonal EV matrix.
if datum['EV_vec'].shape[0] > 1:
print('fixing EV_vec')
print(datum['EV_vec'].shape)
print(path)
datum['EV_vec'] = np.diag(datum['EV_vec'])
savemat(path, datum, appendmat=False)
return datum
def load_data(mani_dir, exclude=[]):
# takes a directory and loads all the .mat files from batch_manifold_analysis.m
# return paths and data
paths = np.sort(np.array(os.listdir(mani_dir)))
if len(exclude)>0:
paths = [path for path in paths if not match_strings(exclude, path)]
data = []
for path in paths:
datum = loadmat(mani_dir+path)
if 'EV_vec' in datum.keys():
#datum = fix(datum, mani_dir+path)
pass
data.append(datum)
return paths, np.array(data)
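# Typical call (hypothetical directory): paths, data = load_data(
#     "manifolds/", exclude=["Softmax"]) loads every .mat result file whose
# name does not contain one of the excluded substrings.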
#def load_data(mani_dir, exclude=[]):
# # takes a directory and loads all the .mat files from batch_manifold_analysis.m
# # return paths and data
# paths = np.sort(np.array(os.listdir(mani_dir)))
# if len(exclude)>0:
# paths = [path for path in paths if not match_strings(exclude, path)]
# data = np.array([loadmat(mani_dir+path) for path in paths])
# return paths, data
def get_layer_type(path, types):
for t in types:
if t in path:
return t
def mi_outliers(data_vec):
for i in range(len(data_vec)):
row_mean = data_vec[i].mean()
row_std = data_vec[i].std()
for j in range(len(data_vec[i])):
if np.abs(data_vec[i][j] - row_mean) > row_std*2:
data_vec[i][j] = row_mean
return data_vec
def fill_input_features(df, input_features=3072):
df['featnum'] = df['featnum'].fillna(value=input_features)
def frame_constructor(paths, data, key, tag=None, mean=False, verbose=False, rm_outliers=True):
perm_seed = [catch(path, 'seed') for path in paths]
featnum = [catch(path, 'featnum') for path in paths]
acc = [catch(path, 'acc') for path in paths]
arch = [catch(path, 'arch') for path in paths]
RP = [catch(path, 'RP') for path in paths]
lnum = [path.split('-')[3].split('_')[1] for path in paths]
coding = [path.split('-')[3].split('_')[0] for path in paths]
epochs = np.array([int(path.split('-')[1].split('_')[1]) for path in paths])
image_set = np.array([path.split('-')[0] for path in paths])
data_vec = np.array([np.squeeze(datum[key]) for datum in data])
if mean:
if rm_outliers:
mi_outliers(data_vec)
data_vec = np.mean(data_vec,axis=1)
data_vec = np.atleast_2d(data_vec)
if verbose:
print('data_vec.shape: ', data_vec.shape)
if data_vec.shape[0]<data_vec.shape[1]:
data_vec = data_vec.T
df = pd.DataFrame(
columns=[
'path',
'imageset',
'epoch',
'layer number',
'coding',
'seed',
'featnum',
'acc',
'arch',
'RP',
'value',
'measure',
'tag'
],
data=np.array([
np.repeat([paths],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([image_set],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([epochs],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([lnum],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([coding],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([perm_seed],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([featnum],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([acc],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([arch],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([RP],data_vec.shape[-1],axis=0).T.reshape(-1),
data_vec.reshape(-1),
np.repeat(key,data_vec.size),
np.repeat(tag,data_vec.size)
]).T
)
types = ['input', 'AvgPool2d', 'MaxPool2d', 'Conv2d', 'ReLU', 'Sequential', 'Linear', 'BatchNorm2d', 'Softmax']
df['type'] = df.path.apply(lambda x: get_layer_type(x, types))
df['value'] = pd.to_numeric(df['value'], errors='coerce')
df['acc'] = pd.to_numeric(df['acc'], errors='coerce')
df['epoch'] = pd.to_numeric(df['epoch'], errors='coerce')
df['seed'] = pd.to_numeric(df['seed'], errors='coerce')
df['featnum'] = pd.to_numeric(df['featnum'], errors='coerce')
df['layer number'] = | pd.to_numeric(df['layer number'], errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from glob import glob
import os.path as path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""
analyze_player.py
This program implements functions to analyze (and assist in analyzing) player
stats.
"""
def join_years(player_dir):
"""Join the stat years for a player into one pandas dataframe.
:player_dir: TODO
:returns: TODO
"""
# Sort the files by year.
year_csvs = sorted(glob(path.join(player_dir, "*")))
dfs = []
master_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Implements the feature tranformers of the VAEP framework."""
from typing import Callable, List, Type
import numpy as np
import pandas as pd
from pandera.typing import DataFrame
import socceraction.atomic.spadl.config as atomicspadl
from socceraction.atomic.spadl import AtomicSPADLSchema
from socceraction.vaep.features import (
actiontype,
bodypart,
bodypart_onehot,
gamestates,
simple,
team,
time,
time_delta,
)
__all__ = [
'feature_column_names',
'play_left_to_right',
'gamestates',
'actiontype',
'actiontype_onehot',
'bodypart',
'bodypart_onehot',
'team',
'time',
'time_delta',
'location',
'polar',
'movement_polar',
'direction',
'goalscore',
]
Actions = Type[DataFrame[AtomicSPADLSchema]]
GameStates = List[Actions]
Features = Type[DataFrame]
FeatureTransfomer = Callable[[GameStates], Features]
def feature_column_names(fs: List[FeatureTransfomer], nb_prev_actions: int = 3) -> List[str]:
"""Return the names of the features generated by a list of transformers.
Parameters
----------
fs : list(callable)
A list of feature transformers.
nb_prev_actions : int (default = 3)
The number of previous actions included in the game state.
Returns
-------
list(str)
The name of each generated feature.
"""
spadlcolumns = [
'game_id',
'period_id',
'time_seconds',
'timestamp',
'team_id',
'player_id',
'x',
'y',
'dx',
'dy',
'bodypart_id',
'bodypart_name',
'type_id',
'type_name',
]
dummy_actions = pd.DataFrame(np.zeros((10, len(spadlcolumns))), columns=spadlcolumns)
for c in spadlcolumns:
if 'name' in c:
dummy_actions[c] = dummy_actions[c].astype(str)
gs = gamestates(dummy_actions, nb_prev_actions)
return list(pd.concat([f(gs) for f in fs], axis=1).columns)
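# Note that no real data is touched here: a 10-row dummy action table is
# wrapped into game states of length nb_prev_actions and each transformer's
# output columns are concatenated. For instance,
#   feature_column_names([actiontype, bodypart], nb_prev_actions=2)
# lists the actiontype and bodypart feature names for both actions in the
# two-action game state.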
def play_left_to_right(gamestates: GameStates, home_team_id: int) -> GameStates:
"""Perform all action in the same playing direction.
This changes the start and end location of each action, such that all actions
are performed as if the team plays from left to right.
Parameters
----------
gamestates : list(pd.DataFrame)
The game states of a game.
home_team_id : int
The ID of the home team.
Returns
-------
list(pd.DataFrame)
The game states with all actions performed left to right.
"""
a0 = gamestates[0]
away_idx = a0.team_id != home_team_id
for actions in gamestates:
actions.loc[away_idx, 'x'] = atomicspadl.field_length - actions[away_idx]['x'].values
actions.loc[away_idx, 'y'] = atomicspadl.field_width - actions[away_idx]['y'].values
actions.loc[away_idx, 'dx'] = -actions[away_idx]['dx'].values
actions.loc[away_idx, 'dy'] = -actions[away_idx]['dy'].values
return gamestates
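# Example of the flip (made-up values): an away-team action at x = 10 with
# dx = +5 becomes x = field_length - 10 with dx = -5, so both teams appear
# to attack in the same direction when features are computed.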
@simple
def actiontype_onehot(actions: Actions) -> Features:
"""Get the one-hot-encoded type of each action.
Parameters
----------
actions : pd.DataFrame
The actions of a game.
Returns
-------
pd.DataFrame
A one-hot encoding of each action's type.
"""
X = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT
import pandas.compat as compat
from pandas import (
DatetimeIndex, Index, NaT, Period, Series, Timedelta, TimedeltaIndex,
Timestamp, isna)
from pandas.core.arrays import PeriodArray
from pandas.util import testing as tm
@pytest.mark.parametrize("nat,idx", [(Timestamp("NaT"), DatetimeIndex),
(Timedelta("NaT"), TimedeltaIndex),
(Period("NaT", freq="M"), PeriodArray)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
ser = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"),
NaT, "NaT", "nat"])
def test_identity(klass, value):
assert klass(value) is NaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan])
def test_equality(klass, value):
if klass is Period and value == "":
pytest.skip("Period cannot parse empty string")
assert klass(value).value == iNaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(klass, method, freq):
# see gh-14940
ts = klass("nat")
round_method = getattr(ts, method)
assert round_method(freq) is ts
@pytest.mark.parametrize("method", [
"astimezone", "combine", "ctime", "dst", "fromordinal",
"fromtimestamp", "isocalendar", "strftime", "strptime",
"time", "timestamp", "timetuple", "timetz", "toordinal",
"tzname", "utcfromtimestamp", "utcnow", "utcoffset",
"utctimetuple", "timestamp"
])
def test_nat_methods_raise(method):
# see gh-9513, gh-17329
msg = "NaTType does not support {method}".format(method=method)
with pytest.raises(ValueError, match=msg):
getattr(NaT, method)()
@pytest.mark.parametrize("method", [
"weekday", "isoweekday"
])
def test_nat_methods_nan(method):
# see gh-9513, gh-17329
assert np.isnan(getattr(NaT, method)())
@pytest.mark.parametrize("method", [
"date", "now", "replace", "today",
"tz_convert", "tz_localize"
])
def test_nat_methods_nat(method):
# see gh-8254, gh-9513, gh-17329
assert getattr(NaT, method)() is NaT
@pytest.mark.parametrize("get_nat", [
lambda x: NaT,
lambda x: Timedelta(x),
lambda x: Timestamp(x)
])
def test_nat_iso_format(get_nat):
# see gh-12300
assert get_nat("NaT").isoformat() == "NaT"
@pytest.mark.parametrize("klass,expected", [
(Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]),
(Timedelta, ["components", "delta", "is_populated", "to_pytimedelta",
"to_timedelta64", "view"])
])
def test_missing_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# Here, we check which public methods NaT does not have. We
# ignore any missing private methods.
nat_names = dir(NaT)
klass_names = dir(klass)
missing = [x for x in klass_names if x not in nat_names and
not x.startswith("_")]
missing.sort()
assert missing == expected
def _get_overlap_public_nat_methods(klass, as_tuple=False):
"""
Get overlapping public methods between NaT and another class.
Parameters
----------
klass : type
The class to compare with NaT
as_tuple : bool, default False
Whether to return a list of tuples of the form (klass, method).
Returns
-------
overlap : list
"""
nat_names = dir(NaT)
klass_names = dir(klass)
overlap = [x for x in nat_names if x in klass_names and
not x.startswith("_") and
callable(getattr(klass, x))]
# Timestamp takes precedence over Timedelta in terms of overlap.
if klass is Timedelta:
ts_names = dir(Timestamp)
overlap = [x for x in overlap if x not in ts_names]
if as_tuple:
overlap = [(klass, method) for method in overlap]
overlap.sort()
return overlap
@pytest.mark.parametrize("klass,expected", [
(Timestamp, ["astimezone", "ceil", "combine", "ctime", "date", "day_name",
"dst", "floor", "fromisoformat", "fromordinal",
"fromtimestamp", "isocalendar", "isoformat", "isoweekday",
"month_name", "now", "replace", "round", "strftime",
"strptime", "time", "timestamp", "timetuple", "timetz",
"to_datetime64", "to_numpy", "to_pydatetime", "today",
"toordinal", "tz_convert", "tz_localize", "tzname",
"utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple",
"weekday"]),
(Timedelta, ["total_seconds"])
])
def test_overlap_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# In case when Timestamp, Timedelta, and NaT are overlap, the overlap
# is considered to be with Timestamp and NaT, not Timedelta.
# "fromisoformat" was introduced in 3.7
if klass is Timestamp and not compat.PY37:
expected.remove("fromisoformat")
assert _get_overlap_public_nat_methods(klass) == expected
@pytest.mark.parametrize("compare", (
_get_overlap_public_nat_methods(Timestamp, True) +
_get_overlap_public_nat_methods(Timedelta, True))
)
def test_nat_doc_strings(compare):
# see gh-17327
#
# The docstrings for overlapping methods should match.
klass, method = compare
klass_doc = getattr(klass, method).__doc__
nat_doc = getattr(NaT, method).__doc__
assert klass_doc == nat_doc
_ops = {
"left_plus_right": lambda a, b: a + b,
"right_plus_left": lambda a, b: b + a,
"left_minus_right": lambda a, b: a - b,
"right_minus_left": lambda a, b: b - a,
"left_times_right": lambda a, b: a * b,
"right_times_left": lambda a, b: b * a,
"left_div_right": lambda a, b: a / b,
"right_div_left": lambda a, b: b / a,
}
@pytest.mark.parametrize("op_name", list(_ops.keys()))
@pytest.mark.parametrize("value,val_type", [
(2, "scalar"),
(1.5, "scalar"),
(np.nan, "scalar"),
(timedelta(3600), "timedelta"),
(Timedelta("5s"), "timedelta"),
(datetime(2014, 1, 1), "timestamp"),
(Timestamp("2014-01-01"), "timestamp"),
(Timestamp("2014-01-01", tz="UTC"), "timestamp"),
(Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
(pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
])
def test_nat_arithmetic_scalar(op_name, value, val_type):
# see gh-6873
invalid_ops = {
"scalar": {"right_div_left"},
"timedelta": {"left_times_right", "right_times_left"},
"timestamp": {"left_times_right", "right_times_left",
"left_div_right", "right_div_left"}
}
op = _ops[op_name]
if op_name in invalid_ops.get(val_type, set()):
if (val_type == "timedelta" and "times" in op_name and
isinstance(value, Timedelta)):
msg = "Cannot multiply"
else:
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
op(NaT, value)
else:
if val_type == "timedelta" and "div" in op_name:
expected = np.nan
else:
expected = NaT
assert op(NaT, value) is expected
@pytest.mark.parametrize("val,expected", [
(np.nan, NaT),
(NaT, np.nan),
(np.timedelta64("NaT"), np.nan)
])
def test_nat_rfloordiv_timedelta(val, expected):
# see gh-#18846
#
# See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
td = Timedelta(hours=3, minutes=4)
assert td // val is expected
@pytest.mark.parametrize("op_name", [
"left_plus_right", "right_plus_left",
"left_minus_right", "right_minus_left"
])
@pytest.mark.parametrize("value", [
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
TimedeltaIndex(["1 day", "2 day"], name="x"),
])
def test_nat_arithmetic_index(op_name, value):
# see gh-11718
exp_name = "x"
exp_data = [NaT] * 2
if isinstance(value, DatetimeIndex) and "plus" in op_name:
expected = DatetimeIndex(exp_data, name=exp_name, tz=value.tz)
else:
expected = TimedeltaIndex(exp_data, name=exp_name)
tm.assert_index_equal(_ops[op_name](NaT, value), expected)
@pytest.mark.parametrize("op_name", [
"left_plus_right", "right_plus_left",
"left_minus_right", "right_minus_left"
])
@pytest.mark.parametrize("box", [TimedeltaIndex, Series])
def test_nat_arithmetic_td64_vector(op_name, box):
# see gh-19124
vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
def test_nat_pinned_docstrings():
# see gh-17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
def test_to_numpy_alias():
# GH 24653: alias .to_numpy() for scalars
expected = NaT.to_datetime64()
result = NaT.to_numpy()
assert isna(expected) and | isna(result) | pandas.isna |
import numpy as np
import pandas as pd
import os, sys
import pickle
from tqdm import tqdm
import matplotlib.pyplot as plt
import nibabel as nib
import json
from ipywidgets import widgets, interactive
sys.path.append("../../")
from bpreg.preprocessing.nifti2npy import *
from bpreg.settings import *
from bpreg.inference.inference_model import InferenceModel
def dicom2nifti(ifilepath, ofilepath):
import SimpleITK as sitk
reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(ifilepath)
reader.SetFileNames(dicom_names)
image = reader.Execute()
sitk.WriteImage(image, ofilepath)
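# Usage sketch (hypothetical paths):
#   dicom2nifti("/data/dicom/ABD_LYMPH_001/series/",
#               "/data/nifti/ABD_LYMPH_001.nii.gz")
# reads the DICOM series in the input directory and writes it out as a
# single NIfTI volume.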
def convert_ct_lymph_nodes_to_nifti(dicom_path, nifti_path):
def get_ct_lymph_node_dicom_dir(base_path):
path = base_path + "/" + os.listdir(base_path)[0] + "/"
path += os.listdir(path)[0] + "/"
return path
dicom_dirs = [
mydir
for mydir in np.sort(os.listdir(dicom_path))
if mydir.startswith(("ABD", "MED"))
]
for dicom_dir in tqdm(dicom_dirs):
ifilepath = get_ct_lymph_node_dicom_dir(dicom_path + dicom_dir)
ofilepath = nifti_path + dicom_dir + ".nii.gz"
if os.path.exists(ofilepath):
continue
dicom2nifti(ifilepath, ofilepath)
def nifti2npy(nifti_path, npy_path):
base_path = "/".join(nifti_path.split("/")[0:-2]) + "/"
n2n = Nifti2Npy(
target_pixel_spacing=7, # in mm/pixel
min_hu=-1000, # in Hounsfield units
max_hu=1500, # in Hounsfield units
size=64, # x/y size
ipath=nifti_path, # input path
opath=npy_path, # output path
rescale_max=1, # rescale max value
rescale_min=-1,
) # rescale min value
filepaths = np.sort([nifti_path + f for f in os.listdir(nifti_path)])
df = n2n.convert(filepaths, save=True)
df.to_excel(base_path + "meta_data.xlsx")
def update_meta_data(landmark_filepath, meta_data_filepath):
"""
add train/val/test split information to the meta data dataframe
"""
df_landmarks = pd.read_excel(landmark_filepath, sheet_name="database")
df_meta_data = pd.read_excel(meta_data_filepath, index_col=0)
train_filenames = [
f + ".npy" for f in df_landmarks.loc[df_landmarks.train == 1, "filename"]
]
val_filenames = [
f + ".npy" for f in df_landmarks.loc[df_landmarks.val == 1, "filename"]
]
test_filenames = [
f + ".npy" for f in df_landmarks.loc[df_landmarks.test == 1, "filename"]
]
df_meta_data["train_data"] = 1
df_meta_data.loc[val_filenames, "val_data"] = 1
df_meta_data.loc[test_filenames, "test_data"] = 1
df_meta_data.loc[val_filenames + test_filenames, "train_data"] = 0
df_meta_data.to_excel(meta_data_filepath)
def preprocess_ct_lymph_node_dataset(dicom_path, nifti_path, npy_path):
"""Convert DICOM files form CT Lymph node to downsampled npy volumes."""
# Convert Dicom to nifti
convert_ct_lymph_nodes_to_nifti(dicom_path, nifti_path)
# Convert nifti files to npy and save meta_data.xlsx file
nifti2npy(nifti_path, npy_path)
# update meta data with train/val/test data from landmark file
update_meta_data(
landmark_filepath="data/ct-lymph-nodes-annotated-landmarks.xlsx",
meta_data_filepath="data/meta_data.xlsx",
)
def crop_scores(scores, start_score, end_score):
scores = np.array(scores)
min_scores = np.where(scores < start_score)[0]
max_scores = np.where(scores > end_score)[0]
min_index = 0
max_index = len(scores)
if len(min_scores) > 0:
min_index = np.nanmax(min_scores)
if len(max_scores) > 0:
max_index = np.nanmin(max_scores)
return min_index, max_index
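# Worked example (made-up slice scores): crop_scores([-3, -1, 0, 2, 8, 9],
# start_score=0, end_score=5) returns (1, 4), i.e. the index of the last
# score below the range and the index of the first score above it.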
def plot_dicomexamined_distribution(
df,
column="BODY PART",
count_column="FILE",
fontsize=20,
others_percentage_upper_bound=0,
return_table=False,
):
color_counts = len(np.unique(df[column])) + 2
cmap = plt.cm.get_cmap("cubehelix", color_counts)
colors = [cmap(i) for i in range(color_counts - 1)]
bodypartexamined_dicomtags = df.groupby(column)[count_column].count() / len(df)
bodyparts2others = bodypartexamined_dicomtags[
bodypartexamined_dicomtags <= others_percentage_upper_bound
].index
if len(bodyparts2others) > 0:
bodypartexamined_dicomtags["OTHERS"] = 0
for bodypart in bodyparts2others:
bodypartexamined_dicomtags["OTHERS"] += bodypartexamined_dicomtags[bodypart]
del bodypartexamined_dicomtags[bodypart]
if np.round(bodypartexamined_dicomtags.sum(), 2) != 1:
bodypartexamined_dicomtags["NONE"] = 1 - bodypartexamined_dicomtags.sum()
bodypartexamined_dicomtags = bodypartexamined_dicomtags.sort_values()
_, ax = plt.subplots(figsize=(10, 10))
_, texts, autotexts = ax.pie(
bodypartexamined_dicomtags.values * 100,
labels=bodypartexamined_dicomtags.index,
autopct="%1.1f%%",
colors=colors,
)
for i, txt, txt2 in zip(np.arange(len(texts)), texts, autotexts):
txt.set_fontsize(fontsize)
txt2.set_fontsize(fontsize - 2)
if i < 4:
txt2.set_color("white")
ax.axis("equal")
plt.tight_layout()
if return_table:
bodypartexamined_dicomtags = bodypartexamined_dicomtags.sort_values(
ascending=False
)
bodypartexamined_dicomtags = pd.DataFrame(
np.round(bodypartexamined_dicomtags * 100, 1)
)
bodypartexamined_dicomtags.columns = ["Proportion [%]"]
return bodypartexamined_dicomtags
def load_json(filepath):
with open(filepath) as f:
x = json.load(f)
return x
def plot_scores(filepath, fontsize=18):
plt.figure(figsize=(12, 6))
x = load_json(filepath)
plt.plot(x["z"], x["cleaned slice scores"], label="cleaned slice scores")
plt.plot(
x["z"],
x["unprocessed slice scores"],
label="unprocessed slice scores",
linestyle="--",
)
try:
min_score = np.nanmin(x["unprocessed slice scores"])
max_score = np.nanmax(x["unprocessed slice scores"])
dflandmarks = pd.DataFrame(x["look-up table"]).T
landmarks = dflandmarks[
(dflandmarks["mean"] > min_score) & (dflandmarks["mean"] < max_score)
]
for landmark, row in landmarks.iloc[[0, -1]].iterrows():
plt.plot(
[0, np.nanmax(x["z"])],
[row["mean"], row["mean"]],
linestyle=":",
color="black",
linewidth=0.8,
)
plt.text(
5,
row["mean"] + 1,
landmark,
fontsize=fontsize - 4,
bbox=dict(
boxstyle="square",
fc=(1.0, 1, 1),
),
)
except:
pass
plt.xlabel("height [mm]", fontsize=fontsize)
plt.ylabel("Slice Scores", fontsize=fontsize)
plt.xticks(fontsize=fontsize - 2)
plt.yticks(fontsize=fontsize - 2)
plt.legend(loc=1, fontsize=fontsize)
plt.xlim((0, np.nanmax(x["z"])))
plt.title(
f"Predicted Body Range: {x['body part examined tag']}",
fontsize=fontsize - 2,
)
plt.show()
def get_updated_bodypartexamined_from_json_files(data_path):
files = [f for f in os.listdir(data_path) if f.endswith(".json")]
dftags = | pd.DataFrame(index=files, columns=["tag"]) | pandas.DataFrame |
## dea_waterbodies.py
"""
Description: This file contains a set of python functions for loading
and processing DEA Waterbodies.
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Australia data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, file one on
Github: https://github.com/GeoscienceAustralia/dea-notebooks/issues/new
Functions included:
get_waterbody
get_waterbodies
get_geohashes
get_time_series
Last modified: November 2020
"""
import geopandas as gpd
from owslib.wfs import WebFeatureService
from owslib.fes import PropertyIsEqualTo
from owslib.etree import etree
import pandas as pd
WFS_ADDRESS = "https://geoserver.dea.ga.gov.au/geoserver/wfs"
def get_waterbody(geohash: str) -> gpd.GeoDataFrame:
"""Gets a waterbody polygon and metadata by geohash.
Parameters
----------
geohash : str
The geohash/UID for a waterbody in DEA Waterbodies.
Returns
-------
gpd.GeoDataFrame
A GeoDataFrame with the polygon.
"""
wfs = WebFeatureService(url=WFS_ADDRESS, version="1.1.0")
filter_ = PropertyIsEqualTo(propertyname="uid", literal=geohash)
filterxml = etree.tostring(filter_.toXML()).decode("utf-8")
response = wfs.getfeature(
typename="DigitalEarthAustraliaWaterbodies",
filter=filterxml,
outputFormat="json",
)
wb_gpd = gpd.read_file(response)
return wb_gpd
def get_waterbodies(bbox: tuple, crs="EPSG:4326") -> gpd.GeoDataFrame:
"""Gets the polygons and metadata for multiple waterbodies by bbox.
Parameters
----------
bbox : (xmin, ymin, xmax, ymax)
Bounding box.
crs : str
Optional CRS for the bounding box.
Returns
-------
gpd.GeoDataFrame
A GeoDataFrame with the polygons and metadata.
"""
wfs = WebFeatureService(url=WFS_ADDRESS, version="1.1.0")
response = wfs.getfeature(
typename="DigitalEarthAustraliaWaterbodies",
bbox=tuple(bbox) + (crs,),
outputFormat="json",
)
wb_gpd = gpd.read_file(response)
return wb_gpd
def get_geohashes(bbox: tuple = None, crs: str = "EPSG:4326") -> [str]:
"""Gets all waterbody geohashes.
Parameters
----------
bbox : (xmin, ymin, xmax, ymax)
Optional bounding box.
crs : str
Optional CRS for the bounding box.
Returns
-------
[str]
A list of geohashes.
"""
wfs = WebFeatureService(url=WFS_ADDRESS, version="1.1.0")
if bbox is not None:
bbox = tuple(bbox) + (crs,)
response = wfs.getfeature(
typename="DigitalEarthAustraliaWaterbodies",
propertyname="uid",
outputFormat="json",
bbox=bbox,
)
wb_gpd = gpd.read_file(response)
return list(wb_gpd["uid"])
def get_time_series(geohash: str = None, waterbody: pd.Series = None) -> pd.DataFrame:
"""Gets the time series for a waterbody. Specify either a GeoDataFrame row or a geohash.
Parameters
----------
geohash : str
The geohash/UID for a waterbody in DEA Waterbodies.
waterbody : pd.Series
One row of a GeoDataFrame representing a waterbody.
Returns
-------
pd.DataFrame
A time series for the waterbody.
"""
if waterbody is not None and geohash is not None:
raise ValueError("One of waterbody and geohash must be None")
if waterbody is None and geohash is None:
raise ValueError("One of waterbody and geohash must be specified")
if geohash is not None:
wb = get_waterbody(geohash)
url = wb.timeseries[0]
else:
url = waterbody.timeseries
wb_timeseries = | pd.read_csv(url) | pandas.read_csv |
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
# TODO(GH-24559): Remove the FutureWarning
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
aware = | pd.DatetimeIndex(ints, tz="US/Central") | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
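# The helper above splits the row range into (start, nrows) tasks, reads
# each slice on a ThreadPool worker (skipping the header for every chunk
# after the first) and concatenates the pieces, so the result should match
# a plain single-threaded read_csv of the same file.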
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
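# f(i, v) builds CSV row i with the NA token v in column i and empty
# fields everywhere else (nv fields per row)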
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
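# skiprows uses 0-based file line numbers, so [6, 8] drops the rows built
# from i=5 and i=7, matching condensed_text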
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO: add assertions; for now this only checks that the mixed-type CSV parses
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four lines (comments included); header=1
# then takes the second remaining line ('A,B,C', file line 6) as the
# header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
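# a date_parser that uses an invalid keyword (day_first instead of dayfirst)
# should surface as an error from read_csv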
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
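# parse_dates={'nominal': [1, 2]} concatenates the date and NominalTime
# columns into a single 'nominal' datetime column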
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial since the parser never actually sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
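# convert_days_sentinel is identical to convert_days here; the reads below
# check that both converter mappings produce the same frame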
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201: test that an integer index_col refers to a position within
# the usecols selection
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
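# Runs the shared ParserTests suite with engine='python' (see the read_csv /
# read_table overrides below), plus python-engine-only tests: sniffed and
# regex separators, fixed-width files, verbose output, etc.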
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat artificial since the parser never actually sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table( | StringIO(data) | pandas.compat.StringIO |
from airflow.models import Variable
import pandas as pd
import sqlalchemy as db
import configparser
import logging
# variables
SOURCE_MYSQL_HOST = Variable.get('SOURCE_MYSQL_HOST')
SOURCE_MYSQL_PORT = Variable.get('SOURCE_MYSQL_PORT')
SOURCE_MYSQL_USER = Variable.get('SOURCE_MYSQL_USER')
SOURCE_MYSQL_PASSWORD = Variable.get('SOURCE_MYSQL_PASSWORD')
SOURCE_MYSQL_ROOT_PASSWORD = Variable.get('SOURCE_MYSQL_ROOT_PASSWORD')
SOURCE_MYSQL_DATABASE = Variable.get('SOURCE_MYSQL_DATABASE')
DW_MYSQL_HOST = Variable.get('DW_MYSQL_HOST')
DW_MYSQL_PORT = Variable.get('DW_MYSQL_PORT')
DW_MYSQL_USER = Variable.get('DW_MYSQL_USER')
DW_MYSQL_PASSWORD = Variable.get('DW_MYSQL_PASSWORD')
DW_MYSQL_ROOT_PASSWORD = Variable.get('DW_MYSQL_ROOT_PASSWORD')
DW_MYSQL_DATABASE = Variable.get('DW_MYSQL_DATABASE')
# Database connection URI
db_conn_url = "mysql+pymysql://{}:{}@{}:{}/{}".format(SOURCE_MYSQL_USER,
SOURCE_MYSQL_PASSWORD,
SOURCE_MYSQL_HOST,
SOURCE_MYSQL_PORT,
SOURCE_MYSQL_DATABASE)
db_engine = db.create_engine(db_conn_url)
# Data warehouse connection URI
dw_conn_url = "mysql+pymysql://{}:{}@{}:{}/{}".format(DW_MYSQL_USER,
DW_MYSQL_PASSWORD,
DW_MYSQL_HOST,
DW_MYSQL_PORT,
DW_MYSQL_DATABASE)
dw_engine = db.create_engine(dw_conn_url)
def get_dimStore_last_id(db_engine):
"""Function to get last store_key from dimemsion table `dimStore`"""
query = "SELECT max(store_key) AS last_id FROM dimStore"
tdf = pd.read_sql(query, db_engine)
return tdf.iloc[0]['last_id']
def extract_table_store(last_id, db_engine):
"""Function to extract table `store`"""
if last_id is None:
last_id = -1
query = "SELECT * FROM store WHERE store_id > {} LIMIT 100000".format(
last_id)
logging.info("query={}".format(query))
return pd.read_sql(query, db_engine)
def lookup_table_address(store_df, db_engine):
"""Function to lookup table `address`"""
unique_ids = list(store_df.address_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM address WHERE address_id IN ({})".format(
','.join(map(str, unique_ids)))
return pd.read_sql(query, db_engine)
def lookup_table_city(address_df, db_engine):
"""Function to lookup table `city`"""
unique_ids = list(address_df.city_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM city WHERE city_id IN ({})".format(
','.join(map(str, unique_ids)))
return pd.read_sql(query, db_engine)
def lookup_table_country(address_df, db_engine):
"""Function to lookup table `country`"""
unique_ids = list(address_df.country_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM country WHERE country_id IN ({})".format(
','.join(map(str, unique_ids)))
return pd.read_sql(query, db_engine)
def lookup_table_staff(store_df, db_engine):
"""Function to lookup table `staff`"""
unique_ids = list(store_df.manager_staff_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM staff WHERE staff_id IN ({})".format(
','.join(map(str, unique_ids)))
return | pd.read_sql(query, db_engine) | pandas.read_sql |
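# --- Illustrative sketch (not part of the original DAG code) ---
# The functions above only extract the source table and look up its related
# dimensions. A typical next step joins them into one denormalized dimStore
# frame and appends it to the warehouse. Column names below are assumptions
# based on a sakila-style schema, not taken from the source.
def build_dim_store_sketch(store_df, address_df, city_df, country_df):
    """Join store -> address -> city -> country into a single frame."""
    dim = (
        store_df.merge(address_df, on="address_id", how="left")
                .merge(city_df, on="city_id", how="left")
                .merge(country_df, on="country_id", how="left")
    )
    # keep only the columns the dimension table is assumed to need
    return dim[["store_id", "manager_staff_id", "address", "district",
                "city", "country"]]
# Usage sketch: the result could then be loaded with
# build_dim_store_sketch(...).to_sql("dimStore", dw_engine,
#                                    if_exists="append", index=False)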
"""
Data extraction functions
"""
import datetime as dt
import norgatedata
import pandas as pd
import requests
from pandas.tseries.offsets import BDay
from yahoofinancials import YahooFinancials
class NorgateExtract():
"""
Functions to extract data from Norgate Data
"""
@staticmethod
def get_norgate_tickers(params):
"""
Create list of all available Norgate Commodity tickers
Returns
-------
tickers : List
Returns a list of ticker codes.
init_ticker_dict : Dict
Dictionary of ticker-security name pairs
"""
# Specify Norgate Cash Commodities database and extract data
databasename = 'Cash Commodities'
databasecontents = norgatedata.database(databasename)
# Create empty dictionary to store tickers
init_ticker_dict = {}
# For each dictionary in the data extract
for dicto in databasecontents:
# Add the symbol and security name to the ticker dict as a
# key-value pair
key = dicto['symbol']
value = dicto['securityname']
if 'Stocks' not in value:
init_ticker_dict[key] = value
# Specify Norgate Continuous Futures database and extract data
databasename = 'Continuous Futures'
databasecontents = norgatedata.database(databasename)
# For each dictionary in the data extract
for dicto in databasecontents:
# Add the symbol and security name to the ticker dict as a
# key-value pair
key = dicto['symbol']
value = dicto['securityname']
if params['tickers_adjusted']:
# Only take the back-adjusted tickers
if '_CCB' in key:
init_ticker_dict[key] = value
else:
if key.startswith('&') and '_CCB' not in key:
init_ticker_dict[key] = value
# Convert the ticker dict keys into a list
tickers = list(init_ticker_dict.keys())
params['tickers'] = tickers
params['init_ticker_dict'] = init_ticker_dict
return params
@classmethod
def importnorgate(cls, params, tables, mappings):
"""
Return dictionary of price histories from Norgate Data.
Parameters
----------
params : Dict
Dictionary of key parameters.
tables : Dict
Dictionary of key tables.
mappings : Dict
Dictionary of sector mappings.
Returns
-------
tables : Dict
raw_ticker_dict : Dict
Dictionary of price history DataFrames, one for each ticker.
params : Dict
ticker_name_dict : Dict
Dictionary mapping ticker to long name for each ticker.
ticker_short_name_dict : Dict
Dictionary mapping ticker to short name for each ticker.
mappings : Dict
Dictionary of sector mappings
"""
# Create empty dictionaries
tables['raw_ticker_dict'] = {}
params['ticker_name_dict'] = {}
params['ticker_short_name_dict'] = {}
# Loop through list of tickers
for ticker in params['tickers'][:params['ticker_limit']]:
# Append character to each ticker to represent its type and create
# lowercase value
tick = params['ticker_types'][ticker[0]]+ticker[1:]
lowtick = tick.lower()
# Set data format and extract each DataFrame, storing as
# a key-value pair in ticker_dict
timeseriesformat = 'pandas-dataframe'
try:
data = norgatedata.price_timeseries(
ticker, start_date=params['start_date'],
end_date=params['end_date'],
format=timeseriesformat,)
data = data[['Open', 'High', 'Low', 'Close']]
tables['raw_ticker_dict'][lowtick] = data
# Extract the security name and store in ticker_name_dict
ticker_name = norgatedata.security_name(ticker)
params['ticker_name_dict'][lowtick] = ticker_name
# Set the proper length of DataFrame to help filter out
# missing data
params = MktUtils.window_set(frame=data, params=params)
except IndexError:
# Skip this ticker; without the continue, the unbound ticker_name
# below would raise a NameError (or silently reuse a stale value)
print('Error importing : ', ticker)
continue
# Truncate the ticker name to improve charting legibility
# and store in ticker_short_name_dict
params['ticker_short_name_dict'][
lowtick] = ticker_name.partition(" Continuous")[0]
# Create sector mappings DataFrame
mappings['sector_mappings_df'] = cls._commodity_sector_mappings(
params, mappings)
return params, tables, mappings
@staticmethod
def _commodity_sector_mappings(params, mappings):
"""
Create sector mappings DataFrame
Parameters
----------
params : Dict
Dictionary of key parameters.
mappings : Dict
Dictionary of sector mappings.
Returns
-------
sector_mappings_df : DataFrame
Sector mappings DataFrame.
"""
# Create empty dictionary
sectors = {}
# For each key-value pair in the default sector mappings dictionary
for key, value in mappings['commodity_sector_mappings'].items():
# If the first character in the key is in the list of keys from the
# ticker types dictionary
if key[0] in list(params['ticker_types'].keys()):
# Create a new key equal to the lower case original key with
# the first character replaced by the value in the ticker types
# dictionary
new_key = key.lower().replace(
key[0], params['ticker_types'][key[0]])
# create an entry in the sectors dictionary
sectors[new_key] = value
# Create a sector mappings DataFrame from the sectors dictionary using
# the default commodity sector levels list as the column headers
sector_mappings_df = pd.DataFrame.from_dict(
sectors,
orient='index',
columns=params['commodity_sector_levels'])
return sector_mappings_df
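# --- Illustrative usage sketch; all parameter values below are assumptions ---
# NorgateExtract expects pre-populated params/tables/mappings dictionaries.
# A minimal call, assuming a licensed local Norgate Data installation, could
# look roughly like this (the ticker_types prefix map and sector levels are
# guesses, not values from the source).
def _norgate_usage_sketch():
    params = {
        'tickers_adjusted': False,
        'ticker_limit': 5,
        'ticker_types': {'&': 'c_', '$': 's_'},
        'start_date': '2015-01-01',
        'end_date': '2020-12-31',
        'window': None,
        'commodity_sector_levels': ['Sector', 'Sub-Sector'],
    }
    tables = {}
    mappings = {'commodity_sector_mappings': {}}
    params = NorgateExtract.get_norgate_tickers(params)
    return NorgateExtract.importnorgate(params, tables, mappings)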
class YahooExtract():
"""
Functions to extract data from Yahoo Finance
"""
@staticmethod
def tickerextract(params, mappings):
"""
Extract list of S&P 500 Companies from Wikipedia.
Parameters
----------
params : Dict
Dictionary of key parameters.
mappings : Dict
Dictionary of sector mappings.
Returns
-------
params : Dict
tickers : List
List of stock tickers in the form of Reuters RIC codes
as strings.
ticker_name_dict : Dict
Dictionary mapping ticker to long name for each ticker.
mappings : Dict
Dictionary of sector mappings.
"""
# Extract data from the Wikipedia SPX page
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
req = requests.get(url)
html_doc = req.text
spx_list = pd.read_html(html_doc)
# the first table on the page contains the stock data
spx_table = spx_list[0]
# create a list of the tickers from the 'Symbol' column
params['tickers'] = list(spx_table['Symbol'])
# create a dictionary mapping ticker to Security Name
params['ticker_name_dict'] = dict(
zip(spx_table['Symbol'], spx_table['Security']))
# Create a DataFrame from the default equity sectors dictionary
equity_sectors_df = pd.DataFrame.from_dict(
mappings['equity_sector_mappings'],
orient='index',
columns=['Sector',
'Industry Group',
'Industry'])
# Reset the index and rename as Sub-Industry column
equity_sectors_df = equity_sectors_df.reset_index()
equity_sectors_df = equity_sectors_df.rename(
columns={'index':'Sub-Industry'})
# Create a sector mappings DataFrame by joining the SPX table from
# Wikipedia to the Equity Sectors DataFrame
mappings['sector_mappings_df'] = spx_table.merge(
equity_sectors_df,
how='left',
left_on='GICS Sub-Industry',
right_on='Sub-Industry')
# Set the Index to the Ticker symbol
mappings['sector_mappings_df'] = mappings[
'sector_mappings_df'].set_index('Symbol')
# Keep only the columns related to the sector levels
mappings['sector_mappings_df'] = mappings['sector_mappings_df'][
['Sector', 'Industry Group', 'Industry', 'Sub-Industry',
'Security']]
return params, mappings
@classmethod
def importyahoo(cls, params, tables):
"""
Return dictionary of price histories from Yahoo Finance.
Parameters
----------
params : Dict
Dictionary of key parameters.
tables : Dict
Dictionary of key tables.
Returns
-------
tables : Dict
raw_ticker_dict : Dict
Dictionary of price history DataFrames, one for each
ticker.
params : Dict
exceptions : List
List of tickers that could not be returned.
"""
# Create empty dictionary and list
tables['raw_ticker_dict'] = {}
params['exceptions'] = []
# Loop through the tickers
for sym in params['tickers'][:params['ticker_limit']]:
# Attempt to return the data for given ticker
try:
tables['raw_ticker_dict'][sym], params = cls._returndata(
ticker=sym, params=params)
# If error, try replacing '.' with '-' in ticker
except KeyError:
try:
sym_alt = sym.replace('.','-')
tables['raw_ticker_dict'][sym], params = cls._returndata(
ticker=sym_alt, params=params)
# If error, add to list of exceptions and move to next
# ticker
except KeyError:
print("Error with "+sym)
params['exceptions'].append(sym)
continue
return params, tables
@staticmethod
def _returndata(ticker, params):
"""
Create DataFrame of historic prices for specified ticker.
Parameters
----------
ticker : Int
Stock to be returned in the form of Reuters RIC code as a
string.
params : Dict
start_date : Str
Start Date represented as a string in the
format 'YYYY-MM-DD'.
end_date : Str
End Date represented as a string in the
format 'YYYY-MM-DD'.
Returns
-------
frame : DataFrame
DataFrame of historic prices for given ticker.
params : Dict
Dictionary of key parameters.
"""
# Initialize a YahooFinancials object with the supplied ticker
yahoo_financials = YahooFinancials(ticker)
# Set frequency to daily
freq='daily'
# Extract historic prices
frame = yahoo_financials.get_historical_price_data(
params['start_date'], params['end_date'], freq)
# Reformat columns
frame = pd.DataFrame(frame[ticker]['prices']).drop(['date'], axis=1) \
.rename(columns={'formatted_date':'Date',
'open': 'Open',
'high': 'High',
'low': 'Low',
'close': 'Close',
'volume': 'Volume'}) \
.loc[:, ['Date','Open','High','Low','Close','Volume']] \
.set_index('Date')
# Set Index to Datetime
frame.index = pd.to_datetime(frame.index)
# Set the proper length of DataFrame to help filter out missing data
params = MktUtils.window_set(frame=frame, params=params)
return frame, params
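# --- Illustrative usage sketch; dates and limits below are assumptions ---
# The Yahoo path first scrapes the S&P 500 constituents from Wikipedia and
# then pulls each ticker's OHLCV history. Supplying an empty
# equity_sector_mappings dict keeps the sketch self-contained.
def _yahoo_usage_sketch():
    params = {
        'start_date': '2020-01-01',
        'end_date': '2020-12-31',
        'ticker_limit': 10,
        'window': None,
    }
    tables = {}
    mappings = {'equity_sector_mappings': {}}
    params, mappings = YahooExtract.tickerextract(params, mappings)
    params, tables = YahooExtract.importyahoo(params, tables)
    return tables['raw_ticker_dict']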
class MktUtils():
"""
Various market data cleaning utilities
"""
@staticmethod
def ticker_clean(params, tables):
"""
Remove tickers with incomplete history
Parameters
----------
params : Dict
Dictionary of key parameters.
tables : Dict
Dictionary of key tables.
Returns
-------
tables : Dict
Dictionary of key tables.
"""
# Create empty list of tickers to be removed
params['drop_list'] = []
# Loop through each DataFrame in raw ticker dict
for ticker, frame in tables['raw_ticker_dict'].items():
# If the DataFrame has less than 90% full history or has too many
# repeated values
if (len(frame) < (params['window'] * 0.9)
or frame['Close'].nunique() < (params['lookback'] / 15)):
# Add ticker to the drop list
params['drop_list'].append(ticker)
# For each ticker in the drop list
for ticker in params['drop_list']:
# Delete the ticker from the dictionary
del tables['raw_ticker_dict'][ticker]
return tables
@staticmethod
def window_set(frame, params):
"""
Set the correct length of the selected data
Parameters
----------
frame : DataFrame
The historical prices.
params : Dict
start_date : Str
The chosen start date.
Returns
-------
params : Dict
Dictionary of key parameters.
"""
# If the history window has not yet been set
if params['window'] is None:
# If the difference in start dates between the chosen start date
# and the first value in the index is less than 5 days
if (( | pd.to_datetime(params['start_date']) | pandas.to_datetime |
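# --- Hypothetical completion sketch ---
# window_set is cut off above at the prompt/completion boundary. Based on its
# docstring and the surrounding comment (the "< 5 days" check against the
# chosen start date), it plausibly sets params['window'] either to the length
# of the supplied frame or to a business-day count from the start date. This
# is an assumption, not the original implementation.
def window_set_sketch(frame, params):
    if params.get('window') is None:
        start = pd.to_datetime(params['start_date'])
        if (frame.index[0] - start).days < 5:
            # history starts close to the requested date: use the frame length
            params['window'] = len(frame)
        else:
            # otherwise approximate with the business-day count since start
            params['window'] = len(pd.bdate_range(start, pd.Timestamp.today()))
    return params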
from RLC.capture_chess.agent import Agent
from RLC.capture_chess.environment import Board
import numpy as np
from chess.pgn import Game
import pandas as pd
class Q_learning(object):
def __init__(self, agent, env, memsize=1000):
"""
Q-learning object to learn capture chess
Args:
agent: The agent playing the chess game as white
env: The environment including the python-chess board
memsize: maximum amount of games to retain in-memory
"""
self.agent = agent
self.env = env
self.memory = []
self.memsize = memsize
self.reward_trace = []
self.sampling_probs = []
def learn(self, iters=100, c=10):
"""
Run the Q-learning algorithm. Play greedy on the final iter
Args:
iters: int
amount of games to train
c: int
update the network every c games
Returns: pgn (str)
pgn string describing final game
"""
for k in range(iters):
if k % c == 0:
print("iter", k)
self.agent.fix_model()
greedy = True if k == iters - 1 else False
self.env.reset()
self.play_game(k, greedy=greedy)
pgn = Game.from_board(self.env.board)
reward_smooth = pd.DataFrame(self.reward_trace)
reward_smooth.rolling(window=10, min_periods=0).mean().plot()
return pgn
def play_game(self, k, greedy=False, maxiter=25):
"""
Play a game of capture chess
Args:
k: int
game count, determines epsilon (exploration rate)
greedy: Boolean
if greedy, no exploration is done
maxiter: int
Maximum amount of steps per game
Returns:
"""
episode_end = False
turncount = 0
# Here we determine the exploration rate. k is divided by 250 to slow down the exploration rate decay.
eps = max(0.05, 1 / (1 + (k / 250))) if not greedy else 0.
# Play a game of chess
while not episode_end:
state = self.env.layer_board
explore = np.random.uniform(0, 1) < eps # determine whether to explore
if explore:
move = self.env.get_random_action()
move_from = move.from_square
move_to = move.to_square
else:
action_values = self.agent.get_action_values(np.expand_dims(state, axis=0))
action_values = np.reshape(np.squeeze(action_values), (64, 64))
action_space = self.env.project_legal_moves() # The environment determines which moves are legal
action_values = np.multiply(action_values, action_space)
move_from = np.argmax(action_values, axis=None) // 64
move_to = np.argmax(action_values, axis=None) % 64
moves = [x for x in self.env.board.generate_legal_moves() if \
x.from_square == move_from and x.to_square == move_to]
if len(moves) == 0: # If all legal moves have negative action value, explore.
move = self.env.get_random_action()
move_from = move.from_square
move_to = move.to_square
else:
move = np.random.choice(moves) # If there are multiple max-moves, pick a random one.
episode_end, reward = self.env.step(move)
new_state = self.env.layer_board
if len(self.memory) > self.memsize:
self.memory.pop(0)
self.sampling_probs.pop(0)
turncount += 1
if turncount > maxiter:
episode_end = True
reward = 0
if episode_end:
new_state = new_state * 0
self.memory.append([state, (move_from, move_to), reward, new_state])
self.sampling_probs.append(1)
self.reward_trace.append(reward)
self.update_agent(turncount)
return self.env.board
def sample_memory(self, turncount):
"""
Get a sample from memory for experience replay
Args:
turncount: int
turncount limits the size of the minibatch
Returns: tuple
a mini-batch of experiences (list)
indices of chosen experiences
"""
minibatch = []
memory = self.memory[:-turncount]
probs = self.sampling_probs[:-turncount]
sample_probs = [probs[n] / np.sum(probs) for n in range(len(probs))]
indices = np.random.choice(range(len(memory)), min(1028, len(memory)), replace=True, p=sample_probs)
for i in indices:
minibatch.append(memory[i])
return minibatch, indices
def update_agent(self, turncount):
"""
Update the agent using experience replay. Set the sampling probs with the td error
Args:
turncount: int
Amount of turns played. Only sample the memory if there are sufficient samples
Returns:
"""
if turncount < len(self.memory):
minibatch, indices = self.sample_memory(turncount)
td_errors = self.agent.network_update(minibatch)
for n, i in enumerate(indices):
self.sampling_probs[i] = np.abs(td_errors[n])
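# --- Illustrative usage sketch (constructor arguments are assumptions) ---
# A minimal training run with the Q-learning loop above, assuming the Agent
# and Board imported at the top of this file can be built with their default
# arguments.
def _q_learning_usage_sketch():
    agent = Agent()
    env = Board()
    learner = Q_learning(agent, env, memsize=1000)
    # update the fixed target network every 10 games, train for 100 games
    pgn = learner.learn(iters=100, c=10)
    return pgn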
class Reinforce(object):
def __init__(self, agent, env):
"""
Reinforce object to learn capture chess
Args:
agent: The agent playing the chess game as white
env: The environment including the python-chess board
"""
self.agent = agent
self.env = env
self.reward_trace = []
self.action_value_mem = []
def learn(self, iters=100, c=10):
"""
Run the REINFORCE (policy gradient) algorithm
Args:
iters: int
amount of games to train
c: int
unused here; kept for interface parity with Q_learning.learn
Returns: pgn (str)
pgn string describing final game
"""
for k in range(iters):
self.env.reset()
states, actions, rewards, action_spaces = self.play_game(k)
self.reinforce_agent(states, actions, rewards, action_spaces)
pgn = Game.from_board(self.env.board)
reward_smooth = pd.DataFrame(self.reward_trace)
reward_smooth.rolling(window=10, min_periods=0).mean().plot()
return pgn
def play_game(self, k, maxiter=25):
"""
Play a game of capture chess
Args:
k: int
game count; actions are sampled from the policy network, so no
epsilon-greedy exploration is used in this variant
maxiter: int
Maximum amount of steps per game
Returns:
"""
episode_end = False
turncount = 0
states = []
actions = []
rewards = []
action_spaces = []
# Play a game of chess
while not episode_end:
state = self.env.layer_board
action_space = self.env.project_legal_moves() # The environment determines which moves are legal
action_probs = self.agent.model.predict([np.expand_dims(state, axis=0),
np.zeros((1, 1)),
action_space.reshape(1, 4096)])
self.action_value_mem.append(action_probs)
action_probs = action_probs / action_probs.sum()
move = np.random.choice(range(4096), p=np.squeeze(action_probs))
move_from = move // 64
move_to = move % 64
moves = [x for x in self.env.board.generate_legal_moves() if \
x.from_square == move_from and x.to_square == move_to]
assert len(moves) > 0 # should not be possible
if len(moves) > 1:
move = np.random.choice(moves) # If there are multiple max-moves, pick a random one.
elif len(moves) == 1:
move = moves[0]
episode_end, reward = self.env.step(move)
new_state = self.env.layer_board
turncount += 1
if turncount > maxiter:
episode_end = True
reward = 0
if episode_end:
new_state = new_state * 0
states.append(state)
actions.append((move_from, move_to))
rewards.append(reward)
action_spaces.append(action_space.reshape(1, 4096))
self.reward_trace.append(np.sum(rewards))
return states, actions, rewards, action_spaces
def reinforce_agent(self, states, actions, rewards, action_spaces):
"""
Update the agent with a policy gradient (REINFORCE) step
Args:
states: list of board layer states encountered during the game
actions: list of (move_from, move_to) tuples that were played
rewards: list of rewards received after each move
action_spaces: list of legal-move masks, one per state
Returns:
"""
self.agent.policy_gradient_update(states, actions, rewards, action_spaces)
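# --- Illustrative sketch (not from the source) ---
# reinforce_agent hands the raw per-move rewards straight to
# agent.policy_gradient_update. If discounted returns were needed on this side
# instead, they could be computed like this (gamma is an assumed
# hyperparameter; any discounting in the original lives inside the agent).
def discounted_returns_sketch(rewards, gamma=0.99):
    returns, running = [], 0.0
    for r in reversed(rewards):
        running = r + gamma * running
        returns.insert(0, running)
    return returns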
class ActorCritic(object):
def __init__(self, actor, critic, env):
"""
ActorCritic object to learn capture chess
Args:
actor: Policy Gradient Agent
critic: Q-learning Agent
env: The environment including the python-chess board
"""
self.actor = actor
self.critic = critic
self.env = env
self.reward_trace = []
self.action_value_mem = []
self.memory = []
self.sampling_probs = []
def learn(self, iters=100, c=10):
"""
Run the actor-critic training loop
Args:
iters: int
amount of games to train
c: int
update the network every c games
Returns: pgn (str)
pgn string describing final game
"""
for k in range(iters):
if k % c == 0:
self.critic.fix_model()
self.env.reset()
end_state = self.play_game(k)
pgn = Game.from_board(self.env.board)
reward_smooth = | pd.DataFrame(self.reward_trace) | pandas.DataFrame |
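# (ActorCritic.learn is cut off above at the prompt/completion boundary; by
# analogy with Q_learning.learn and Reinforce.learn it presumably smooths
# self.reward_trace with a rolling mean, plots it and returns the pgn.)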
import pandas as pd
from argparse import ArgumentParser
input_ = ArgumentParser()
input_.add_argument("-f", dest = "fimo_file", required = True)
input_.add_argument("-p", dest = "peak_file", required = True)
args = input_.parse_args()
final_df = pd.DataFrame(columns=["total_fimo_hits","total_peaks","peaks_hit","peaks_not_hit"])
chromosomes = ["chr1","chr2","chr3","chr4","chr5"]
total_fimo_hits = 0
total_peaks = 0
peaks_hit = 0
peaks_not_hit = 0
for chromosome_ in chromosomes:
df_fimo = pd.read_csv(args.fimo_file, sep="\t", comment="#")
df_fimo["sequence_name"] = df_fimo["sequence_name"].replace({"1":"chr1", "2":"chr2", "3":"chr3", "4":"chr4", "5":"chr5"})
df_fimo = df_fimo.query("sequence_name == @chromosome_")
df_peaks = pd.read_csv(args.peak_file, sep="\t", header=None)
df_peaks.columns =["chromosome", "start", "end", "name", "score", "strand", "signalValue", "pValue", "qValue", "peak"]
df_peaks = df_peaks.query("chromosome == @chromosome_")
half_length_of_motif = int(len(df_fimo.iloc[0]["matched_sequence"])/2)
all_fimo_indices = df_fimo["start"].values + half_length_of_motif
def check_hit(row, indices=pd.Series(all_fimo_indices)):
return indices.between(row["start"],row["end"]).any()
df_peaks["hit"] = df_peaks.apply(check_hit, axis = 1)
total_fimo_hits += len(df_fimo)
total_peaks += len(df_peaks)
peaks_hit += len(df_peaks.query("hit == True"))
peaks_not_hit += len(df_peaks.query("hit == False"))
final_df = | pd.DataFrame([[total_fimo_hits,total_peaks,peaks_hit,peaks_not_hit]], columns=final_df.columns) | pandas.DataFrame |
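# --- Illustrative alternative (not part of the original script) ---
# The row-wise apply above re-scans every motif midpoint for each peak. An
# equivalent vectorised check can be done with a single numpy broadcast; this
# is a sketch of that approach (memory-hungry for very large inputs), not the
# original code.
import numpy as np

def peaks_hit_vectorised(df_peaks, motif_midpoints):
    starts = df_peaks["start"].to_numpy()[:, None]
    ends = df_peaks["end"].to_numpy()[:, None]
    mids = np.asarray(motif_midpoints)[None, :]
    # a peak is "hit" if any motif midpoint falls within [start, end]
    return ((mids >= starts) & (mids <= ends)).any(axis=1)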
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
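# Note: the helper above splits the file into num_tasks contiguous row ranges,
# reads each range in a ThreadPool worker (the first worker keeps the header
# row, the others skip past their offset and read header-less), re-applies the
# first chunk's column names to every result and concatenates the chunks, so
# that the parser tests can exercise concurrent reads of the same file.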
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona
from shapely.geometry import MultiPolygon, shape, point, box
from descartes import PolygonPatch
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import geopandas as gpd
# data wrangling libraries
import ftplib, urllib.request, wget, bz2
from bs4 import BeautifulSoup as bs
class ogh_meta:
"""
The json file that describes the Gridded climate data products
"""
def __init__(self):
self.__meta_data = dict(json.load(open('ogh_meta.json','rb')))
# key-value retrieval
def __getitem__(self, key):
return(self.__meta_data[key])
# key list
def keys(self):
return(self.__meta_data.keys())
# value list
def values(self):
return(self.__meta_data.values())
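# Usage sketch for ogh_meta (assumes 'ogh_meta.json' exists in the working
# directory, as required by __init__; the key shown is hypothetical and
# depends on the json contents):
#
#   meta = ogh_meta()
#   print(list(meta.keys()))
#   print(meta['dailymet_livneh2013'])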
# print('Version '+datetime.fromtimestamp(os.path.getmtime('ogh.py')).strftime('%Y-%m-%d %H:%M:%S')+' jp')
def saveDictOfDf(outfilename, dictionaryObject):
# write a dictionary of dataframes to a pickle file
with open(outfilename, 'wb') as f:
pickle.dump(dictionaryObject, f)
f.close()
def readDictOfDf(infilename):
# read a dictionary of dataframes back from a pickle file
with open(infilename, 'rb') as f:
dictionaryObject = pickle.load(f)
f.close()
return(dictionaryObject)
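# Round-trip sketch for the two pickle helpers above (file name is arbitrary):
#
#   frames = {'demo': pd.DataFrame({'LAT': [47.5], 'LONG_': [-121.5]})}
#   saveDictOfDf('frames.pickle', frames)
#   frames_back = readDictOfDf('frames.pickle')
#   assert frames_back['demo'].equals(frames['demo'])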
def reprojShapefile(sourcepath, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None):
"""
sourcepath: (dir) the path to the .shp file
newprojdictionary: (dict) the new projection definition in the form of a dictionary (default provided)
outpath: (dir) the output path for the new shapefile
"""
# if outpath is none, treat the reprojection as a file replacement
if isinstance(outpath, type(None)):
outpath = sourcepath
shpfile = gpd.GeoDataFrame.from_file(sourcepath)
shpfile = shpfile.to_crs(newprojdictionary)
shpfile.to_file(outpath)
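# Usage sketch (hypothetical shapefile path; with outpath=None the source
# file is overwritten in place with the WGS84 longlat projection):
#
#   reprojShapefile('data/watershed.shp')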
def getFullShape(shapefile):
"""
Generate a MultiPolygon to represent each shape/polygon within the shapefile
shapefile: (dir) a path to the ESRI .shp shapefile
"""
shp = fiona.open(shapefile)
mp = [shape(pol['geometry']) for pol in shp]
mp = MultiPolygon(mp)
shp.close()
return(mp)
def getShapeBbox(polygon):
"""
Generate a geometric box to represent the bounding box for the polygon, shapefile connection, or MultiPolygon
polygon: (geometry) a geometric polygon, MultiPolygon, or shapefile connection
"""
# identify the cardinal bounds
minx, miny, maxx, maxy = polygon.bounds
bbox = box(minx, miny, maxx, maxy, ccw=True)
return(bbox)
def readShapefileTable(shapefile):
"""
read in the datatable captured within the shapefile properties
shapefile: (dir) a path to the ESRI .shp shapefile
"""
#cent_df = gpd.read_file(shapefile)
shp = fiona.open(shapefile)
centroid = [eachpol['properties'] for eachpol in shp]
cent_df = pd.DataFrame.from_dict(centroid, orient='columns')
shp.close()
return(cent_df)
def filterPointsinShape(shape, points_lat, points_lon, points_elev=None, buffer_distance=0.06, buffer_resolution=16,
labels=['LAT', 'LONG_', 'ELEV']):
"""
filter for datafiles that can be used
shape: (geometry) a geometric polygon or MultiPolygon
points_lat: (series) a series of latitude points in WGS84 projection
points_lon: (series) a series of longitude points in WGS84 projection
points_elev: (series) a series of elevation points in meters; optional - default is None
buffer_distance: (float64) a numerical multiplier to increase the geodetic boundary area
buffer_resolution: (float64) the increments between geodetic longlat degrees
labels: (list) a list of preferred labels for latitude, longitude, and elevation
"""
# add buffer region
region = shape.buffer(buffer_distance, resolution=buffer_resolution)
# construct points_elev if null
if isinstance(points_elev, type(None)):
points_elev=np.repeat(np.nan, len(points_lon))
# Intersection each coordinate with the region
limited_list = []
for lon, lat, elev in zip(points_lon, points_lat, points_elev):
gpoint = point.Point(lon, lat)
if gpoint.intersects(region):
limited_list.append([lat, lon, elev])
maptable = pd.DataFrame.from_records(limited_list, columns=labels)
## dask approach ##
#intersection=[]
#for lon, lat, elev in zip(points_lon, points_lat, points_elev):
# gpoint = point.Point(lon, lat)
# intersection.append(dask.delayed(gpoint.intersects(region)))
# limited_list.append([intersection, lat, lon, elev])
# convert to dataframe
#maptable = pd.DataFrame({labels[0]:points_lat, labels[1]:points_lon, labels[2]:points_elev}
# .loc[dask.compute(intersection)[0],:]
# .reset_index(drop=True)
return(maptable)
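# Minimal sketch using a synthetic square instead of a real watershed polygon;
# only the first point falls inside the buffered square:
#
#   square = box(-122.0, 47.0, -121.0, 48.0)
#   pts = pd.DataFrame({'LAT': [47.5, 49.5], 'LONG_': [-121.5, -121.5]})
#   inside = filterPointsinShape(square, points_lat=pts.LAT, points_lon=pts.LONG_)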
def scrapeurl(url, startswith=None, hasKeyword=None):
"""
scrape the gridded datafiles from a url of interest
url: (str) the web folder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
hasKeyword: (str) keywords represented in a webpage element; default is None
"""
# grab the html of the url, and prettify the html structure
page = urllib.request.urlopen(url).read()
page_soup = bs(page, 'lxml')
page_soup.prettify()
# loop through and filter the hyperlinked lines
if pd.isnull(startswith):
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if hasKeyword in anchor['href']]
else:
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if anchor['href'].startswith(startswith)]
# convert to dataframe then separate the lon and lat as float coordinate values
temp = pd.DataFrame(temp, columns = ['filenames'])
return(temp)
def treatgeoself(shapefile, NAmer, folder_path=os.getcwd(), outfilename='mappingfile.csv', buffer_distance=0.06):
"""
TreatGeoSelf to some [data] lovin'!
shapefile: (dir) the path to an ESRI shapefile for the region of interest
NAmer: (dir) the path to an ESRI shapefile, which has each 1/16th coordinate and elevation information from a DEM
folder_path: (dir) the destination folder path for the mappingfile output; default is the current working directory
outfilename: (str) the name of the output file; default name is 'mappingfile.csv'
buffer_distance: (float64) the multiplier to be applied for increasing the geodetic boundary area; default is 0.06
"""
# conform projections to longlat values in WGS84
reprojShapefile(shapefile, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None)
# read shapefile into a multipolygon shape-object
shape_mp = getFullShape(shapefile)
# read in the North American continental DEM points for the station elevations
NAmer_datapoints = readShapefileTable(NAmer).rename(columns={'Lat':'LAT','Long':'LONG_','Elev':'ELEV'})
# generate maptable
maptable = filterPointsinShape(shape_mp,
points_lat=NAmer_datapoints.LAT,
points_lon=NAmer_datapoints.LONG_,
points_elev=NAmer_datapoints.ELEV,
buffer_distance=buffer_distance, buffer_resolution=16, labels=['LAT', 'LONG_', 'ELEV'])
maptable.reset_index(inplace=True)
maptable = maptable.rename(columns={"index":"FID"})
print(maptable.shape)
print(maptable.tail())
# print the mappingfile
mappingfile=os.path.join(folder_path, outfilename)
maptable.to_csv(mappingfile, sep=',', header=True, index=False)
return(mappingfile)
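# Usage sketch (both shapefile paths are hypothetical and must exist locally;
# the NAmer DEM shapefile needs Lat/Long/Elev attributes as described above):
#
#   mappingfile = treatgeoself(shapefile='data/watershed.shp',
#                              NAmer='data/NAmer_dem_list.shp',
#                              folder_path=os.getcwd(),
#                              outfilename='mappingfile.csv')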
def mapContentFolder(resid):
"""
map the content folder within HydroShare
resid: (str) a string hash that represents the hydroshare resource that has been migrated
"""
path = os.path.join('/home/jovyan/work/notebooks/data', str(resid), str(resid), 'data/contents')
return(path)
# ### CIG (DHSVM)-oriented functions
def compile_bc_Livneh2013_locations(maptable):
"""
compile a list of file URLs for bias corrected Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/Livneh/bcLivneh_WWA_2013/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_Livneh2013_locations(maptable):
"""
compile a list of file URLs for Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://www.cses.washington.edu/rocinante/Livneh/Livneh_WWA_2013/forcs_dhsvm/',basename]
locations.append(''.join(url))
return(locations)
### VIC-oriented functions
def compile_VICASCII_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 VIC.ASCII outputs
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Fluxes_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/VIC.ASCII/latitude.",str(row['LAT']),'/',loci,'.bz2']
locations.append(''.join(url))
return(locations)
def compile_VICASCII_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 VIC.ASCII outputs for the USA
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2/',
startswith='fluxes')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['VIC_fluxes_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
### Climate (Meteorological observations)-oriented functions
def canadabox_bc():
"""
Establish the Canadian (north of the US bounding boxes) Columbia river basin bounding box
"""
# left, bottom, right top
return(box(-138.0, 49.0, -114.0, 53.0))
def scrape_domain(domain, subdomain, startswith=None):
"""
scrape the gridded datafiles from a url of interest
domain: (str) the web folder path
subdomain: (str) the subfolder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
"""
# connect to domain
ftp = ftplib.FTP(domain)
ftp.login()
ftp.cwd(subdomain)
# scrape for data directories
tmp = [dirname for dirname in ftp.nlst() if dirname.startswith(startswith)]
geodf = pd.DataFrame(tmp, columns=['dirname'])
# conform to bounding box format
tmp = geodf['dirname'].apply(lambda x: x.split('.')[1:])
tmp = tmp.apply(lambda x: list(map(float,x)) if len(x)>2 else x)
# assemble the boxes
geodf['bbox']=tmp.apply(lambda x: box(x[0]*-1, x[2]-1, x[1]*-1, x[3]) if len(x)>2 else canadabox_bc())
return(geodf)
def mapToBlock(df_points, df_regions):
for index, eachblock in df_regions.iterrows():
for ind, row in df_points.iterrows():
if point.Point(row['LONG_'], row['LAT']).intersects(eachblock['bbox']):
df_points.loc[ind, 'blocks'] = str(eachblock['dirname'])
return(df_points)
def compile_dailyMET_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2/',
startswith='data')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
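# Example sketch (hypothetical 1/16-degree coordinate; building the URL list
# requires FTP access to livnehpublicstorage.colorado.edu because the block
# directories are scraped first):
#
#   maptable = pd.DataFrame({'FID': [0], 'LAT': [47.40625],
#                            'LONG_': [-121.59375], 'ELEV': [921.0]})
#   urls = compile_dailyMET_Livneh2013_locations(maptable)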
def compile_dailyMET_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/ascii/daily/latitude.", str(row['LAT']),"/",loci,".bz2"]
locations.append(''.join(url))
return(locations)
# ### WRF-oriented functions
def compile_wrfnnrp_raw_Salathe2014_locations(maptable):
"""
compile a list of file URLs for Salathe et al., 2014 raw WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/raw/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_wrfnnrp_bc_Salathe2014_locations(maptable):
"""
compile a list of file URLs for the Salathe et al., 2014 bias corrected WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/bc/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
# ## Data file migration functions
def ensure_dir(f):
"""
check if the destination folder directory exists; if not, create it and set it as the working directory
f: (dir) the directory to create and/or set as working directory
"""
if not os.path.exists(f):
os.makedirs(f)
os.chdir(f)
def wget_download(listofinterest):
"""
Download files from an http domain
listofinterest: (list) a list of urls to request
"""
# check and download each location point, if it doesn't already exist in the download directory
for fileurl in listofinterest:
basename = os.path.basename(fileurl)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
# Download the files to the subdirectory
def wget_download_one(fileurl):
"""
Download a file from an http domain
fileurl: (url) a url to request
"""
# check and download each location point, if it doesn't already exist in the download directory
basename=os.path.basename(fileurl)
# if it exists, remove for new download (overwrite mode)
if os.path.isfile(basename):
os.remove(basename)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
def wget_download_p(listofinterest, nworkers=20):
"""
Download files from an http domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 20
"""
pool = Pool(int(nworkers))
pool.map(wget_download_one, listofinterest)
pool.close()
pool.terminate()
def ftp_download(listofinterest):
"""
Download and decompress files from an ftp domain
listofinterest: (list) a list of urls to request
"""
for loci in listofinterest:
# establish path info
fileurl=loci.replace('ftp://','') # loci already includes the domain and path; strip only the scheme
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_one(loci):
"""
Download and decompress a file from an ftp domain
loci: (url) a url to request
"""
# establish path info
fileurl=loci.replace('ftp://','') # loci already includes the domain and path; strip only the scheme
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_p(listofinterest, nworkers=5):
"""
Download and decompress files from an ftp domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 5
"""
pool = Pool(int(nworkers))
pool.map(ftp_download_one, listofinterest)
pool.close()
pool.terminate()
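# Usage sketch (the URL list would normally come from one of the compile_*
# helpers above; files are downloaded and decompressed into the current
# working directory):
#
#   urls = compile_dailyMET_Livneh2013_locations(pd.read_csv('mappingfile.csv'))
#   ftp_download_p(urls, nworkers=5)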
def decompbz2(filename):
"""
Extract a file from a bz2 file of the same name, then remove the bz2 file
filename: (dir) the file path for a bz2 compressed file
"""
with open(filename.split(".bz2",1)[0], 'wb') as new_file, open(filename, 'rb') as zipfile:
decompressor = bz2.BZ2Decompressor()
for data in iter(lambda : zipfile.read(100 * 1024), b''):
new_file.write(decompressor.decompress(data))
os.remove(filename)
zipfile.close()
new_file.close()
print(os.path.splitext(filename)[0] + ' unzipped')
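# Usage sketch (hypothetical archive name; the .bz2 file is removed after a
# successful extraction):
#
#   decompbz2('Meteorology_Livneh_CONUSExt_v.1.2_2013_47.40625_-121.59375.bz2')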
def catalogfiles(folderpath):
"""
make a catalog of the gridded files within a folderpath
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
"""
# read in downloaded files
temp = [eachfile for eachfile in os.listdir(folderpath) if not os.path.isdir(os.path.join(folderpath, eachfile))]
if len(temp)==0:
# no files were available; setting default catalog output structure
catalog = pd.DataFrame([], columns=['filenames','LAT','LONG_'])
else:
# create the catalog dataframe and extract the filename components
catalog = pd.DataFrame(temp, columns=['filenames'])
catalog[['LAT','LONG_']] = catalog['filenames'].apply(lambda x: pd.Series(str(x).rsplit('_',2))[1:3]).astype(float)
# convert the filenames column to a filepath
catalog['filenames'] = catalog['filenames'].apply(lambda x: os.path.join(folderpath, x))
return(catalog)
def addCatalogToMap(outfilepath, maptable, folderpath, catalog_label):
"""
Update the mappingfile with a new column, a vector of filepaths for the downloaded files
outfilepath: (dir) the path for the output file
maptable: (dataframe) a dataframe containing the FID, LAT, LONG_, and ELEV information
    folderpath: (dir) the folder of files to be cataloged, which have LAT and LONG_ as the last two filename features
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# assert catalog_label as a string-object
catalog_label = str(catalog_label)
# catalog the folder directory
catalog = catalogfiles(folderpath).rename(columns={'filenames':catalog_label})
# drop existing column
if catalog_label in maptable.columns:
maptable = maptable.drop(labels=catalog_label, axis=1)
# update with a vector for the catalog of files
maptable = maptable.merge(catalog, on=['LAT','LONG_'], how='left')
# remove blocks, if they were needed
if 'blocks' in maptable.columns:
maptable = maptable.drop(labels=['blocks'], axis=1)
# write the updated mappingfile
maptable.to_csv(outfilepath, header=True, index=False)
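# Sketch of how catalogfiles() and addCatalogToMap() fit together: a folder of
# downloaded grid-cell files is cataloged by LAT and LONG_, then merged back into the
# mapping file as a new column. The paths and the catalog label are hypothetical.
def _example_addCatalogToMap(mappingfile, folderpath):
    maptable = pd.read_csv(mappingfile)
    addCatalogToMap(outfilepath=mappingfile, maptable=maptable,
                    folderpath=folderpath, catalog_label='example_catalog')
    return pd.read_csv(mappingfile).head()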
# Wrapper scripts
def getDailyMET_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/raw', catalog_label='dailymet_livneh2013'):
"""
    Get the Livneh et al., 2013 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# check and generate DailyMET livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
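# Hypothetical call of the wrapper above; the same pattern applies to the other
# getDaily*() wrappers below. homedir and mappingfile are placeholder paths supplied
# by the caller.
def _example_getDailyMET_livneh2013(homedir, mappingfile):
    filedir = getDailyMET_livneh2013(homedir=homedir, mappingfile=mappingfile)
    print('files downloaded into: ' + filedir)
    return filedir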
def getDailyMET_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_MET_1950_2013/raw', catalog_label='dailymet_livneh2015'):
"""
    Get the Livneh et al., 2015 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# check and generate Daily MET livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_bcLivneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/bc', catalog_label='dailymet_bclivneh2013'):
"""
    Get the Livneh et al., 2013 bias corrected Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# check and generate baseline_corrected livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_bc_Livneh2013_locations(maptable)
# download the files
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_VIC_1915_2011', catalog_label='dailyvic_livneh2013'):
"""
    Get the Livneh et al., 2013 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# FIRST RUN
# check and generate VIC_ASCII Flux model livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points for USA
locations = compile_VICASCII_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_VIC_1950_2013', catalog_label='dailyvic_livneh2015'):
"""
    Get the Livneh et al., 2015 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# check and generate Daily VIC.ASCII Flux model livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_VICASCII_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_salathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/raw', catalog_label='dailywrf_salathe2014'):
"""
    Get the Salathe et al., 2014 raw Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# check and generate the Daily Meteorology raw WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_raw_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_bcsalathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/bc', catalog_label='dailywrf_bcsalathe2014'):
"""
    Get the Salathe et al., 2014 bias corrected Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
    catalog_label: (str) the preferred name for the series of cataloged filepaths
"""
# check and generate the Daily Meteorology bias corrected WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_bc_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
# # Data Processing libraries
def filesWithPath(folderpath):
"""
Create a list of filepaths for the files
folderpath: (dir) the folder of interest
"""
files =[os.path.join(folderpath, eachfile) for eachfile in os.listdir(folderpath)
if not eachfile.startswith('.') and not os.path.isdir(eachfile)] # exclude hidden files
return(files)
def compareonvar(map_df, colvar='all'):
"""
subsetting a dataframe based on some columns of interest
map_df: (dataframe) the dataframe of the mappingfile table
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# apply row-wise inclusion based on a subset of columns
if isinstance(colvar, type(None)):
return(map_df)
    if colvar == 'all':
# compare on all columns except the station info
return(map_df.dropna())
else:
# compare on only the listed columns
return(map_df.dropna(subset=colvar))
def mappingfileToDF(mappingfile, colvar='all'):
"""
read in a dataframe and subset based on columns of interest
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# Read in the mappingfile as a data frame
map_df = pd.read_csv(mappingfile)
    # select rows (datafiles) based on the colvar(s) chosen; default is 'all'
map_df = compareonvar(map_df=map_df, colvar=colvar)
# compile summaries
print(map_df.head())
print('Number of gridded data files:'+ str(len(map_df)))
print('Minimum elevation: ' + str(np.min(map_df.ELEV))+ 'm')
print('Mean elevation: '+ str(np.mean(map_df.ELEV))+ 'm')
print('Maximum elevation: '+ str(np.max(map_df.ELEV))+ 'm')
return(map_df, len(map_df))
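# Example use of mappingfileToDF(): with colvar='all', only grid cells that have a
# file for every cataloged dataset are retained. The path is a placeholder.
def _example_mappingfileToDF(mappingfile):
    map_df, n_stations = mappingfileToDF(mappingfile, colvar='all')
    print(str(n_stations) + ' stations retained')
    return map_df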
def read_in_all_files(map_df, dataset, metadata, file_start_date, file_end_date, file_time_step, file_colnames, file_delimiter, subset_start_date, subset_end_date):
"""
Read in files based on dataset label
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
    dataset: (str) the name of the dataset cataloged into map_df
    metadata: (dict) the dictionary that contains the metadata explanations; default is None
file_colnames: (list) the list of shorthand variables; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
    # extract metadata if the information is not provided
if pd.notnull(metadata):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
#initialize dictionary and time sequence
df_dict=dict()
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step) # daily
# import data for all climate stations
for ind, row in map_df.iterrows():
tmp = pd.read_table(row[dataset], header=None, delimiter=file_delimiter, names=file_colnames)
tmp.set_index(met_daily_dates, inplace=True)
# subset to the date range of interest (default is file date range)
tmp = tmp.iloc[(met_daily_dates>=subset_start_date) & (met_daily_dates<=subset_end_date),:]
# set row indices
df_dict[tuple(row[['FID','LAT','LONG_']].tolist())] = tmp
return(df_dict)
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter, file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev):
"""
# reads in the files to generate variables dataframes
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
    dataset: (str) the name of the dataset cataloged into map_df
    metadata: (dict) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# start time
starttime = pd.datetime.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]),:]
print('Number of data files within elevation range ('+str(min_elev)+':'+str(max_elev)+'): '+str(len(map_df)))
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID','LAT','LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
        # report progress before computing this variable's dataframe
        print(eachvar + ' dataframe reading to start: ' + str(pd.datetime.now() - starttime))
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar+ ' dataframe complete:' + str(pd.datetime.now()-starttime))
return(df_dict)
def read_daily_streamflow(file_name, drainage_area_m2, file_colnames=None, delimiter='\t', header='infer'):
# read in a daily streamflow data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if file_colnames is not None:
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
# calculate cfs to cms conversion, or vice versa
if 'flow_cfs' in daily_data.columns:
flow_cfs=daily_data['flow_cfs']
flow_cms=flow_cfs/(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
elif 'flow_cms' in daily_data.columns:
flow_cms=daily_data['flow_cms']
flow_cfs=flow_cms*(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
# determine the datetime
    # determine the datetime from the year, month, and day columns
    row_dates = pd.to_datetime(daily_data[['year', 'month', 'day']])
# generate the daily_flow and set the datetime as row indices
daily_flow=pd.concat([flow_cfs, flow_cms, flow_mmday],axis=1)
daily_flow.set_index(row_dates, inplace=True)
daily_flow.columns=['flow_cfs', 'flow_cms', 'flow_mmday']
return(daily_flow)
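# Hedged example of read_daily_streamflow(); the file path, column order, and drainage
# area are placeholders. The returned table holds flow in cfs, cms, and mm/day.
def _example_read_daily_streamflow(file_name):
    daily_flow = read_daily_streamflow(file_name=file_name,
                                       drainage_area_m2=500 * 1e6,
                                       file_colnames=['year', 'month', 'day', 'flow_cfs'])
    return daily_flow.head()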
def read_daily_precip(file_name, file_colnames=None, header='infer', delimiter='\s+'):
# read in a daily precipitation data set
# if file_colnames are supplied, use header=None
    if pd.notnull(file_colnames):
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if pd.notnull(file_colnames):
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
    # convert precipitation from meters to millimeters
if 'precip_m' in daily_data.columns:
precip_m=daily_data['precip_m']
precip_mm=precip_m*1000
# determine the datetime
    # determine the datetime from the year, month, and day columns
    row_dates = pd.to_datetime(daily_data[['year', 'month', 'day']])
    # generate the daily_precip table and set the datetime as row indices
daily_precip=pd.concat([precip_m, precip_mm],axis=1)
daily_precip.set_index(row_dates, inplace=True)
daily_precip.columns=['precip_m', 'precip_mm']
return(daily_precip)
def read_daily_snotel(file_name, file_colnames=None, usecols=None, delimiter=',', header='infer'):
# read in a daily SNOTEL observation data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, usecols=usecols, header=header, delimiter=delimiter)
# reset the colnames
daily_data.columns=['Date', 'Tmax_C', 'Tmin_C', 'Tavg_C', 'Precip_mm']
# transform the data
daily_data['Tmax_C']=(daily_data['Tmax_C'] -32)/1.8
daily_data['Tmin_C']=(daily_data['Tmin_C'] -32)/1.8
daily_data['Tavg_C']=(daily_data['Tavg_C'] -32)/1.8
daily_data['Precip_mm']=daily_data['Precip_mm'] *25.4
# determine the datetime
row_dates=pd.to_datetime(daily_data.Date)
    # generate the daily_snotel table and set the datetime as row indices
daily_snotel=daily_data[['Tmax_C', 'Tmin_C', 'Tavg_C', 'Precip_mm']]
daily_snotel.set_index(row_dates, inplace=True)
return(daily_snotel)
def read_daily_coop(file_name, file_colnames=None, usecols=None, delimiter=',', header='infer'):
# read in a daily COOP observation data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, usecols=usecols, header=header, delimiter=delimiter,
date_parser=lambda x: pd.datetime.strptime(x, '%Y%m%d'),
parse_dates=[0],
na_values=-9999)
# reset the colnames
daily_data.columns=['Date', 'Precip_mm','Tmax_C', 'Tmin_C', 'Tavg_C']
# transform the data
daily_data['Tmax_C']=(daily_data['Tmax_C'] -32)/1.8
daily_data['Tmin_C']=(daily_data['Tmin_C'] -32)/1.8
daily_data['Tavg_C']=(daily_data['Tavg_C'] -32)/1.8
daily_data['Precip_mm']=daily_data['Precip_mm'] *25.4
# determine the datetime
row_dates=pd.to_datetime(daily_data.Date)
    # generate the daily_coop table and set the datetime as row indices
daily_coop=daily_data[['Precip_mm','Tmax_C', 'Tmin_C', 'Tavg_C']]
daily_coop.set_index(row_dates, inplace=True)
return(daily_coop)
# ### Data Processing functions
def generateVarTables(file_dict, gridclimname, dataset, metadata, df_dict=None):
"""
Slice the files by their common variable
    file_dict: (dict) a dictionary of dataframes for each tabular datafile
dataset: (str) the name of the dataset
metadata (dict) the dictionary that contains the metadata explanations; default is None
"""
# combine the files into a pandas panel
panel = pd.Panel.from_dict(file_dict)
# initiate output dictionary
if pd.isnull(df_dict):
df_dict = dict()
# slice the panel for each variable in list
for eachvar in metadata[dataset]['variable_list']:
df_dict['_'.join([eachvar, gridclimname])] = panel.xs(key=eachvar, axis=2)
return(df_dict)
# compare two date sets for the start and end of the overlapping dates
def overlappingDates(date_set1, date_set2):
    # find the later of the two start dates
if date_set1[0] > date_set2[0]:
start_date = date_set1[0]
else:
start_date = date_set2[0]
    # find the earlier of the two end dates
if date_set1[-1] < date_set2[-1]:
end_date = date_set1[-1]
else:
end_date = date_set2[-1]
return(start_date, end_date)
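# Worked example for overlappingDates(): the overlap of 1950-2011 with 1915-2010 is
# 1950-01-01 through 2010-12-31. The dates are illustrative.
def _example_overlappingDates():
    dates1 = pd.date_range('1950-01-01', '2011-12-31', freq='D')
    dates2 = pd.date_range('1915-01-01', '2010-12-31', freq='D')
    start_date, end_date = overlappingDates(dates1, dates2)
    return start_date, end_date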
# Calculate means by 8 different methods
def multigroupMeans(VarTable, n_stations, start_date, end_date):
Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# e.g., Mean monthly temperature at each station
month_daily=Var_daily.groupby(Var_daily.index.month).mean() # average monthly minimum temperature at each station
# e.g., Mean monthly temperature averaged for all stations in analysis
meanmonth_daily=month_daily.mean(axis=1)
    # e.g., Mean monthly temperature for minimum and maximum elevation stations
    # (analysis_elev_max_station and analysis_elev_min_station must be defined in the enclosing scope)
    meanmonth_min_maxelev_daily = Var_daily.loc[:, analysis_elev_max_station].groupby(Var_daily.index.month).mean()
    meanmonth_min_minelev_daily = Var_daily.loc[:, analysis_elev_min_station].groupby(Var_daily.index.month).mean()
# e.g., Mean annual temperature
year_daily=Var_daily.groupby(Var_daily.index.year).mean()
# e.g., mean annual temperature each year for all stations
meanyear_daily=year_daily.mean(axis=1)
# e.g., mean annual min temperature for all years, for all stations
meanallyear_daily=np.nanmean(meanyear_daily)
    # e.g., anomaly per year compared to average
anom_year_daily=meanyear_daily-meanallyear_daily
return(month_daily,
meanmonth_daily,
meanmonth_min_maxelev_daily,
meanmonth_min_minelev_daily,
year_daily,
meanyear_daily,
meanallyear_daily,
anom_year_daily)
def specialTavgMeans(VarTable):
    # note: start_date, end_date, and n_stations must be defined in the enclosing scope
    Var_daily = VarTable.loc[start_date:end_date, range(0, n_stations)]
# Average temperature for each month at each station
permonth_daily=Var_daily.groupby(pd.TimeGrouper("M")).mean()
# Average temperature each month averaged at all stations
meanpermonth_daily=permonth_daily.mean(axis=1)
# Average monthly temperature for all stations
meanallpermonth_daily=meanpermonth_daily.mean(axis=0)
    # anomaly per year compared to average
anom_month_daily=(meanpermonth_daily-meanallpermonth_daily)/1000
return(permonth_daily,
meanpermonth_daily,
meanallpermonth_daily,
anom_month_daily)
def aggregate_space_time_average(VarTable, df_dict, suffix, start_date, end_date):
"""
VarTable: (dataframe) a dataframe with date ranges as the index
df_dict: (dict) a dictionary to which computed outputs will be stored
suffix: (str) a string representing the name of the original table
start_date: (date) the start of the date range within the original table
end_date: (date) the end of the date range within the original table
"""
starttime = pd.datetime.now()
# subset dataframe to the date range of interest
Var_daily = VarTable.loc[start_date:end_date,:]
# Mean monthly temperature at each station
df_dict['month_'+suffix] = Var_daily.groupby(Var_daily.index.month).mean()
# Mean monthly temperature averaged for all stations in analysis
df_dict['meanmonth_'+suffix] = Var_daily.groupby(Var_daily.index.month).mean().mean(axis=1)
# Mean annual temperature
df_dict['year_'+suffix] = Var_daily.groupby(Var_daily.index.year).mean()
# mean annual temperature each year for all stations
df_dict['meanyear_'+suffix] = Var_daily.groupby(Var_daily.index.year).mean().mean(axis=1)
# mean annual temperature for all years, for all stations
df_dict['meanallyear_'+suffix] = Var_daily.mean(axis=1).mean(axis=0)
# anomaly per year compared to average
df_dict['anom_year_'+suffix] = df_dict['meanyear_'+suffix] - df_dict['meanallyear_'+suffix]
print(suffix+ ' calculations completed in ' + str(pd.datetime.now()-starttime))
return(df_dict)
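# Minimal sketch of aggregate_space_time_average() using a synthetic two-station
# dataframe; keys such as 'month_demo' and 'meanmonth_demo' are added to the returned dict.
def _example_aggregate_space_time_average():
    dates = pd.date_range('2000-01-01', '2001-12-31', freq='D')
    demo = pd.DataFrame(np.random.rand(len(dates), 2), index=dates, columns=[0, 1])
    out = aggregate_space_time_average(VarTable=demo, df_dict=dict(), suffix='demo',
                                       start_date=dates[0], end_date=dates[-1])
    return sorted(out.keys())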
def aggregate_space_time_sum(VarTable, n_stations, start_date, end_date):
Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# Average precipitation per month at each station
permonth_daily=Var_daily.groupby(pd.TimeGrouper("M")).sum()
# Average precipitation per month averaged at all stations
meanpermonth_daily=permonth_daily.mean(axis=1)
# Average monthly precipitation averaged at all stations
meanmonth_daily= meanpermonth_daily.groupby(meanpermonth_daily.index.month).mean()
return(Var_daily,
permonth_daily,
meanpermonth_daily,
meanmonth_daily)
def plotTavg(dictionary, loc_name, start_date, end_date):
# Plot 1: Monthly temperature analysis of Livneh data
    if 'meanmonth_temp_avg_liv2013_met_daily' not in dictionary.keys() and 'meanmonth_temp_avg_wrf2014_met_daily' not in dictionary.keys():
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_temp_avg_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_liv2013_met_daily'][wy_numbers],'r*--',linewidth=1, label='Liv Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_liv2013_met_daily'][wy_numbers],'rX--',linewidth=1, label='Liv Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
if 'meanmonth_temp_avg_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_wrf2014_met_daily'][wy_numbers],'b^--',linewidth=1, label='WRF Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_wrf2014_met_daily'][wy_numbers],'bo--',linewidth=1, label='WRF Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
if 'meanmonth_temp_avg_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g^--',linewidth=1, label='WRFbc Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'go--',linewidth=1, label='WRFbc Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Temperature (deg C)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Temperature\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('avg_monthly_temp'+str(loc_name)+'.png')
plt.show()
def plotPavg(dictionary, loc_name, start_date, end_date):
    # Plot 1: Monthly precipitation analysis of Livneh data
    if 'meanmonth_precip_liv2013_met_daily' not in dictionary.keys() and 'meanmonth_precip_wrf2014_met_daily' not in dictionary.keys():
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_precip_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_liv2013_met_daily'][wy_numbers],'r^--',linewidth=1, label='Liv Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_liv2013_met_daily'][wy_numbers],'ro--',linewidth=1, label='Liv Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
    if 'meanmonth_precip_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_wrf2014_met_daily'][wy_numbers],'b^--',linewidth=1, label='WRF Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_wrf2014_met_daily'][wy_numbers],'bo--',linewidth=1, label='WRF Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
    if 'meanmonth_precip_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g^--',linewidth=1, label='WRFbc Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'go--',linewidth=1, label='WRFbc Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Precip (mm)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Precipitation\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('avg_monthly_precip'+str(loc_name)+'.png')
plt.show()
def gridclim_dict(mappingfile, dataset, gridclimname=None, metadata=None, min_elev=None, max_elev=None,
file_start_date=None, file_end_date=None, file_time_step=None,
file_colnames=None, file_delimiter=None,
subset_start_date=None, subset_end_date=None, df_dict=None, colvar='all'):
"""
# pipelined operation for assimilating data, processing it, and standardizing the plotting
mappingfile: (dir) the path directory to the mappingfile
dataset: (str) the name of the dataset within mappingfile to use
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
metadata: (str) the dictionary that contains the metadata explanations; default is None
min_elev: (float) the minimum elevation criteria; default is None
max_elev: (float) the maximum elevation criteria; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
file_delimiter: (str) a file parsing character to be used for file reading
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
df_dict: (dict) an existing dictionary where new computations will be stored
"""
# generate the climate locations and n_stations
locations_df, n_stations = mappingfileToDF(mappingfile, colvar=colvar)
# generate the climate station info
if pd.isnull(min_elev):
min_elev = locations_df.ELEV.min()
if pd.isnull(max_elev):
max_elev = locations_df.ELEV.max()
    # extract metadata if the information is not provided
if not isinstance(metadata, type(None)):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
# take all defaults if subset references are null
if pd.isnull(subset_start_date):
subset_start_date = file_start_date
if pd.isnull(subset_end_date):
subset_end_date = file_end_date
    # initiate the output dictionary if df_dict is null
if pd.isnull(df_dict):
df_dict = dict()
if pd.isnull(gridclimname):
if pd.notnull(dataset):
gridclimname=dataset
else:
print('no suffix name provided. Provide a gridclimname or dataset label.')
return
    # assemble the stations within the min and max elevation ranges
locations_df = locations_df[(locations_df.ELEV >= min_elev) & (locations_df.ELEV <= max_elev)]
# create dictionary of dataframe
df_dict = read_files_to_vardf(map_df=locations_df,
dataset=dataset,
metadata=metadata,
gridclimname=gridclimname,
file_start_date=file_start_date,
file_end_date=file_end_date,
file_delimiter=file_delimiter,
file_time_step=file_time_step,
file_colnames=file_colnames,
subset_start_date=subset_start_date,
subset_end_date=subset_end_date,
min_elev=min_elev,
max_elev=max_elev,
df_dict=df_dict)
    # identify the variable dataframes generated for this dataset
vardf_list = [eachvardf for eachvardf in df_dict.keys() if eachvardf.endswith(gridclimname)]
# loop through the dictionary to compute each aggregate_space_time_average object
for eachvardf in vardf_list:
# update the dictionary with spatial and temporal average computations
df_dict.update(aggregate_space_time_average(VarTable=df_dict[eachvardf],
df_dict=df_dict,
suffix=eachvardf,
start_date=subset_start_date,
end_date=subset_end_date))
# if the number of stations exceeds 500, remove daily time-series dataframe
if len(locations_df)>500:
del df_dict[eachvardf]
return(df_dict)
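# Hedged end-to-end sketch of gridclim_dict(). It assumes the mapping file already has
# a 'dailymet_livneh2013' catalog column (e.g., from getDailyMET_livneh2013) and that a
# metadata dictionary is available from the accompanying ogh_meta.json file.
def _example_gridclim_dict(mappingfile):
    with open('ogh_meta.json', 'r') as fp:
        meta = json.load(fp)
    ltm = gridclim_dict(mappingfile=mappingfile,
                        dataset='dailymet_livneh2013',
                        gridclimname='liv2013_met_daily',
                        metadata=meta)
    return sorted(ltm.keys())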
def compute_diffs(df_dict, df_str, gridclimname1, gridclimname2, prefix1, prefix2=['meanmonth'], comp_dict=None):
    #Compute the difference between monthly means for some data (e.g., Temp) for two different gridded datasets (e.g., Liv, WRF)
if isinstance(comp_dict, type(None)):
comp_dict=dict()
for each1 in prefix1:
for each2 in prefix2:
comp_dict['_'.join([str(each1),df_str])] = df_dict['_'.join([each2,each1,gridclimname1])]-df_dict['_'.join([each2,each1,gridclimname2])]
return(comp_dict)
def compute_ratios(df_dict, df_str, gridclimname1, gridclimname2, prefix1, prefix2=['meanmonth'], comp_dict=None):
    #Compute the ratio of monthly means for some data (e.g., Precip) for two different gridded datasets (e.g., Liv, WRF)
if isinstance(comp_dict, type(None)):
comp_dict=dict()
for each1 in prefix1:
for each2 in prefix2:
comp_dict['_'.join([str(each1),df_str])] = df_dict['_'.join([each2,each1,gridclimname1])]/df_dict['_'.join([each2,each1,gridclimname2])]
return(comp_dict)
def compute_elev_diffs(df_dict, df_str, gridclimname1, prefix1, prefix2a='meanmonth_minelev_', prefix2b='meanmonth_maxelev_'):
    #Compute the difference between low- and high-elevation monthly means within a single gridded dataset
    comp_dict=dict()
for each1 in prefix1:
comp_dict[str(each1)+df_str] = df_dict[prefix2a+each1+gridclimname1]-df_dict[prefix2b+each1+gridclimname1]
return(comp_dict)
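# Sketch of the comparison helpers above: differences suit temperature and ratios suit
# precipitation. The gridclimname suffixes and prefix1 entries mirror the naming used
# elsewhere in this module and are assumptions about how df_dict was assembled.
def _example_compute_comparisons(df_dict):
    t_diffs = compute_diffs(df_dict=df_dict, df_str='t_diff',
                            gridclimname1='liv2013_met_daily',
                            gridclimname2='wrf2014_met_daily',
                            prefix1=['temp_avg'])
    p_ratios = compute_ratios(df_dict=df_dict, df_str='p_ratio',
                              gridclimname1='liv2013_met_daily',
                              gridclimname2='wrf2014_met_daily',
                              prefix1=['precip'])
    return t_diffs, p_ratios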
def monthlyBiasCorrection_deltaTratioP_Livneh_METinput(homedir, mappingfile, BiasCorr,
lowrange='0to1000m', LowElev=range(0,1000),
midrange='1000to1500m', MidElev=range(1001,1501),
highrange='1500to3000m', HighElev=range(1501,3000),
data_dir=None, file_start_date=None, file_end_date=None):
np.set_printoptions(precision=3)
    # take the liv2013 dataset date range as the default if file reference dates are not given
if isinstance(file_start_date, type(None)):
file_start_date = pd.datetime(1915,1,1)
if isinstance(file_end_date, type(None)):
file_end_date = pd.datetime(2011,12,31)
# generate the month vector
month = pd.date_range(start=file_start_date, end=file_end_date).month
month = pd.DataFrame({'month':month})
# create NEW directory
dest_dir = os.path.join(homedir, 'biascorrWRF_liv')
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
print('destdir created')
# read in the Elevation table
zdiff = pd.read_table(mappingfile, sep=',', header='infer')
zdiff = zdiff.rename(columns={'RASTERVALU':'Elev','ELEV':'Elev'})
zdiff = zdiff[['LAT','LONG_', 'Elev']]
zdiff['filename'] = zdiff[['LAT','LONG_']].apply(lambda x: '_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013',str(x[0]), str(x[1])]), axis=1)
#print(zdiff[0:10])
# lapse rate vector by month
# temperature adjustment vector by month
# identify the files to read
print('reading in data_long_lat files')
data_files = [os.path.join(data_dir,dat) for dat in os.listdir(data_dir) if os.path.basename(dat).startswith('Meteorology_Livneh_CONUSExt_v.1.2_2013')]
print('done reading data_long_lat files')
# loop through each file
for eachfile in data_files:
# subset the zdiff table using the eachfile's filename, then assign Elevation to equal the Elev value
Elevation = zdiff[zdiff['filename']==os.path.basename(eachfile)]['Elev'].reset_index(drop=True)
print(Elevation)
# decide on the elevation-based Tcorr
#print('Convert BiasCorr to a df')
if Elevation.iloc[0] in LowElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+lowrange)}
#BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
#BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in MidElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+midrange)}
#BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
#BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in HighElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+highrange)}
#BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
#BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
#print('reading in eachfile')
read_dat = pd.read_table(eachfile, delimiter='\s+', header=None)
read_dat.columns = ['precip', 'temp_max','temp_min','wind']
# print('done reading eachfile')
# extrapolate monthly values for each variable
for eachvar in ['precip', 'temp_max', 'temp_min']:
            # gather the bias-correction entries for this variable into one dataframe
            # (assumes each entry is a Series/DataFrame indexed by month with station columns)
            BiasCorr_sub_df = pd.concat([BiasCorr_sub[eachkey] for eachkey in BiasCorr_sub.keys()
                                         if eachkey.startswith(eachvar)], axis=1)
            # subset the column for the eachfile station number
            BiasCorr_sub_df = BiasCorr_sub_df.loc[:, zdiff[zdiff['filename'] == os.path.basename(eachfile)].index]
            BiasCorr_sub_df.columns = ['var']
            # regenerate the month
            BiasCorr_sub_df = BiasCorr_sub_df.reset_index().rename(columns={'index': 'month'})
            # generate the month-aligned correction vector
            month_obj = month.merge(BiasCorr_sub_df, how='left', on='month')
            # select the correction column by label ('var' cannot be accessed as an attribute because DataFrame.var is a method)
            s = pd.Series(month_obj['var'])
#
if eachvar=='precip':
read_dat[eachvar] = np.array(read_dat[eachvar])*np.array(s)
else:
read_dat[eachvar] = np.array(read_dat[eachvar])+np.array(s)
#print('grabbing the S vector of monthlapse after the merge between month and Tcorr_df')
#print('number of corrections to apply: '+str(len(month)))
# write it out to the new destination location
read_dat.to_csv(os.path.join(dest_dir, os.path.basename(eachfile)), sep='\t', header=None, index=False)
print(os.path.join(dest_dir, os.path.basename(eachfile)))
print('mission complete.')
print('this device will now self-destruct.')
print('just kidding.')
def monthlyBiasCorrection_WRFlongtermmean_elevationbins_METinput(homedir, mappingfile, BiasCorr,
lowrange='0to1000m', LowElev=range(0,1000),
midrange='1000to1500m', MidElev=range(1001,1501),
highrange='1500to3000m', HighElev=range(1501,3000),
data_dir=None,
file_start_date=None,
file_end_date=None):
np.set_printoptions(precision=3)
    # take the liv2013 dataset date range as the default if file reference dates are not given
if isinstance(file_start_date, type(None)):
file_start_date = pd.datetime(1950,1,1)
if isinstance(file_end_date, type(None)):
file_end_date = pd.datetime(2010,12,31)
# generate the month vector
month = pd.date_range(start=file_start_date, end=file_end_date).month
month = pd.DataFrame({'month':month})
# create NEW directory
dest_dir = os.path.join(homedir, 'biascorr_WRF_ltm')
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
print('destdir created')
# read in the Elevation table
zdiff = pd.read_table(mappingfile, sep=',', header='infer')
zdiff = zdiff.rename(columns={'RASTERVALU':'Elev','ELEV':'Elev'})
zdiff = zdiff[['LAT','LONG_', 'Elev']]
zdiff['filename'] = zdiff[['LAT','LONG_']].apply(lambda x: '_'.join(['data',str(x[0]), str(x[1])]), axis=1)
#print(zdiff[0:10])
# lapse rate vector by month
# temperature adjustment vector by month
# identify the files to read
print('reading in data_long_lat files')
data_files = [os.path.join(data_dir,dat) for dat in os.listdir(data_dir) if os.path.basename(dat).startswith('data')]
#print('done reading data_long_lat files')
# loop through each file
for eachfile in data_files:
# subset the zdiff table using the eachfile's filename, then assign Elevation to equal the Elev value
Elevation = zdiff[zdiff['filename']==os.path.basename(eachfile)]['Elev'].reset_index(drop=True)
print(Elevation)
# decide on the elevation-based Tcorr
#print('Convert BiasCorr to a df')
if Elevation.iloc[0] in LowElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+lowrange)}
BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in MidElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+midrange)}
BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in HighElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+highrange)}
BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
print('reading in eachfile')
read_dat = pd.read_table(eachfile, delimiter='\s+', header=None)
read_dat.columns = ['precip', 'Tmax','Tmin','wind']
print('done reading eachfile')
# extrapolate monthly values
month_obj = month.merge(BiasCorr_sub_df, how='left', on='month')
#print('merged month with Tcorr_df')
#print(month_obj.head(35))
# generate s-vectors
s1 = pd.Series(month_obj.Tmin)
s2 = pd.Series(month_obj.Tmax)
s3 = pd.Series(month_obj.precip)
read_dat['Tmin'] = np.array(read_dat.Tmin)+np.array(s1)
read_dat['Tmax'] = np.array(read_dat.Tmax)+np.array(s2)
read_dat['precip'] = np.array(read_dat.precip)*np.array(s3)
# write it out to the new destination location
read_dat.to_csv(os.path.join(dest_dir, os.path.basename(eachfile)), sep='\t', header=None, index=False)
print(os.path.join(dest_dir, os.path.basename(eachfile)))
print('mission complete.')
print('this device will now self-destruct.')
print('just kidding.')
def switchUpVICSoil(input_file=None,
output_file='soil',
mappingfile=None,
homedir=None):
#Read in table of VIC soil inputs -- assumes all Lat/Long set to zero
soil_base = pd.read_table(input_file,header=None)
#Make a list of all lat/long values
latlong=soil_base.apply(lambda x:tuple([x[2],x[3]]), axis=1)
#Read in mappingfile from TreatGeoSelf()
maptable = pd.read_table(mappingfile,sep=",")
#Make a list Lat/Long files that need to switched up
latlong_1=maptable.apply(lambda x:tuple([x['LAT'],x['LONG_']]), axis=1)
#Switch up from 0 to 1 so VIC will run for this Lat/Long point - print new output file (VIC model input file)
soil_base[0] = latlong.apply(lambda x: 1 if x in set(latlong_1) else 0)
soil_base.to_csv(output_file, header=False, index=False, sep="\t")
print(str(soil_base[0].sum()) +' VIC grid cells have successfully been switched up.')
print('Check your home directory for your new VIC soil model input set to your list of Lat/Long grid centroids.')
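# Hypothetical call of switchUpVICSoil(): input_file is an existing VIC soil parameter
# file whose run flag sits in column 0 and whose lat/long sit in columns 2-3.
def _example_switchUpVICSoil(homedir, mappingfile):
    switchUpVICSoil(input_file=os.path.join(homedir, 'soil_base'),
                    output_file=os.path.join(homedir, 'soil'),
                    mappingfile=mappingfile,
                    homedir=homedir)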
def makebelieve(homedir, mappingfile, BiasCorr, metadata, start_catalog_label, end_catalog_label,
file_start_date=None, file_end_date=None,
data_dir=None, dest_dir_suffix=None):
np.set_printoptions(precision=6)
    # take the start_catalog_label dataset date range as the default if file reference dates are not given
if isinstance(file_start_date, type(None)):
file_start_date = metadata[start_catalog_label]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[start_catalog_label]['end_date']
# generate the month vector
month = pd.date_range(start=file_start_date, end=file_end_date).month
month = pd.DataFrame({'month':month})
# create NEW directory
if isinstance(dest_dir_suffix, type(None)):
dest_dir_suffix = 'biascorr_output/'
dest_dir = os.path.join(homedir, dest_dir_suffix)
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
print('destdir created')
# read in the mappingfile
map_df, nstations = mappingfileToDF(mappingfile, colvar='all')
# compile the BiasCorr dictionary into a pandas panel
BiasCorr=pd.Panel.from_dict(BiasCorr)
# loop through each file
for ind, eachfile in enumerate(map_df.loc[:,start_catalog_label]):
# identify the file
station = map_df.loc[map_df.loc[:,start_catalog_label]==eachfile,['FID', 'LAT', 'LONG_']].reset_index(drop=True)
# subset the bias correction to the file at hand
print(str(ind)+' station: '+str(tuple(station.loc[0,:])))
BiasCorr_df = BiasCorr.xs(key=tuple(station.loc[0,:]),axis=2)
# read in the file to be corrected
read_dat = pd.read_table(eachfile, delimiter=metadata[start_catalog_label]['delimiter'],
header=None, names=metadata[start_catalog_label]['variable_list'])
# extrapolate monthly values for each variable
for eachvar in read_dat.columns:
# identify the corresponding bias correction key
for eachkey in BiasCorr_df.columns:
if eachkey.startswith(eachvar):
# subset the dataframe to the variable in loop
BiasCorr_subdf = BiasCorr_df.loc[:,eachkey]
# regenerate row index as month column
BiasCorr_subdf = BiasCorr_subdf.reset_index().rename(columns={'index':'month'})
# generate the s-vector
s = month.merge(BiasCorr_subdf, how='left', on='month').loc[:,eachkey]
if eachvar=='PRECIP':
#Use for ratio precip method
read_dat[eachvar] = np.multiply(np.array(read_dat.loc[:,eachvar]), np.array(s))
#read_dat[eachvar] = np.array(read_dat.loc[:,eachvar])+np.array(s)
#positiveprecip=read_dat[eachvar]
#positiveprecip[positiveprecip<0.]=0.
#read_dat[eachvar] = positiveprecip*.9842
else:
read_dat[eachvar] = np.array(read_dat.loc[:,eachvar])+np.array(s)
# write it out to the new destination location
filedest = os.path.join(dest_dir, os.path.basename(eachfile))
read_dat.to_csv(filedest, sep='\t', header=None, index=False, float_format='%.4f')
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=map_df, folderpath=dest_dir, catalog_label=end_catalog_label)
# append the source metadata to the new catalog label metadata
metadata[end_catalog_label] = metadata[start_catalog_label]
    # update the metadata json file
    with open('ogh_meta.json', 'w') as ofp:
        json.dump(metadata, ofp, ensure_ascii=False)
print('mission complete. this device will now self-destruct. just kidding.')
return(dest_dir, metadata)
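# Hedged sketch of makebelieve(): BiasCorr is assumed to be a nested dictionary keyed by
# (FID, LAT, LONG_) station tuples, matching the pd.Panel.from_dict() call above; the
# catalog labels are illustrative.
def _example_makebelieve(homedir, mappingfile, BiasCorr, meta):
    dest_dir, meta = makebelieve(homedir=homedir, mappingfile=mappingfile,
                                 BiasCorr=BiasCorr, metadata=meta,
                                 start_catalog_label='dailymet_livneh2013',
                                 end_catalog_label='dailymet_bc_livneh2013')
    return dest_dir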
def plot_meanP(dictionary, loc_name, start_date, end_date):
    # Plot 1: Monthly precipitation analysis of Livneh data
    if 'meanmonth_precip_liv2013_met_daily' not in dictionary.keys() and 'meanmonth_precip_wrf2014_met_daily' not in dictionary.keys():
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_precip_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_precip_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Precip')
if 'meanmonth_precip_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_precip_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Precip')
if 'meanmonth_precip_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Precip')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Precip (mm)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Precipitation\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('monthly_precip'+str(loc_name)+'.png')
plt.show()
def plot_meanTavg(dictionary, loc_name, start_date, end_date):
# Plot 1: Monthly temperature analysis of Livneh data
    if 'meanmonth_temp_avg_liv2013_met_daily' not in dictionary.keys() and 'meanmonth_temp_avg_wrf2014_met_daily' not in dictionary.keys():
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_temp_avg_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_temp_avg_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Temp Avg')
if 'meanmonth_temp_avg_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_temp_avg_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Temp Avg')
if 'meanmonth_temp_avg_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Temp Avg')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Temp (C)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Temperature\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('monthly_Tavg'+str(loc_name)+'.png')
plt.show()
def plot_meanTmin(dictionary, loc_name, start_date, end_date):
# Plot 1: Monthly temperature analysis of Livneh data
    if 'meanmonth_temp_min_liv2013_met_daily' not in dictionary.keys() and 'meanmonth_temp_min_wrf2014_met_daily' not in dictionary.keys():
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_temp_min_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_temp_min_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Temp min')
if 'meanmonth_temp_min_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_temp_min_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Temp min')
if 'meanmonth_temp_min_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_temp_min_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Temp min')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Temp (C)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nMinimum Temperature\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('monthly_Tmin'+str(loc_name)+'.png')
plt.show()
def plot_meanTmax(dictionary, loc_name, start_date, end_date):
# Plot 1: Monthly temperature analysis of Livneh data
    if 'meanmonth_temp_max_liv2013_met_daily' not in dictionary.keys() and 'meanmonth_temp_max_wrf2014_met_daily' not in dictionary.keys():
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_temp_max_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_temp_max_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Temp max')
if 'meanmonth_temp_max_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_temp_max_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Temp max')
if 'meanmonth_temp_max_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_temp_max_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Temp max')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Temp (C)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nMaximum Temperature\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('monthly_Tmax'+str(loc_name)+'.png')
plt.show()
def renderWatershed(shapefile, outfilepath='watershedmap.png', epsg=4326):
# generate the figure axis
fig = plt.figure(figsize=(3,3), dpi=500)
ax1 = plt.subplot2grid((1,1),(0,0))
# normalize the color distribution according to the value distribution
cmap = mpl.cm.gnuplot2
# calculate bounding box based on the watershed shapefile
watershed = fiona.open(shapefile)
minx, miny, maxx, maxy = watershed.bounds
w, h = maxx - minx, maxy - miny
watershed.close()
# generate basemap
m = Basemap(projection='merc', epsg=3857, resolution='h', ax=ax1,
llcrnrlon=minx - 0.25 * w, llcrnrlat=miny - 0.25 * h, urcrnrlon=maxx + 0.25 * w, urcrnrlat=maxy + 0.25 * h)
m.drawcountries(linewidth=0.1)
m.drawcoastlines(linewidth=0.1)
m.drawmapboundary(fill_color='lightskyblue')
m.fillcontinents(color='cornsilk', lake_color='lightskyblue')
m.drawrivers(color='lightskyblue', linewidth=.1)
m.drawstates(linewidth=0.1, linestyle='solid', color='gray')
m.drawcountries(color='gray', linewidth=0.1)
m.drawmapscale(minx, miny, maxx, maxy, 500, yoffset=10000, barstyle='fancy', fontsize=2, linewidth=0.01)
# read and transform the watershed shapefiles
m.readshapefile(shapefile = shapefile.replace('.shp',''), name='watersheds',
drawbounds=True, zorder=None, linewidth=0.1, color='m', antialiased=1, default_encoding='utf-8')
# load and transform each polygon in shape
patches = [PolygonPatch(Polygon(np.array(shape)), fc='m', ec='m', linewidth=0.1, zorder=0)
for info, shape in zip(m.watersheds_info, m.watersheds)]
# assimilate shapes to plot axis
coll = PatchCollection(patches, cmap=cmap, match_original=True, zorder=5.0)
ax1.add_collection(coll)
    plt.savefig(outfilepath, dpi=500)
plt.show()
return ax1
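# Usage sketch for renderWatershed (the path below is hypothetical):
#     ax = renderWatershed('shapefiles/basin.shp', outfilepath='watershedmap.png')
# Note: the Basemap call above hard-codes a Mercator projection with epsg=3857,
# so the function's epsg argument is currently not applied to the projection.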
def renderPointsInShape(shapefile, NAmer, mappingfile, colvar='all', outfilepath='oghcat_Livneh_Salathe.png', epsg=4326):
fig = plt.figure(figsize=(5,5), dpi=500)
ax1 = plt.subplot2grid((1,1),(0,0))
# generate the polygon color-scheme
cmap = mpl.cm.get_cmap('coolwarm')
norm = mpl.colors.Normalize(0, 1)
color_producer = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
# calculate bounding box based on the watershed shapefile
watershed = fiona.open(shapefile)
minx, miny, maxx, maxy = watershed.bounds
w, h = maxx - minx, maxy - miny
# watershed
watershed_shade = color_producer.to_rgba(0.5)
ptchs = [PolygonPatch(shape(pol['geometry']), fc=watershed_shade, ec=watershed_shade, linewidth=0)
for pol in watershed]
watershed.close()
# generate basemap
m = Basemap(projection='merc', ellps='WGS84', epsg=epsg,
llcrnrlon=minx - 1 * w, llcrnrlat=miny - 1 * h,
urcrnrlon=maxx + 1 * w, urcrnrlat=maxy + 1 * h,
resolution='l', ax=ax1)
m.arcgisimage(service='Canvas/World_Dark_Gray_Base', xpixels=1000)
# generate the collection of Patches
coll = PatchCollection(ptchs, cmap=cmap, match_original=True)
ax1.add_collection(coll)
coll.set_alpha(0.4)
# catalog
cat, n_stations = mappingfileToDF(mappingfile, colvar=colvar)
m.scatter(cat['LONG_'], cat['LAT'], marker='s', s=20, alpha=0.4, c=color_producer.to_rgba(.5))
# save image
plt.savefig(outfilepath)
print('image saved')
return ax1
def renderValuesInPoints(shapefile, NAmer, vardf, vardf_dateindex, outfilepath='oghcat_test.png', epsg=4326):
# generate the figure axis
fig = plt.figure(figsize=(5,5), dpi=500)
ax1 = plt.subplot2grid((1,1),(0,0))
# generate the polygon color-scheme
cmap = mpl.cm.get_cmap('jet')
    norm = mpl.colors.Normalize(vardf.values.min(), vardf.values.max())
color_producer = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
# calculate bounding box based on the watershed shapefile
watershed = fiona.open(shapefile)
minx, miny, maxx, maxy = watershed.bounds
w, h = maxx - minx, maxy - miny
# watershed
ptchs = [PolygonPatch(shape(pol['geometry']), fc='w', ec='w', linewidth=0) for pol in watershed]
watershed.close()
# generate basemap
m = Basemap(projection='merc', ellps='WGS84', epsg=epsg,
llcrnrlon=minx - 1 * w, llcrnrlat=miny - 1 * h,
urcrnrlon=maxx + 1 * w, urcrnrlat=maxy + 1 * h,
resolution='l', ax=ax1)
m.arcgisimage(service='Canvas/World_Dark_Gray_Base', xpixels=1000)
ax1.grid(True, which='both')
# generate the collection of Patches
coll = PatchCollection(ptchs, cmap=cmap, match_original=True)
ax1.add_collection(coll)
coll.set_alpha(0.3)
# catalog
cat=vardf.loc[vardf_dateindex,:].reset_index(level=[1,2]).rename(columns={'level_1':'LAT','level_2':'LONG_'})
cat_color = cat[vardf_dateindex].apply(lambda x: color_producer.to_rgba(x))
m.scatter(cat['LONG_'], cat['LAT'], marker='s', s=20, alpha=0.4, c=cat_color)
# save image
plt.savefig(outfilepath)
print('image saved')
return ax1
def findStationCode(mappingfile, colvar, colvalue):
"""
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
colvar: (string) a column name in mappingfile
colvalue: (value) a value that corresponds to the colvar column
"""
    mapdf = pd.read_csv(mappingfile)
import datetime as dt
import glob
import os
import shutil
import unittest
import numpy as np
import pandas as pd
import devicely
class EverionTestCase(unittest.TestCase):
READ_PATH = 'tests/Everion_test_data'
BROKEN_READ_PATH = 'tests/Everion_test_data_broken' #for testing with missing files
WRITE_PATH = 'tests/Everion_test_data_write'
def setUp(self):
self.reader = devicely.EverionReader(self.READ_PATH)
def test_basic_read(self):
self._test_read_individual_dataframes(self.reader)
expected_signal_tags = ['heart_rate', 'respiration_rate', 'heart_rate_variability',
'oxygen_saturation', 'gsr_electrode', 'temperature_object',
'barometer_pressure', 'temperature_local', 'ctemp',
'temperature_barometer']
expected_signal_quality_tags = ['heart_rate_quality', 'respiration_rate_quality',
'heart_rate_variability_quality', 'oxygen_saturation_quality',
'ctemp_quality']
expected_sensor_tags = ['accz_data', 'led2_data', 'led1_data', 'led4_data',
'accy_data', 'accx_data', 'led3_data', 'acc_mag']
expected_feature_tags = ['inter_pulse_interval', 'inter_pulse_interval_deviation']
expected_columns = set(expected_signal_tags + expected_signal_quality_tags +
expected_sensor_tags + expected_feature_tags)
self.assertEqual(set(self.reader.data.columns), expected_columns)
def test_read_with_non_default_tags(self):
signal_tags = [12, 15, 19, 119, 134]
sensor_tags = [80, 83, 84, 85, 92]
feature_tags = [17]
reader = devicely.EverionReader(self.READ_PATH,
signal_tags=signal_tags,
sensor_tags=sensor_tags,
feature_tags=feature_tags)
# The individual should dataframes contain all tags, regardless of the initialization parameters.
self._test_read_individual_dataframes(reader)
expected_singal_columns = ['respiration_rate', 'temperature_local',
'ctemp', 'temperature_barometer']
expected_signal_quality_columns = ['respiration_rate_quality', 'ctemp_quality']
# no acc_mag because 86 (accz_data) is missing
expected_sensor_columns = ['led1_data', 'led4_data', 'accy_data', 'accx_data']
#17 is a valid feature column, but it is not present in the testing csv
expected_feature_columns = []
expected_columns = set(expected_singal_columns + expected_signal_quality_columns +
expected_sensor_columns + expected_feature_columns)
self.assertEqual(set(reader.data.columns), expected_columns)
def test_read_with_invalid_tags(self):
signal_tags = [12, 15, 19, 119, 134, 80] #80 is not a signal tag
sensor_tags = [80, 83, 84, 85, 92, 70] #70 is not a sensor tag
feature_tags = [17, 86] #86 is not a sensor tag
call = lambda: devicely.EverionReader(self.READ_PATH,
signal_tags=signal_tags,
sensor_tags=sensor_tags,
feature_tags=feature_tags)
self.assertRaises(KeyError, call)
def test_read_with_missing_files(self):
print(os.listdir())
shutil.copytree(self.READ_PATH, self.BROKEN_READ_PATH)
signals_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*signals*")).pop()
attributes_dailys_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*attributes_dailys*")).pop()
os.remove(signals_path)
os.remove(attributes_dailys_path)
reader = devicely.EverionReader(self.BROKEN_READ_PATH)
self.assertIsNone(reader.signals)
self.assertIsNone(reader.attributes_dailys)
expected_sensor_tags = ['accz_data', 'led2_data', 'led1_data', 'led4_data',
'accy_data', 'accx_data', 'led3_data', 'acc_mag']
expected_feature_tags = ['inter_pulse_interval', 'inter_pulse_interval_deviation']
expected_columns = set(expected_sensor_tags + expected_feature_tags)
self.assertEqual(set(reader.data.columns), expected_columns)
shutil.rmtree(self.BROKEN_READ_PATH)
def test_read_with_all_join_files_missing(self):
#The signals-, sensors-, and features files are the three join files.
shutil.copytree(self.READ_PATH, self.BROKEN_READ_PATH)
signals_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*signals*")).pop()
sensors_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*sensor_data*")).pop()
features_path = glob.glob(os.path.join(self.BROKEN_READ_PATH, f"*features*")).pop()
os.remove(signals_path)
os.remove(sensors_path)
os.remove(features_path)
reader = devicely.EverionReader(self.BROKEN_READ_PATH)
self.assertIsNone(reader.signals)
self.assertIsNone(reader.sensors)
self.assertIsNone(reader.features)
pd.testing.assert_frame_equal(reader.data, pd.DataFrame())
shutil.rmtree(self.BROKEN_READ_PATH)
def test_timeshift_to_timestamp(self):
expected_aggregates_head = pd.DataFrame({
'count': 5 * [4468],
'streamType': 5 * [5],
'tag': [40, 18, 21, 7, 100],
'time': pd.to_datetime(5 * [1525200281], unit='s'),
'values': [-2.0, 0.76, 21.0, 60.0, 0.0],
'quality': [np.nan, 13.0, np.nan, 0.0, np.nan]
})
expected_analytics_events_head = pd.DataFrame({
"count": [5622, 5621, 5620, 5619, 5618],
"streamType": 5 * [7],
"tag": 5 * [1],
"time": pd.to_datetime([1525204397, 1525204397, 1525204148, 1525204131, 1525203790], unit='s'),
"values": [22.0, 2.0, 22.0, 22.0, 2.0]
})
expected_attributes_dailys_head = pd.DataFrame({
"count": [14577, 14576, 14575, 14574, 14573],
"streamType": 5 * [8],
"tag": 5 * [67],
"time": pd.to_datetime(5 * [1525207721], unit='s'),
"values": [2.0, 4.0, 3.0, 11.0, 12.0],
"quality": [15.0, 9.0, 8.0, 6.0, 5.0]
})
expected_everion_events_head = pd.DataFrame({
"count": 5 * [46912],
"streamType": 5 * [6],
"tag": [128, 131, 129, 132, 126],
"time": pd.to_datetime(5 * [1525192729], unit='s'),
"values": [65295.0, 900.0, 44310.0, 4096.0, 0.0]
})
expected_features_head = pd.DataFrame({
"count": [787000, 787001, 787002, 787003, 787004],
"streamType": 5 * [4],
"tag": 5 * [14],
"time": pd.to_datetime([1525192675, 1525192675, 1525192676, 1525192677, 1525192678], unit='s'),
"values": [950.0, 1085.0, 1074.0, 1021.0, 1056.0],
"quality": [12.0, 11.0, 12.0, 10.0, 11.0]
})
expected_sensors_head = pd.DataFrame({
"count": 5 * [22917264],
"streamType": 5 * [16],
"tag": [86, 81, 80, 83, 85],
"time": pd.to_datetime(5 * [1525192361], unit='s'),
"values": [2176.0, 51612.0, 668.0, 26377.0, 1232.0]
})
expected_signals_head = pd.DataFrame({
'count': 5 * [806132],
'streamType': 5 * [2],
'tag': [71, 13, 6, 66, 12],
'time': pd.to_datetime(5 * [1525192381], unit='s'),
'values': [0.0, 21.86422, 65.0, 1.5686275, 18.0],
'quality': [np.nan, 100.0, 85.0, np.nan, 93.0]
})
        timestamp = pd.Timestamp('1 May 2018 16:32:41')
import os
from io import BytesIO
import io
import csv
import requests
from zipfile import ZipFile
from urllib.request import urlopen
import pandas as pd
from pathlib import Path
NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT'))
print(NEO4J_IMPORT)
CACHE = Path(NEO4J_IMPORT / 'cache')
CACHE.mkdir(exist_ok=True)
def import_countries():
country_url = 'https://download.geonames.org/export/dump/countryInfo.txt'
names = ['ISO','ISO3','ISO-Numeric','fips','Country','Capital','Area(in sq km)','Population',
'Continent','tld','CurrencyCode','CurrencyName','Phone','Postal Code Format',
'Postal Code Regex','Languages','geonameid','neighbours','EquivalentFipsCode'
]
countries = pd.read_csv(country_url, sep='\t',comment='#', dtype='str', names=names)
# Add missing ISO code for nambia
index = countries.query("ISO3 == 'NAM'").index
countries.at[index, 'ISO'] = 'NA'
countries['id'] = countries['ISO'] # standard id column to link nodes
countries.rename(columns={'ISO': 'iso'}, inplace=True)
countries.rename(columns={'ISO3': 'iso3'}, inplace=True)
countries.rename(columns={'ISO-Numeric': 'isoNumeric'}, inplace=True)
countries.rename(columns={'Country': 'name'}, inplace=True)
countries.rename(columns={'Population': 'population'}, inplace=True)
countries.rename(columns={'Area(in sq km)': 'areaSqKm'}, inplace=True)
countries.rename(columns={'geonameid': 'geonameId'}, inplace=True)
countries.rename(columns={'Continent': 'parentId'}, inplace=True)
countries = countries[['id','name','iso','iso3','isoNumeric', 'parentId', 'areaSqKm','geonameId', 'neighbours']].copy()
countries.fillna('', inplace=True)
countries.to_csv(NEO4J_IMPORT / "00e-GeoNamesCountry.csv", index=False)
def import_admin1():
admin1_url = 'https://download.geonames.org/export/dump/admin1CodesASCII.txt'
names = ['code', 'name', 'name_ascii', 'geonameid']
admin1 = pd.read_csv(admin1_url, sep='\t', dtype='str', names=names)
admin1 = admin1[['code', 'name_ascii', 'geonameid']]
admin1.rename(columns={'code': 'id'}, inplace=True) # standard id column to link nodes
admin1.rename(columns={'name_ascii': 'name'}, inplace=True)
admin1.rename(columns={'geonameid': 'geonameId'}, inplace=True)
admin1['code'] = admin1['id'].str.split('.', expand=True)[1]
admin1['parentId'] = admin1['id'].str.split('.', expand=True)[0]
admin1['name'] = admin1['name'].str.replace('Washington, D.C.', 'District of Columbia')
admin1 = admin1[['id','name','code','parentId', 'geonameId']]
admin1.fillna('', inplace=True)
admin1.to_csv(NEO4J_IMPORT / "00f-GeoNamesAdmin1.csv", index=False)
def import_admin2():
admin2_url = 'https://download.geonames.org/export/dump/admin2Codes.txt'
names = ['code', 'name', 'name_ascii', 'geonameid']
admin2 = pd.read_csv(admin2_url, sep='\t', dtype='str', names=names)
admin2 = admin2[['code', 'name_ascii', 'geonameid']]
admin2.rename(columns={'code': 'id'}, inplace=True) # standard id column to link nodes
admin2.rename(columns={'name_ascii': 'name'}, inplace=True)
admin2.rename(columns={'geonameid': 'geonameId'}, inplace=True)
    admin2['parentId'] = admin2['id'].str.rsplit('.', n=1, expand=True)[0]
admin2.loc[admin2['id'] == 'US.DC.001', 'name'] = 'District of Columbia'
    admin2.loc[admin2['id'] == 'US.CA.075', 'name'] = 'San Francisco'
admin2.to_csv(NEO4J_IMPORT / "00g-GeoNamesAdmin2.csv", index=False)
def get_location_id(country, admin1, admin2):
location = country
if admin1 != '':
location = location + '.' + admin1
if admin2 != '':
location = location + '.' + admin2
return location
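# Example for get_location_id (codes are illustrative): the id simply mirrors the
# GeoNames hierarchy, e.g. get_location_id('US', 'CA', '075') -> 'US.CA.075',
# while get_location_id('FR', '', '') -> 'FR'.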
def import_cities():
urls = ['https://download.geonames.org/export/dump/cities15000.zip', 'https://download.geonames.org/export/dump/cities5000.zip', 'https://download.geonames.org/export/dump/cities1000.zip', 'https://download.geonames.org/export/dump/cities500.zip']
names = [
'geonameid','name','asciiname','alternatenames','latitude','longitude','feature class',
'feature code','country code','cc2','admin1 code','admin2 code','admin3 code','admin4 code',
'population','elevation','dem','timezone','modification date'
]
dfs = []
for url in urls:
file_name = url.split('/')[-1].split('.')[0] + '.txt'
resp = urlopen(url)
zipfile = ZipFile(BytesIO(resp.read()))
city_df = pd.read_csv(zipfile.open(file_name), sep="\t", low_memory=False, names=names)
dfs.append(city_df)
city = pd.concat(dfs)
city = city[['geonameid', 'asciiname', 'country code', 'admin1 code', 'admin2 code']]
city.fillna('', inplace=True)
city.drop_duplicates('geonameid', inplace=True)
city.rename(columns={'geonameid': 'geonameId'}, inplace=True)
city['id'] = city['geonameId']
city.rename(columns={'asciiname': 'name'}, inplace=True)
city['parentId'] = city.apply(lambda row: get_location_id(row['country code'],
row['admin1 code'],
row['admin2 code']), axis=1)
city = city[['id', 'name', 'parentId', 'geonameId']]
city.fillna('', inplace=True)
city.to_csv(NEO4J_IMPORT / "00h-GeoNamesCity.csv", index=False)
def import_UNRegions():
url = "https://unstats.un.org/unsd/methodology/m49/overview"
df = pd.read_html(url, attrs={"id": "downloadTableEN"})[0]
df.rename(columns={
"Region Name": "UNRegion",
"Region Code": "UNRegionCode",
"Sub-region Name": "UNSubRegion",
"Sub-region Code": "UNSubRegionCode",
"Intermediate Region Name": "UNIntermediateRegion",
"Intermediate Region Code": "UNIntermediateRegionCode",
"ISO-alpha3 Code": "iso3",
}, inplace=True)
additions = pd.read_csv("/home/pseudo/Coding/GeoGraph/reference_data/UNRegionAdditions.csv")
additions.fillna('', inplace=True)
df = df.append(additions)
df = df.fillna("").astype(str)
df['UNRegionCode'] = 'm49:' + df['UNRegionCode']
df['UNSubRegionCode'] = 'm49:' + df['UNSubRegionCode']
df['UNIntermediateRegionCode'] = 'm49:' + df['UNIntermediateRegionCode']
# Export All
df.to_csv(NEO4J_IMPORT / "00k-UNAll.csv", index=False)
# Export Intermediate Regions
intermediateRegion = df[df['UNIntermediateRegion'] != '']
intermediateRegion.to_csv(NEO4J_IMPORT / "00k-UNIntermediateRegion.csv", index=False)
# Export Sub-regions
subRegion = df[(df['UNSubRegion'] != '') & (df['UNIntermediateRegion'] == '')]
subRegion.to_csv(NEO4J_IMPORT / "00k-UNSubRegion.csv", index=False)
# Export last
region = df[(df['UNSubRegion'] == '') & (df['UNIntermediateRegion'] == '')]
region.to_csv(NEO4J_IMPORT / "00k-UNRegion.csv", index=False)
def add_data():
"""
adds latitude, longitude, elevation, and population data from GeoNames
to Country, Admin1, Admin2, and City .csv files for ingestion into the Knowledge Graph
"""
country_url = 'https://download.geonames.org/export/dump/allCountries.zip'
content = requests.get(country_url)
zf = ZipFile(BytesIO(content.content))
for item in zf.namelist():
print("File in zip: "+ item)
# Intermediate data cached here
encoding = 'utf-8'
path = CACHE / 'allCountries.csv'
try:
with zf.open('allCountries.txt') as readfile:
with open(path, "w") as file_out:
writer = csv.writer(file_out)
for line in io.TextIOWrapper(readfile, encoding):
row = line.strip().split("\t")
if row[6] == 'A' or row[6] == 'P':
writer.writerow([row[0], row[4], row[5], row[14], row[15]])
except:
print('Download of allCountries.txt failed, using cached version of data')
columns = ['geonameId', 'latitude', 'longitude', 'population', 'elevation']
# If data download failed cached file from past run is used
df = pd.read_csv(path, names=columns, dtype='str', header=0)
df.fillna('', inplace=True)
df['population'] = df['population'].str.replace('0', '')
dfc = df[['geonameId', 'latitude', 'longitude', 'population']]
country = pd.read_csv(NEO4J_IMPORT / "00e-GeoNamesCountry.csv", dtype='str')
    country = pd.merge(country, dfc, on='geonameId', how='left')
import os
from getpass import getpass
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import pandas as pd
import numpy as np
import scipy
from scipy.stats import beta as Beta
from scipy.stats import gamma
from scipy.stats import norm
import pystan
import pickle
import ast
from flask import Flask, render_template, request, jsonify, abort
PASTA = os.path.dirname(__file__)
def config():
redirect_uri=input("Redirect URI: ").strip()
client_id=getpass("Client ID: ").strip()
client_secret=getpass("Client secret: ").strip()
return(client_id, client_secret, redirect_uri)
try:
with open(os.path.join(PASTA, "config.txt"), "r") as config_file:
CLIENT_ID = config_file.readline().strip("\n").strip()
CLIENT_SECRET = config_file.readline().strip("\n").strip()
REDIRECT_URI = config_file.readline().strip("\n").strip()
except:
CLIENT_ID, CLIENT_SECRET, REDIRECT_URI = config()
with open(os.path.join(PASTA, "config.txt"), "w") as config_file:
linhas = [CLIENT_ID, CLIENT_SECRET, REDIRECT_URI]
config_file.writelines("\n".join(linhas))
class API_spotify:
def __init__(self):
self.scope = "user-read-recently-played user-modify-playback-state "\
"playlist-read-private playlist-read-collaborative playlist-modify-public"
self.sp = None
self.playlist = None
self.playlist_id = None
self.client_id = CLIENT_ID
self.client_secret = CLIENT_SECRET
self.redirect_uri = REDIRECT_URI
self.auth()
def auth(self):
if self.client_id is not None and \
self.client_secret is not None and \
self.redirect_uri is not None:
self.sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=self.scope,
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.redirect_uri))
def get_recently_played(self):
if self.sp is not None:
result = self.sp.current_user_recently_played(limit=10)
ids_recent = list(map(lambda x: x["track"]["id"], result["items"]))
feats_recent = self.sp.audio_features(ids_recent)
return feats_recent
def get_playlists(self):
result = self.sp.user_playlists(user=self.sp.me()["id"])
spotify = self.sp.user_playlists("spotify")
playlists = result["items"]
while result["next"]:
result = self.sp.next(result)
playlists.extend(result["items"])
playlists.extend(spotify["items"])
return playlists
def set_playlist(self, playlist):
achou = False
playlists = self.get_playlists()
for p in playlists:
if p["name"] == playlist:
achou = True
break
if achou:
self.playlist = playlist
self.playlist_id = p["id"]
def get_songs_from_playlist(self):
if self.sp is not None and \
self.playlist is not None:
result = self.sp.playlist_tracks(playlist_id=self.playlist_id)
musicas = result["items"]
ids_playlist = list(map(lambda x: x["track"]["id"], musicas))
feats_playlist = self.sp.audio_features(ids_playlist)
while result["next"]:
result = self.sp.next(result)
musicas = result["items"]
ids_playlist = list(map(lambda x: x["track"]["id"], musicas))
feats_playlist.extend(self.sp.audio_features(ids_playlist))
return feats_playlist
def create_playlist(self, tracks):
playlists = self.get_playlists()
playlists_names = list(map(lambda x: x["name"], playlists))
# Excluir a playlist antiga se ela existir
if "SpotiBayes" in playlists_names:
for p in playlists:
if p["name"] == "SpotiBayes":
playlist_antiga = p
break
self.sp.current_user_unfollow_playlist(playlist_antiga["id"])
# Criando playlist nova
playlist_nova = self.sp.user_playlist_create(self.sp.me()["id"], "SpotiBayes")
# Limite de 100 tracks por request
tracks_faltantes = tracks
while len(tracks_faltantes) > 100:
tracks = tracks_faltantes[0:100]
tracks_faltantes = tracks_faltantes[100:]
self.sp.user_playlist_add_tracks(self.sp.me()["id"], playlist_nova["id"], tracks)
self.sp.user_playlist_add_tracks(self.sp.me()["id"], playlist_nova["id"], tracks_faltantes)
dist_beta_infl_zero = """
data {
int<lower=0> n;
int<lower=0> m;
int uns_zeros[3,n];
vector[m] musicas;
}
parameters {
simplex[3] theta;
real<lower=0> alpha;
real<lower=0> beta;
}
model {
for (i in 1:n)
uns_zeros[,i] ~ multinomial(theta);
musicas ~ beta(alpha, beta);
}
"""
dist_gama = """
data {
int<lower=0> n;
vector[n] musicas;
}
parameters {
real<lower=0> alpha;
real<lower=0> beta;
}
model {
musicas ~ gamma(alpha, beta);
}
"""
dist_normal = """
data {
int<lower=0> n;
vector[n] musicas;
}
parameters {
real mu;
real<lower=0> sigma;
}
model {
musicas ~ normal(mu, sigma);
}
"""
DISTRIBUICOES = {"beta_infl_zero": dist_beta_infl_zero,
"gama": dist_gama,
"normal": dist_normal}
VARIAVEIS = {"danceability": "beta_infl_zero",
"energy": "beta_infl_zero",
"speechiness": "beta_infl_zero",
"liveness": "beta_infl_zero",
"valence": "beta_infl_zero",
"loudness": "normal", # "gama",
"tempo": "normal",
"acousticness": "beta_infl_zero",
"instrumentalness": "beta_infl_zero"}
def preprocess(df):
df2 = df.loc[:,VARIAVEIS.keys()]
# df2.loc[:,"loudness"] = -df2.loudness # invertendo os valores negativos
return df2
def carregar_modelo(dist):
try:
sm = pickle.load(open(os.path.join(PASTA, dist+".pkl"), 'rb'))
return sm
except:
sm = pystan.StanModel(model_code=DISTRIBUICOES[dist], verbose=False)
with open(os.path.join(PASTA, dist+".pkl"), 'wb') as f:
pickle.dump(sm, f)
return sm
def rodar_stan(var, dist, df):
if dist == "beta_infl_zero":
uns_zeros = np.c_[np.array(df.loc[:,var].between(0.01, 0.99)).astype(int),
np.array(df.loc[:,var] > 0.99).astype(int),
np.array(df.loc[:,var] < 0.01).astype(int)]
dados_stan = {"n": len(df.index),
# m -> numero de musicas que nao sao 0 nem 1
"m": len(df.loc[df.loc[:,var].between(0.01, 0.99)].index),
# uns e zeros -> primeira coluna: nem 0 nem 1,
# segunda coluna: uns
# terceira coluna: zeros
"uns_zeros": uns_zeros.transpose(),
# musicas -> musicas que nao sao 0 nem 1
"musicas": df.loc[df.loc[:,var].between(0.01, 0.99), var]
}
else:
uns_zeros = np.c_[np.ones(len(df.index)), np.zeros(len(df.index)), np.zeros(len(df.index))]
dados_stan = {"n": len(df.index),
"musicas": df.loc[:,var]
}
sm = carregar_modelo(dist)
fit = sm.sampling(data=dados_stan, iter=15000, warmup=5000, seed=9326584,
chains=1, control = {"adapt_delta": 0.99})
odict = fit.extract()
# Pegando só os parametros da playlist
for par in odict.copy():
if par.startswith("lp"):
odict.pop(par)
# o theta tem 3 colunas - isso vai dar pau depois
tamanhos = list(map(lambda x: [1 if len(x.shape) == 1 else x.shape[1]][0], odict.values()))
# transformando a array de 3 colunas (theta)
# em 3 arrays de 1 coluna
for i in range(len(odict)):
if tamanhos[i] > 1:
chave = list(odict)[i]
arr = odict.pop(chave)
for j in range(tamanhos[i]):
odict.update({str(chave)+str(j): arr[:,j]})
result = pd.DataFrame(odict, columns=odict.keys())
result_dict = result.to_dict(orient="list")
# verificando se as medias das musicas
# estão além dos limites de 95% das playlists
if dist == "beta_infl_zero":
alpha = fit.summary()["summary"][3,0]
beta = fit.summary()["summary"][4,0]
theta0 = fit.summary()["summary"][0,0]
theta1 = fit.summary()["summary"][1,0]
theta2 = fit.summary()["summary"][2,0]
low = Beta.ppf(0.025/theta0, alpha, beta)
upp = Beta.ppf(1 - 0.025/theta0, alpha, beta)
elif dist == "gama":
alpha = fit.summary()["summary"][0,0]
beta = fit.summary()["summary"][1,0]
low = gamma.ppf(0.025, alpha, scale=1/beta)
upp = gamma.ppf(1 - 0.025, alpha, scale=1/beta)
elif dist == "normal":
mu = fit.summary()["summary"][0,0]
sigma = fit.summary()["summary"][1,0]
low = norm.ppf(0.025, mu, sigma)
upp = norm.ppf(1 - 0.025, mu, sigma)
medias = dados_stan["musicas"].to_numpy()
bools_incompleto = np.logical_and(np.greater_equal(medias, low),
np.less_equal(medias, upp)) # Faltam os uns e zeros
k = 0
bools = []
for j in range(uns_zeros.shape[0]):
if uns_zeros[j, 0] == 1:
bools.append(bools_incompleto[k])
k += 1
elif uns_zeros[j,1] == 1 and theta1 >= 0.05:
bools.append(True)
elif uns_zeros[j,2] == 1 and theta2 >= 0.05:
bools.append(True)
else:
bools.append(False)
# Calculando as médias
if dist == "beta_infl_zero":
theta00 = fit.summary()["summary"][0,0]
theta01 = fit.summary()["summary"][1,0]
alfa0 = fit.summary()["summary"][3,0]
beta0 = fit.summary()["summary"][4,0]
media = theta00*(alfa0/(alfa0+beta0))+theta01
elif dist == "gama":
alfa0 = fit.summary()["summary"][0,0]
beta0 = fit.summary()["summary"][1,0]
media = alfa0/beta0
elif dist == "normal":
media = fit.summary()["summary"][0,0]
# Pegando diagnósticos
rhat = {}
neff = {}
if dist == "beta_infl_zero":
rhat.update({"theta0": fit.summary()["summary"][0,9]})
rhat.update({"theta1": fit.summary()["summary"][1,9]})
rhat.update({"theta2": fit.summary()["summary"][2,9]})
rhat.update({"alpha": fit.summary()["summary"][3,9]})
rhat.update({"beta": fit.summary()["summary"][4,9]})
neff.update({"theta0": fit.summary()["summary"][0,8]})
neff.update({"theta1": fit.summary()["summary"][1,8]})
neff.update({"theta2": fit.summary()["summary"][2,8]})
neff.update({"alpha": fit.summary()["summary"][3,8]})
neff.update({"beta": fit.summary()["summary"][4,8]})
elif dist == "gama":
rhat.update({"alpha": fit.summary()["summary"][0,9]})
rhat.update({"beta": fit.summary()["summary"][1,9]})
neff.update({"alpha": fit.summary()["summary"][0,8]})
neff.update({"beta": fit.summary()["summary"][1,8]})
elif dist == "normal":
rhat.update({"mu": fit.summary()["summary"][0,9]})
rhat.update({"sigma": fit.summary()["summary"][1,9]})
neff.update({"mu": fit.summary()["summary"][0,8]})
neff.update({"sigma": fit.summary()["summary"][1,8]})
return result_dict, low, media, upp, bools, rhat, neff
def get_posterioris(api, playlist=None):
if playlist is not None:
api.set_playlist(playlist)
feats_playlist = pd.DataFrame.from_dict(api.get_songs_from_playlist())
dados = preprocess(feats_playlist)
else:
dados = preprocess(pd.DataFrame.from_dict(api.get_recently_played()))
fits = {}
lows = {}
medias = {}
upps = {}
bools_dic = {}
rhats = {}
neffs = {}
for var in VARIAVEIS:
result_dict, low, media, upp, bools, rhat, neff = rodar_stan(var, VARIAVEIS[var], dados)
fits.update({var.title(): result_dict})
lows.update({var: low})
medias.update({var: media})
upps.update({var: upp})
bools_dic.update({var: bools})
rhats.update({var: rhat})
neffs.update({var: neff})
if playlist is not None:
feats_playlist["título"] = feats_playlist.id.map(lambda row: api.sp.track(row)["name"])
feats_playlist["artista"] = feats_playlist.id.map(lambda row: ", ".join([artista["name"] for artista in api.sp.track(row)["artists"]]))
else:
feats_playlist = None
diagnostico = {"rhat": rhats, "neff": neffs}
return fits, lows, medias, upps, bools_dic, feats_playlist, diagnostico
if not os.path.isfile(os.path.join(PASTA, ".cache")):
API_spotify()
sapi = API_spotify()
template_folder = os.path.join(PASTA, 'templates')
static_folder = os.path.join(PASTA, 'static')
app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)
@app.route("/")
def home():
playlists = sapi.get_playlists()
return render_template("index.html", playlists=playlists)
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/get_posterior")
def posterior():
playlist = request.args.get("playlist")
fits_file = "fits_" + playlist + ".csv"
summary_file = "summary_" + playlist + ".csv"
dentro_file = "dentro_" + playlist + ".csv"
rhat_file = "rhat_" + playlist + ".csv"
neff_file = "neff_" + playlist + ".csv"
if os.path.isfile(os.path.join(PASTA, fits_file)) and \
os.path.isfile(os.path.join(PASTA, summary_file)) and \
os.path.isfile(os.path.join(PASTA, dentro_file)):
fits = pd.read_csv(os.path.join(PASTA, fits_file), index_col=0).to_dict()
for linha in fits:
for coluna in list(fits[linha].keys()):
try:
fits[linha][coluna] = ast.literal_eval(fits[linha][coluna])
except ValueError:
fits[linha].pop(coluna)
summary = pd.read_csv(os.path.join(PASTA, summary_file), index_col=0)
dentro = pd.read_csv(os.path.join(PASTA, dentro_file), index_col=0)
dentro = dentro.transpose().to_json()
summary = summary.transpose().to_json()
result = {"fits": fits,
"summary": summary,
"dentro": dentro}
return jsonify(result)
arquivos = os.listdir(PASTA)
csv = [arquivo for arquivo in arquivos if arquivo.endswith(".csv")]
for arquivo in csv:
os.remove(os.path.join(PASTA, arquivo))
fits, lows, medias, upps, bools_dic, dados, diagnostico = get_posterioris(sapi, playlist)
# dados.loc[:,"loudness"] = -dados.loudness # invertendo os valores negativos
bools_df = pd.DataFrame(bools_dic)
bools_df.columns = [col + '_bool' for col in bools_df.columns]
dentro = pd.concat([dados, bools_df], axis=1)
dentro["Total"] = ((dentro.filter(regex="_bool$", axis=1)[dentro==True].sum(axis=1)/
dentro.filter(regex="_bool$", axis=1).count(axis=1)))
dentro["Total"] = (dentro.Total > 2/3)
# Criando a playlist com as músicas selecionadas
tracks = dentro.loc[dentro.Total==True, "id"]
if len(tracks) >= 1:
sapi.create_playlist(tracks)
# Fazendo o summary
lows = pd.DataFrame(lows, index=["Limite inferior"])
    medias = pd.DataFrame(medias, index=["Media"])
from ..dataset import Dataset
import pandas as pd
from .datasets import (get_fake_dataset, get_fake_dataset2, get_fake_dataset3,
get_fake_dataset4, get_fake_dataset6,
get_nans_dataset, get_fake_multi_index_dataset)
def test_filter_outliers():
df = get_fake_dataset()
df = df.filter_outliers_by_percent(fop=20, scope='3', drop=False)
assert pd.isnull(df['3']).all()
def test_filter_outliers_inplace():
df = get_fake_dataset()
df.filter_outliers_by_percent(fop=20, scope='3',
drop=False, inplace=True)
    assert pd.isnull(df['3']).all()
import numpy as np
import pandas as pd
import pytest
from pandera import (
Column, DataFrameSchema, Index, SeriesSchema, Bool, Category, Check,
DateTime, Float, Int, Object, String, Timedelta, errors)
def test_dataframe_schema():
schema = DataFrameSchema(
{
"a": Column(Int,
Check(lambda x: x > 0, element_wise=True)),
"b": Column(Float,
Check(lambda x: 0 <= x <= 10, element_wise=True)),
"c": Column(String,
Check(lambda x: set(x) == {"x", "y", "z"})),
"d": Column(Bool,
Check(lambda x: x.mean() > 0.5)),
"e": Column(Category,
Check(lambda x: set(x) == {"c1", "c2", "c3"})),
"f": Column(Object,
Check(lambda x: x.isin([(1,), (2,), (3,)]))),
"g": Column(DateTime,
Check(lambda x: x >= pd.Timestamp("2015-01-01"),
element_wise=True)),
"i": Column(Timedelta,
Check(lambda x: x < pd.Timedelta(10, unit="D"),
element_wise=True))
})
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.5, 9.9],
"c": ["z", "y", "x"],
"d": [True, True, False],
"e": | pd.Series(["c2", "c1", "c3"], dtype="category") | pandas.Series |
import itertools
from math import ceil
import os
import random
import json
import re
import numpy as np
from interspeechmi.constants import MODELS_DIR
from interspeechmi.nlp.constants import (
CLIENT_CODES,
HF_DATASETS_CACHE_DIR,
THERAPIST_CODES,
)
import logging
import pandas as pd
from typing import Any, Callable, Dict, List, NamedTuple
from interspeechmi.data_handling.constants import (
ANNO_MI_NORMALIZED_AUGMENTED_PATH,
ANNO_MI_NORMALIZED_PATH,
ANNO_MI_NORMALIZED_PATH,
ANNO_MI_DATA_DIR
)
from datasets import load_dataset
from interspeechmi.standalone_utils import (
json_pretty_str,
json_pretty_write
)
from sklearn.model_selection import StratifiedKFold, train_test_split
import shutil
import torch
from torch.nn.utils.rnn import pad_sequence
from tqdm import tqdm
from transformers import (
TrainerCallback,
TrainingArguments
)
from transformers.trainer_callback import (
TrainerControl,
TrainerState
)
logger = logging.getLogger(__name__)
class MyTrainerCallback(TrainerCallback):
"""
Callback used to control the training process
Here we basically do two things:
1) make sure we use the local logger everytime HF wants to log something
This is especially useful when you're trying to log to file -- smth that
HF doesn't do for you automatically, for some reason
2) make the trainer leave a "training is complete" flag
in the output folder and document the logged evaluation history.
"""
def __init__(
self,
remove_optimizer_of_best_checkpoint_on_train_end: bool=False
) -> None:
super().__init__()
self.remove_optimizer_of_best_checkpoint_on_train_end = remove_optimizer_of_best_checkpoint_on_train_end
def on_log(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
logs=None,
**kwargs
):
control.should_log = False
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
logger.info(logs) # using your custom logger
def on_train_end(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs
):
"""
At the end of a training cycle, produce a "training is complete" flag,
which is an empty file named "training_is_complete.flag",
in the output folder
Also, write the logged history to "log_history.json"
in the output folder
"""
# Get the log history
output_dir = args.output_dir
metric = args.metric_for_best_model
performances = [
log[f"eval_{metric}"] for log in state.log_history if (
f"eval_{metric}" in log.keys()
)
]
if args.greater_is_better:
best_performance = max(performances)
else:
best_performance = min(performances)
# Keep only the best checkpoint and delete the others
best_checkpoint = None
for log in state.log_history:
if f"eval_{metric}" in log.keys():
checkpoint_performance = log[f"eval_{metric}"]
step = log["step"]
# Keep the best checkpoint
# It is possible that multiple checkpoints have
# the same best metric value on the dev set.
# In such cases, we just keep the earliest best checkpoint
if (
best_checkpoint is None and
checkpoint_performance == best_performance
):
best_checkpoint = step
# Delete other checkpoints
else:
checkpoint_dir = os.path.join(
output_dir, f"checkpoint-{step}"
)
if os.path.isdir(checkpoint_dir):
shutil.rmtree(checkpoint_dir)
# Rename the folder of the best checkpoint as "best_checkpoint"
assert best_checkpoint is not None
best_checkpoint_dir = os.path.join(
output_dir, f"checkpoint-{best_checkpoint}"
)
assert os.path.isdir(best_checkpoint_dir)
os.rename(
best_checkpoint_dir,
os.path.join(
output_dir,
"best_checkpoint"
)
)
best_checkpoint_dir = os.path.join(output_dir, "best_checkpoint")
# Remove the optimizer of the best checkpoint, because it's quite big
# and not really useful when the training is complete
if self.remove_optimizer_of_best_checkpoint_on_train_end:
optimizer_path = os.path.join(best_checkpoint_dir, "optimizer.pt")
os.remove(optimizer_path)
# Persist the log history
eval_log_history_path = os.path.join(output_dir, "log_history.json")
json_pretty_write(state.log_history, eval_log_history_path)
# Create "training is complete" flag
training_is_complete_flag_path = os.path.join(args.output_dir, "training_is_complete.flag")
with open(training_is_complete_flag_path, 'w') as training_is_complete_flag_writer:
training_is_complete_flag_writer.write("\n")
# # Just in case any checkpoint directory remains, delete it
# checkpoint_pattern = r"^checkpoint-\d+$"
# for dir_content_name in os.listdir(output_dir):
# dir_content_path = os.path.join(output_dir, dir_content_name)
# if (
# os.path.isdir(dir_content_path) and
# re.match(checkpoint_pattern, dir_content_name)
# ):
# shutil.rmtree(dir_content_path)
def build_annomi_context_and_response_pairs_df(
anno_mi_data_path: str=ANNO_MI_NORMALIZED_PATH,
conv_history_window: int=1,
prepend_codes_in_context: str=None,
prepend_code_in_response: bool=False,
context_utterances_connector: str=" ",
utterance_condition: Callable[[NamedTuple], bool]=None,
mi_quality_filter: str=None,
return_response_codes: bool=False,
random_seed: int=42
):
"""
Convert transcripts into a DataFrame of (conversation context, response) pairs
For example, if a conversation from the transcripts looks like
Client: "How are you?",
Therapist: "Good. You?",
Client: "All good",
Then 2 pairs will be generated, assuming a window size of 2:
1) Context: "<client> How are you?", Response: "<therapist> Good. You?"
2) Context: "<client> How are you? <therapist> Good. You?", Response: "<client> All good"
A DataFrame with 3 columns ["anno_mi_row_id", "context, "response"] will be returned,
where each row stores a (context, response) pair and the "anno_mi_row_id" column value
indicates the row ID of the last utterance.
If `prepend_code` is `True`, we add the therapist behaviour / client talk type
between the interlocutor identifier and actual utterance. The example above would become
1) Context: "<client> <neutral> How are you?", Response: "<therapist> <question> Good. You?"
2) Context: "<client> <neutral> How are you? <therapist> <question> Good. You?", Response: "<client> <neutral> All good"
"""
assert prepend_codes_in_context in [
"no", "therapist_oracle", "therapist_predicted",
"client_oracle", "therapist_and_client_oracle",
"client_predicted", "therapist_and_client_predicted",
"therapist_random", "client_random", "therapist_and_client_random"
]
logger.info("Creating <context, response, response_code> pairs ...")
rand = random.Random(random_seed)
if prepend_codes_in_context in [
"therapist_predicted", "client_predicted",
"therapist_and_client_predicted"
]:
anno_mi_predicted_codes = get_predicted_codes_for_annomi(
mi_quality_filter=mi_quality_filter,
get_predicted_therapist_codes=(
prepend_codes_in_context in [
"therapist_predicted", "therapist_and_client_predicted"
]
),
get_predicted_client_codes=(
prepend_codes_in_context in [
"client_predicted", "therapist_and_client_predicted"
]
),
)
else:
anno_mi_predicted_codes = None
context_strs = []
response_strs = []
response_anno_mi_row_ids = []
response_codes = []
dialogue_ids = []
dialogue_mi_qualities = []
def get_code_processed_utt(interlocutor, code, utt_text):
if prepend_codes_in_context in [
"therapist_oracle", "therapist_predicted",
"therapist_and_client_oracle",
"therapist_and_client_predicted",
"therapist_random", "therapist_and_client_random"
]:
prepend_therapist_codes_in_context = True
elif prepend_codes_in_context in [
"no", "client_oracle", "client_predicted",
"client_random"
]:
prepend_therapist_codes_in_context = False
else:
raise NotImplementedError(
f"""
Unimplemeted way of prepending codes in context:
{prepend_codes_in_context}
"""
)
if prepend_codes_in_context in [
"client_oracle", "client_predicted",
"therapist_and_client_oracle",
"therapist_and_client_predicted",
"client_random", "therapist_and_client_random"
]:
prepend_client_codes_in_context = True
elif prepend_codes_in_context in [
"no", "therapist_oracle", "therapist_predicted",
"therapist_random"
]:
prepend_client_codes_in_context = False
else:
raise NotImplementedError(
f"""
Unimplemeted way of prepending codes in context:
{prepend_codes_in_context}
"""
)
if interlocutor == "therapist":
if prepend_therapist_codes_in_context:
assert code in THERAPIST_CODES
code_processed_utt = "<{}>~<{}>{}".format(
interlocutor, code, utt_text
).strip()
else:
code_processed_utt = "<{}>{}".format(
interlocutor, utt_text
).strip()
elif interlocutor == "client":
if prepend_client_codes_in_context:
assert code in CLIENT_CODES
code_processed_utt = "<{}>~<{}>{}".format(
interlocutor, code, utt_text
).strip()
else:
code_processed_utt = "<{}>{}".format(
interlocutor, utt_text
).strip()
else:
raise ValueError(f"Unknown interlocutor: {interlocutor}")
return code_processed_utt
# Use a dict to keep track of the sampled codes
# So that the same utterance in overlapping conversation windows
# will have the same sampled code, which is more consistent
random_utt_codes = dict() # {anno_mi_row_id: sampled_code}
for context_and_response in iter_annomi_utterance_with_context(
num_preceding_utterances_to_return=conv_history_window,
anno_mi_data_path=anno_mi_data_path,
skip_no_context_utterances=True,
utterance_condition=utterance_condition,
mi_quality=mi_quality_filter
):
assert isinstance(context_and_response, pd.DataFrame)
oracle_context_and_response_codes = []
random_context_and_response_codes = []
therapist_behaviour_rephrasing_dict = {
# "question": "asking",
"question": "question",
# "therapist_input": "informing",
"therapist_input": "input",
# "reflection": "listening",
"reflection": "reflection",
"other": "other"
}
if prepend_codes_in_context in [
"therapist_predicted", "client_predicted",
"therapist_and_client_predicted"
]:
predicted_context_and_response_codes = []
for row in context_and_response.itertuples():
therapist_behaviour = getattr(row, "main_therapist_behaviour")
client_talk_type = getattr(row, "client_talk_type")
assert (
(therapist_behaviour == "n/a" and client_talk_type != "n/a") or
(client_talk_type == "n/a" and therapist_behaviour != "n/a")
)
if therapist_behaviour == "n/a":
oracle_context_and_response_codes.append(client_talk_type)
if row.Index not in random_utt_codes.keys():
random_utt_codes[row.Index] = rand.sample(CLIENT_CODES, 1)[0]
else:
oracle_context_and_response_codes.append(
therapist_behaviour_rephrasing_dict[therapist_behaviour]
)
if row.Index not in random_utt_codes.keys():
random_utt_codes[row.Index] = rand.sample(THERAPIST_CODES, 1)[0]
random_context_and_response_codes.append(random_utt_codes[row.Index])
if prepend_codes_in_context in [
"therapist_predicted", "client_predicted",
"therapist_and_client_predicted"
]:
if row.Index not in anno_mi_predicted_codes.keys():
if prepend_codes_in_context == "therapist_predicted":
if getattr(row, "interlocutor") == "therapist":
raise ValueError(
f"""
AnnoMI row ID {row.Index} not found
in code predictions
"""
)
elif prepend_codes_in_context == "client_predicted":
if getattr(row, "interlocutor") == "client":
raise ValueError(
f"""
AnnoMI row ID {row.Index} not found
in code predictions
"""
)
else:
raise ValueError(
f"""
AnnoMI row ID {row.Index} not found
in code predictions
"""
)
predicted_context_and_response_codes.append(None)
else:
assert (
anno_mi_predicted_codes[row.Index]["ground_truth_code"] ==
oracle_context_and_response_codes[-1]
)
predicted_context_and_response_codes.append(
anno_mi_predicted_codes[row.Index]["predicted_code"]
)
if prepend_codes_in_context in [
"therapist_predicted", "client_predicted",
"therapist_and_client_predicted"
]:
assert (
len(oracle_context_and_response_codes) ==
len(predicted_context_and_response_codes)
)
codes_to_use_in_context = predicted_context_and_response_codes[:-1]
elif prepend_codes_in_context in [
"therapist_random", "client_random",
"therapist_and_client_random"
]:
assert (
len(oracle_context_and_response_codes) ==
len(random_context_and_response_codes)
)
codes_to_use_in_context = random_context_and_response_codes[:-1]
else:
codes_to_use_in_context = oracle_context_and_response_codes[:-1]
context_str = context_utterances_connector.join([
get_code_processed_utt(interlocutor, code, utt_text) \
for interlocutor, code, utt_text in zip(
context_and_response["interlocutor"].iloc[:-1],
codes_to_use_in_context,
context_and_response["utterance_text"].iloc[:-1]
)
])
response_interlocutor = context_and_response["interlocutor"].iloc[-1]
response_code = oracle_context_and_response_codes[-1]
response_text = context_and_response["utterance_text"].iloc[-1]
dialogue_mi_quality = context_and_response["mi_quality"].iloc[-1]
response_str = f"<{response_interlocutor}>"
# Basically, if the dataset contains both high- and low-quality-MI
# conversations, we need to use a special label at the beginning
# of the response string of a therapist response, because high-
# and low-quality-MI therapists are different.
# This is not needed for client responses, because the client in
# a high-quality session should not be too different from that
# in a low-quality session
if mi_quality_filter is None and response_interlocutor == "therapist":
response_str += "~<{}>".format(
"good" if dialogue_mi_quality == "high" else "bad"
)
if prepend_code_in_response:
response_str += f"~<{response_code}>"
response_str += response_text
response_str = response_str.strip()
response_anno_mi_row_id = context_and_response.index[-1]
dialogue_id = context_and_response.iloc[-1]["transcript_id"]
context_strs.append(context_str)
response_strs.append(response_str)
response_anno_mi_row_ids.append(response_anno_mi_row_id)
response_codes.append(response_code)
dialogue_ids.append(dialogue_id)
dialogue_mi_qualities.append(dialogue_mi_quality)
annomi_context_and_response_pairs_dict = {
"anno_mi_row_id": response_anno_mi_row_ids,
"dialogue_id": dialogue_ids,
"context": context_strs,
"response": response_strs,
"mi_quality": dialogue_mi_qualities,
}
if return_response_codes:
annomi_context_and_response_pairs_dict["response_code"] = response_codes
    return pd.DataFrame.from_dict(annomi_context_and_response_pairs_dict)
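# Usage sketch (argument values below are illustrative, not necessarily the
# defaults used in any experiment):
#     pairs_df = build_annomi_context_and_response_pairs_df(
#         conv_history_window=4,
#         prepend_codes_in_context="no",
#         mi_quality_filter="high",
#     )
# The result has one row per (context, response) pair with columns
# ["anno_mi_row_id", "dialogue_id", "context", "response", "mi_quality"]
# (plus "response_code" when return_response_codes=True).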
'''
Created on 22-Feb-2019
@author: <NAME>,
Junior Research Fellow (PhD Student),
National Centre fo Microbial Resource,
National Centre for Cell Science,
Pune, Maharshtra, India.
'''
import pandas as pd
import numpy as np
import subprocess
import os
import re
import sys
def baseDir():
"""Return baseDir of installation
"""
return os.path.dirname(os.path.abspath(__file__))
def blast(query,db,in_numCores,cwd):
"""
Do BLAST with provided query and database
Args:
query: Query file
db: db path/name for doing blast
in_numCores: Number of cores to use for BLAST
in_percIdentCutOff: Percent identity cutoff
cwd: Current working directory
Returns:
None
"""
if(sys.platform in ['darwin','linux','cygwin']):
cmd = "blastn -out " + os.path.join(cwd,"out.blast") + " -outfmt 6 -query " + query + " -db " + db + " -num_threads " + in_numCores + " -max_target_seqs 1"
elif(sys.platform == 'win32'):
cmd = "blastn.exe -out " + os.path.join(cwd,"out.blast") + " -outfmt 6 -query " + query + " -db " + db + " -num_threads " + in_numCores + " -max_target_seqs 1"
subBlast = subprocess.Popen(cmd,stdin=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
output,error = subBlast.communicate()
if(subBlast.returncode != 0):
exit(error)
return None
def selectBlastHits_assignGenus_subsetOtuTable(blastOut,otuTable,blastcutoff):
"""
Select best hits from BLAST output and assign taxonmy with respect to given identity cutoff
Args:
blastOut: Filtered blast output file (outfmt 6)
otuTable: OTU table
blastcutoff: percent identity cutoff to assign genus to the sequences
Returns:
DataFrame: OTU/ASV table containg OTUs/ASVs which have assigned taxonomy through BLAST
"""
df = pd.read_csv(blastOut,sep='\t',index_col=0,header=None)
df = df.groupby(level=0).max()
df = df.loc[df[2]>=blastcutoff]
if(df.empty):
exit('None of OTU/ASV sequences passed the given percent identity cut-off')
otuId_tax_dict = dict(zip(df.index,df[1].str.split("_",expand=True)[1]))
df = pd.read_csv(otuTable,sep="\t",index_col=0)
df.columns = df.columns.astype(str)
df = df.reindex(list(otuId_tax_dict.keys()))
df["taxonomy"] = list(otuId_tax_dict.values())
df = df.groupby(["taxonomy"]).sum()
return otuId_tax_dict,df
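# Assumption worth noting for the function above: the BLAST subject ids in the
# reference database are expected to look like "<accession>_<Genus>" (e.g.
# "NR12345_Escherichia"), since the genus is taken from str.split("_")[1];
# ids with extra underscores before the genus would be parsed incorrectly.
# The OTU/ASV table is read as a tab-separated file with sequence ids in the
# first column.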
def makeTable16S(df,func,taxonomyList):
"""
Consolidate the 16S rRNA copy number table with respect to taxonomy
Args:
df: Dataframe of 16S rRNA copy numner
func: mean/mode/median to be taken
taxonomyList: list of taxonomy
Returns:
DataFrame: consolidated dataframe for organisms provided in taxonomy list
"""
#make table
temp_dict = dict()
for tax in taxonomyList:
num = round(float(df[df.index.str.contains(tax,na=False)].mean()),2)
if(num == 0):
temp_dict[tax] = 1
else:
temp_dict[tax] = num
df_consolidated = pd.DataFrame.from_dict(temp_dict,orient="index")
del(temp_dict)
return df_consolidated
def makeKOTable(df,abundData,coreNum):
"""
Consolidate the KO copy number table with respect to OTU table
Args:
df: Gene copy number table (dataframe)
abundData: OTU abundance table (output of selectBlastHits_assignGenus_subsetOtuTable)
coreNum: value in range 0 to 1. If a gene present in coreNum*genus, then it will be considered as core gene.
Returns:
DataFrame: Consolidated gene copy number table (dataframe)
"""
# old
taxonomyList = list(abundData.index)
dfToReturn = pd.DataFrame()
for tax in taxonomyList:
temp_df = df[df.index.str.contains(tax,na=False)]
n = round(temp_df.shape[0]*coreNum)
temp_df = temp_df[temp_df.columns[temp_df.astype(bool).sum()>=n]]
median_series = temp_df.mean()
median_series[median_series.between(0,1,False)] = 1
#median_df = pd.Series.to_frame(median_series).transpose().round()
median_df = pd.Series.to_frame(median_series).transpose()
dfToReturn = dfToReturn.append(median_df, ignore_index = True,sort=False)
dfToReturn.index = taxonomyList
#replace NA with 0
dfToReturn = dfToReturn.fillna(0)
return dfToReturn
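# Example for makeKOTable (numbers are illustrative): with coreNum=0.5 and 10
# reference genomes matching a genus, only genes present in at least
# round(10 * 0.5) = 5 of them are kept ("core" genes); their mean copy number is
# used, and means strictly between 0 and 1 are raised to 1 so a retained gene is
# never predicted with a fractional copy number below one.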
def addAnnotations(metagenomeDf,keggFile):
"""
Add Kegg Annotations to predicted metagenome profile
Args:
metagenomeDf: predicted metagenome profile DataFrame
Returns: Predicted metagenome with KEGG annotations
"""
# read kegg annotations
kodf = pd.read_csv(keggFile, sep="\t", index_col=0,engine='python')
metagenomeDf = metagenomeDf.join(kodf)
return metagenomeDf
def summarizeByFun(metagenomeDf,group):
"""
Consolidate on the basis of group
Args:
metagenomeDf: Annotated Metagenme matrix
group: string by which dataFrame is to be categorized
Returns: Consolidated DataFrame
"""
return metagenomeDf.groupby(group).mean()
def runMinPath(metagenomeDf,funpredPath,outPath,typeOfPrediction):
"""
Run MinPath
Args:
metagenomeDf: Predicted Meatagenome matrix
funpredPath: Path of funPred
outPath: path to store files
typeOfPrediction: kegg or metacyc depending on type of input KO or EC
Returns:
DataFrame: Pruned metagenome content dataframe
"""
#make input for minPath and run MinPath
if(typeOfPrediction == "kegg"):
minpathOutFiles = []
for sampleName in metagenomeDf.columns.difference(['Pathway_Module','A','B','C','EC']):
minPtahInFile = os.path.join(outPath,sampleName + '_minpath_in.txt')
minpathOutFiles.append(os.path.join(outPath,sampleName + '_minpath.out.details'))
#create input file and run MinPath
minPtahInFile_fh = open(minPtahInFile,"w")
            minPtahInFile_fh.writelines([str(i) + "\t" + str(j) + "\n" for i,j in enumerate(list(metagenomeDf[metagenomeDf[sampleName]>0].index))])
            # close the handle so the input is flushed to disk before MinPath reads it
            minPtahInFile_fh.close()
if(sys.platform == 'linux'):
cmd = "python3 " + os.path.join(funpredPath,'MinPath1.4_micfunpred.py') + " " + funpredPath + " " + outPath + " -ko " + minPtahInFile + " -report " + os.path.join(outPath,sampleName + '_minpath.out') + " -details " + os.path.join(outPath,sampleName + '_minpath.out.details')
elif(sys.platform == 'win32'):
cmd = "python.exe " + os.path.join(funpredPath,'MinPath1.4_micfunpred.py') + " " + funpredPath + " " + outPath + " -ko " + minPtahInFile + " -report " + os.path.join(outPath,sampleName + '_minpath.out') + " -details " + os.path.join(outPath,sampleName + '_minpath.out.details')
a = os.popen(cmd).read()
#create pathway abundance dataframe from all files
metagenomeDf_reindexed = metagenomeDf.copy()
metagenome_daraframes = []
sampleName_list = []
annotation_dataframe = pd.DataFrame(columns=['KO','Pathway'])
for minpath_out in minpathOutFiles:
kos_present = []
sampleName = os.path.basename(minpath_out).split('_minpath')[0]
sampleName_list.append(sampleName)
# read KOs present as per minpath
iFH = open(minpath_out,"r")
for line in iFH.readlines():
# pathways
matchObj = re.match("^path.*\#\s(.*)",line)
if(matchObj):
pathway = matchObj.group(1)
# KOs
matchObj = re.search("(K\d+)",line)
if(matchObj):
ko = matchObj.group(1)
# create a lsit of KOs present and annotation dataframe
kos_present.append(ko)
annotation_dataframe = annotation_dataframe.append({'KO':ko,'Pathway':pathway},ignore_index=True)
kos_present = set(kos_present)
# append dataframe to dataframe list
df_temp = metagenomeDf_reindexed.loc[kos_present][sampleName]
metagenome_daraframes.append(df_temp)
# merge all dataframes
df_kos = pd.concat(metagenome_daraframes,axis=1).fillna(0)
# add annotation
annotation_dataframe = annotation_dataframe.drop_duplicates()
df_kos_annotated = pd.merge(df_kos,annotation_dataframe,left_index=True,right_on='KO')
df_pathway = df_kos_annotated.drop(['KO'],axis=1).groupby('Pathway').min()
return df_kos, df_pathway
# MetaCyc
elif(typeOfPrediction == "metacyc"):
minpathOutFiles = []
for sampleName in metagenomeDf.columns:
minPtahInFile = os.path.join(outPath,sampleName + '_minpath_in.txt')
#make list of files
minpathOutFiles.append(os.path.join(outPath,sampleName + '_minpath.out.details'))
#create input file and run MinPath
minPtahInFile_fh = open(minPtahInFile,"w")
            minPtahInFile_fh.writelines(['read' + str(i) + "\t" + str(j) + "\n" for i,j in enumerate(list(metagenomeDf[metagenomeDf[sampleName]>0].index))])
            # close the handle so the input is flushed to disk before MinPath reads it
            minPtahInFile_fh.close()
if(sys.platform == 'linux'):
cmd = "python3 " + os.path.join(funpredPath,'MinPath1.4_micfunpred.py') + ' ' + funpredPath + " " + outPath + " -any " + minPtahInFile + " -map " + os.path.join(funpredPath,'data','path_to_RXN.txt') + " -report " + os.path.join(outPath,sampleName + '_minpath.out') + " -details " + os.path.join(outPath,sampleName + '_minpath.out.details')
elif(sys.platform == 'win32'):
cmd = "python3.exe " + os.path.join(funpredPath,'MinPath1.4_micfunpred.py') + ' ' + funpredPath + " " + outPath + " -any " + minPtahInFile + " -map " + os.path.join(funpredPath,'data','path_to_RXN.txt') + " -report " + os.path.join(outPath,sampleName + '_minpath.out') + " -details " + os.path.join(outPath,sampleName + '_minpath.out.details')
a = os.popen(cmd).read()
#create pathway abundance dataframe from all files
metagenomeDf_reindexed = metagenomeDf.copy()
metagenome_daraframes = []
sampleName_list = []
annotation_dataframe = pd.DataFrame(columns=['RXN','Pathway'])
for minpath_out in minpathOutFiles:
kos_present = []
sampleName = os.path.basename(minpath_out).split('_minpath')[0]
sampleName_list.append(sampleName)
# read KOs present as per minpath
iFH = open(minpath_out,"r")
for line in iFH.readlines():
# pathways
matchObj = re.match("^path.*\#\s(.*)",line)
if(matchObj):
pathway = matchObj.group(1)
# KOs
matchObj = re.search("^\s+(\S+)",line)
if(matchObj):
ko = matchObj.group(1)
# create a lsit of KOs present and annotation dataframe
kos_present.append(ko)
annotation_dataframe = annotation_dataframe.append({'RXN':ko,'Pathway':pathway},ignore_index=True)
kos_present = set(kos_present)
# append dataframe to dataframe list
df_temp = metagenomeDf_reindexed.loc[kos_present][sampleName]
metagenome_daraframes.append(df_temp)
# merge all dataframes
df_kos = pd.concat(metagenome_daraframes,axis=1).fillna(0)
# add annotation
annotation_dataframe = annotation_dataframe.drop_duplicates()
df_kos_annotated = pd.merge(df_kos,annotation_dataframe,left_index=True,right_on='RXN')
df_pathway = df_kos_annotated.drop(['RXN'],axis=1).groupby('Pathway').min()
return df_kos, df_pathway
def ec2RXN(df,ec2rxnFile):
"""
Args:
df: EC metagenome dataframe
ec2rxnFile: map file for ec to RXN
Returns:
"""
# make ec dict
ec2RXNDict = dict()
with open(ec2rxnFile,"r") as f:
for i in f.readlines():
if("-" in i.split("\t")[0]):
ec2RXNDict[i.split("\t")[0][:-2]] = i.split("\t")[1].strip()
else:
ec2RXNDict[i.split("\t")[0]] = i.split("\t")[1].strip()
    # iterate over the metagenome dataframe
tempDf = pd.DataFrame(columns=list(df.columns))
for ec in df.index:
if("-" in ec):
ecTemp = ec[:-2]
else:
ecTemp = ec
if(ecTemp in ec2RXNDict.keys()):
rxnList = ec2RXNDict[ecTemp].split(",")
            for rxn in rxnList:
tempDf.loc[rxn] = list(df.loc[ec])
return tempDf
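# Example (illustrative sketch; paths and file names are hypothetical): convert an EC-number
# metagenome table into MetaCyc reaction (RXN) space before running MinPath:
#   ec_df = pd.read_csv("ec_metagenome.tsv", sep="\t", index_col=0)   # EC ids as index, samples as columns
#   rxn_df = ec2RXN(ec_df, os.path.join(funpredPath, "data", "ec2RXN.txt"))
#   # rxn_df has one row per mapped RXN, copying the abundances of the source EC row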
def addMetaCycPathwayName(pathAbundanceDf,pathNameFile):
"""
:param pathAbundanceDf: Pathway abundance dataframe
:param pathNameFile: Reference File of pathways
:return: Pathway abundance dataframes with pathway types and common-names
"""
pathNameDf = | pd.read_csv(pathNameFile,sep="\t",index_col=0) | pandas.read_csv |
from helper import *
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")
def test_feat(cond, df, cols, p, df_u):
unseen = ''
if cond =='unseen':
unseen = 'unseen'
    # cols is the feature combination to evaluate
# p is for loss or latency
# 1: loss # 2 : latency
#print(df.columns)
X = df[cols]
X2 = df_u[cols]
if p == 1:
y = df.loss
y2 = df_u.loss
if p == 2:
y = df.latency
y2 = df_u.latency
    # randomly split into train and test sets; the test set is 20% of the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.2, random_state=1)
if unseen == 'unseen':
X_test = X2
y_test = y2
clf = DecisionTreeRegressor()
clf = clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
acc1 = mean_squared_error(y_test, y_pred)
clf2 = RandomForestRegressor(n_estimators=10)
clf2 = clf2.fit(X_train,y_train)
y_pred2 = clf2.predict(X_test)
acc2= mean_squared_error(y_test, y_pred2)
#print("Random Forest Accuracy:", acc2, '\n')
clf3 = ExtraTreesRegressor(n_estimators=10)
clf3 = clf3.fit(X_train,y_train)
y_pred3 = clf3.predict(X_test)
acc3= mean_squared_error(y_test, y_pred3)
#print("Extra Trees Accuracy:", acc3, '\n')
pca = PCA()
X_transformed = pca.fit_transform(X_train)
cl = DecisionTreeRegressor()
cl.fit(X_transformed, y_train)
newdata_transformed = pca.transform(X_test)
y_pred4 = cl.predict(newdata_transformed)
acc4 = mean_squared_error(y_test, y_pred4)
#print("PCA Accuracy:", acc4, '\n')
return [acc1, acc2, acc3, acc4 ]
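# Example (illustrative; assumes dataframes containing the chosen feature columns plus 'loss' and 'latency'):
#   mse_loss = test_feat('seen', df, ['max_bytes', 'total_pkts'], 1, df_unseen)
#   # -> [decision_tree_mse, random_forest_mse, extra_trees_mse, pca_mse]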
def getAllCombinations( cond_):
lst = ['total_bytes','max_bytes','proto', "1->2Bytes",'2->1Bytes'
,'1->2Pkts','2->1Pkts','total_pkts','number_ms', 'pkt_ratio','time_spread', 'pkt sum','longest_seq'
,'total_pkt_sizes']
lst1 = ["max_bytes", "longest_seq", "total_bytes"]
lst2 = ["total_pkts", "number_ms", "byte_ratio"]
if cond_ == 1:
lst = lst1
if cond_ == 2:
lst = lst2
uniq_objs = set(lst)
combinations = []
for obj in uniq_objs:
for i in range(0,len(combinations)):
combinations.append(combinations[i].union([obj]))
combinations.append(set([obj]))
print("all combinations generated")
return combinations
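# Example: getAllCombinations(1) enumerates every non-empty subset of the 3 features in lst1,
# i.e. 2**3 - 1 = 7 combinations; any other cond_ keeps the full 14-feature list and yields
# 2**14 - 1 = 16383 subsets.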
def test_mse(cond, all_comb1, all_comb2):
unseen = ''
if cond =='unseen':
unseen = 'unseen'
filedir_unseen = os.path.join(os.getcwd(), "outputs", unseen + "combined_t_latency.csv")
df_unseen = pd.read_csv(filedir_unseen)
filedir = os.path.join(os.getcwd(), "outputs", "combined_t_latency.csv")
df = pd.read_csv(filedir)
all_comb1 = pd.Series(all_comb1).apply(lambda x: list(x))
all_comb2 = pd.Series(all_comb2).apply(lambda x: list(x))
dt = []
rf = []
et = []
pca = []
for i in all_comb1:
acc_loss = test_feat(cond, df, i, 1, df_unseen)
dt.append(acc_loss[0])
rf.append(acc_loss[1])
et.append(acc_loss[2])
pca.append(acc_loss[3])
dt2 = []
rf2 = []
et2 = []
pca2 = []
for i in all_comb2:
# 1 = loss
# 2 = latency
acc_latency = test_feat(cond, df, i, 2, df_unseen)
#print(accs)
dt2.append(acc_latency[0])
rf2.append(acc_latency[1])
et2.append(acc_latency[2])
pca2.append(acc_latency[3])
dict1 = pd.DataFrame({'feat': all_comb1, 'dt': dt, 'rf': rf, 'et': et, 'pca': pca})
dict2 = | pd.DataFrame({'feat2': all_comb2, 'dt2': dt2, 'rf2': rf2, 'et2': et2, 'pca2': pca2}) | pandas.DataFrame |
"""Testing creation and manipulation of DataFrameSchema objects."""
# pylint: disable=too-many-lines,redefined-outer-name
import copy
from functools import partial
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
import pandas as pd
import pytest
from pandera import (
STRING,
Bool,
Category,
Check,
Column,
DataFrameSchema,
DateTime,
Float,
Index,
Int,
MultiIndex,
Object,
PandasDtype,
SeriesSchema,
String,
Timedelta,
errors,
)
from pandera.dtypes import LEGACY_PANDAS
from pandera.schemas import SeriesSchemaBase
from .test_dtypes import TESTABLE_DTYPES
def test_dataframe_schema() -> None:
"""Tests the Checking of a DataFrame that has a wide variety of types and
conditions. Tests include: when the Schema works, when a column is dropped,
and when a columns values change its type.
"""
schema = DataFrameSchema(
{
"a": Column(Int, Check(lambda x: x > 0, element_wise=True)),
"b": Column(
Float, Check(lambda x: 0 <= x <= 10, element_wise=True)
),
"c": Column(String, Check(lambda x: set(x) == {"x", "y", "z"})),
"d": Column(Bool, Check(lambda x: x.mean() > 0.5)),
"e": Column(
Category, Check(lambda x: set(x) == {"c1", "c2", "c3"})
),
"f": Column(Object, Check(lambda x: x.isin([(1,), (2,), (3,)]))),
"g": Column(
DateTime,
Check(
lambda x: x >= pd.Timestamp("2015-01-01"),
element_wise=True,
),
),
"i": Column(
Timedelta,
Check(
lambda x: x < pd.Timedelta(10, unit="D"), element_wise=True
),
),
}
)
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.5, 9.9],
"c": ["z", "y", "x"],
"d": [True, True, False],
"e": pd.Series(["c2", "c1", "c3"], dtype="category"),
"f": [(3,), (2,), (1,)],
"g": [
pd.Timestamp("2015-02-01"),
pd.Timestamp("2015-02-02"),
pd.Timestamp("2015-02-03"),
],
"i": [
pd.Timedelta(1, unit="D"),
pd.Timedelta(5, unit="D"),
| pd.Timedelta(9, unit="D") | pandas.Timedelta |
from operator import index
import spacy
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from clf_utils import MultiLabelClassifier, ContentTypeData
class Plotter:
"""
Class for plotting the results of the classifier or message distribution
"""
def __init__(self, num_samples=np.arange(200, 1010, 200), targets=['B', 'W', 'A'], \
do_abs=False, dataset_name='sportsett', is_2_class=False):
self.num_samples = num_samples
self.targets = targets
self.nlp = spacy.load('en_core_web_sm')
self.dataset_name = dataset_name
self.do_abs = do_abs
self.is_2_class = is_2_class
def raw_sentence(self, sents):
return ' '.join([sent.text for sent in sents])
def spacy_sent_tokenize(self, doc):
# print(f'{doc.text}')
sents = []
all_sents = []
valid_stop = False
for sent in doc.sents:
sents.append(sent)
valid_stop = True if sent[-1].text in ['.', '?', '!'] else False
if valid_stop:
all_sents.append(self.raw_sentence(sents))
sents = []
return all_sents
def sumtime_sent_tokenize(self, doc):
words = doc.strip().split(' ')
lows = [idx for idx, i in enumerate(words) if i != '' and i[0].islower()]
sent_lists = [words[i:j] for i, j in zip(lows[:-1], lows[1:])]
sent_lists.append(words[lows[-1]:])
sents = [' '.join(i) for i in sent_lists]
return sents
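    # Example (illustrative): for a SumTime-style string in which only sentence-initial tokens
    # are lower-case, e.g. "cloudy WITH RAIN then DRY LATER", lows == [0, 3] and the recovered
    # sentences are "cloudy WITH RAIN" and "then DRY LATER".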
def save_df_dist_plot(self, df, plt_type='gold', datasets=None):
"""
type:
'gold', 'gens', 'gold_no_split', 'gold_by_datasets'
'gold_finer', 'gold_no_split_finer', 'gens_finer', 'gold_by_datasets_finer'
'gold_by_datasets_2_class', 'gold_no_split_2_class', 'gens_2_class'
"""
ax = df.plot.bar(figsize=(7, 5), rot=0)
plt.rcParams.update({'font.size': 12})
ax.set_title(f'Distribution of Content Type')
for p in ax.patches:
ax.annotate(f'{str(int(p.get_height()))}', (p.get_x() * 1.005, (p.get_height() * 1.005) + 2))
ax.set_ylim(0, 110)
ax.set_xlabel('Content Type')
ax.set_ylabel('Percentage')
print(plt_type, plt_type != "gold_no_split")
if "gold_no_split" not in plt_type:
print("if")
if 'finer' in plt_type:
print("finer")
ax.set_xticklabels(['Intra-Event Basic', 'Intra-Event Complex', 'Inter-Event', 'External'])
elif '2_class' in plt_type:
print('2_class')
ax.set_xticklabels(['Intra-Event', 'Inter-Event'])
else:
print('else')
ax.set_xticklabels(['Intra-Event Basic', 'Intra-Event Complex', 'Inter-Event'])
else:
print("else")
if 'finer' in plt_type:
ax.legend(labels=['Intra-Event Basic', 'Intra-Event Complex', 'Inter-Event', 'External'])
elif '2_class' in plt_type:
ax.legend(labels=['Intra-Event', 'Inter-Event'])
else:
ax.legend(labels=['Intra-Event Basic', 'Intra-Event Complex', 'Inter-Event'])
plt.rcParams.update({'font.size': 12})
ax.figure.tight_layout()
if datasets is None:
ax.figure.savefig(f'./{self.dataset_name}/output/plots/ct_dist/ct_dist_{plt_type}.png', dpi=300)
df.to_csv(f'./{self.dataset_name}/output/csvs/ct_dist/ct_dist_{plt_type}.csv')
else:
for dataset in datasets:
# ax.figure.savefig(f'./{dataset.lower()}/output/plots/ct_dist/ct_dist_{plt_type}.png', dpi=300)
df.to_csv(f'./{dataset.lower()}/output/csvs/ct_dist/ct_dist_{plt_type}.csv')
def save_df_clf_res_plot(self, df, metric='Accuracy', save_path='./output/plots/clf_res', \
is_al_vs_no_al=False, title=None):
plt.rcParams.update({'font.size': 16})
ax = df.plot.line(figsize=(10, 5))
ax.set_xlabel('Number of training samples')
ax.set_ylabel(f'{metric}')
ax.set_xticks(self.num_samples)
title_name = f'{metric} vs Number of training samples' if title is None else title
ax.set_title(title_name)
ax.legend(loc='lower right')
out_file_name_plot = f'./{self.dataset_name}/output/plots/clf_res/{metric.lower()}' \
if not is_al_vs_no_al else f'./{self.dataset_name}/output/plots/clf_res/{(metric.lower()).replace(" ", "_")}_avna'
plt.rcParams.update({'font.size': 12})
ax.figure.tight_layout()
ax.figure.savefig(f'{out_file_name_plot}.png', dpi=300)
out_file_name_csv = f'./{self.dataset_name}/output/csvs/clf_res/{metric.lower()}' \
if not is_al_vs_no_al else f'./{self.dataset_name}/output/csvs/clf_res/{(metric.lower()).replace(" ", "_")}_avna'
df.to_csv(f'{out_file_name_csv}.csv', index=0)
def plot_dist_gold_v_clf(self):
data = ContentTypeData(target_names=self.targets, path=f'./data/tsvs', do_abs=self.do_abs, dataset_name=self.dataset_name)
test_x, test_y = data.get_data(train=False)
train_x, train_y = data.get_data(train_file_name='train')
# all_x, all_y = np.concatenate((train_x, test_x), axis=0), np.concatenate((train_y, test_y), axis=0)
all_x, all_y = test_x, test_y
print(f'\n\nall_x.shape: {all_x.shape}\tall_y.shape: {all_y.shape}\n\n')
clf = MultiLabelClassifier(model_name='bert', ftr_name='none', dataset_name=self.dataset_name, num_classes=len(self.targets))
pred_y = clf.predict_multilabel_classif(all_x)
dists = {}
# print(test_y, pred_y)
dists['Predicted'] = {label: (np.sum(pred_y[:, idx])/len(pred_y))*100 for idx, label in enumerate(self.targets)}
dists['Actual'] = {label: (np.sum(np.array(all_y)[:, idx])/len(all_y))*100 for idx, label in enumerate(self.targets)}
df = | pd.DataFrame(dists) | pandas.DataFrame |
import os
from typing import Union
import math
import pandas as pd
import numpy as np
import sha_calc as sha
from gmhazard_calc import site
from gmhazard_calc import gm_data
from gmhazard_calc import constants as const
from gmhazard_calc.im import IMComponent
from .NZTAResult import NZTAResult
from qcore import geo
# The following CSV file was based on p.147 NZTA Bridge Manual Commentary,
# where the Lat and Lon of each town were obtained from Wikipedia (produced by geohack.toolforge.org)
# if the Lat and Lon is in water, a government office location is used instead.
# (eg. regional council for Huntly, Thames, police station for Oban)
# Vs30 values were obtained from Kevin's vs30 map (Release 2020: https://github.com/ucgmsim/Vs30/releases/tag/1)
NZTA_LOOKUP_FFP = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "NZTA_data_lat_lon_vs30.csv"
)
DEFAULT_RETURN_PERIODS = np.array([20, 25, 50, 100, 250, 500, 1000, 2000, 2500])
DEFAULT_EXCEEDANCE_VALUES = 1 / DEFAULT_RETURN_PERIODS
def run_ensemble_nzta(
ensemble: gm_data.Ensemble,
site_info: site.SiteInfo,
exceedance_values: np.ndarray = DEFAULT_EXCEEDANCE_VALUES,
soil_class: const.NZTASoilClass = None,
im_component: IMComponent = IMComponent.RotD50,
):
"""Runs NZTA for the specified site and ensemble
Note:
Parameters
----------
ensemble: Ensemble
The ensemble does not affect calculation at all,
purely included for consistency/completeness
site_info: SiteInfo
The site for which to compute NZTA code hazard
exceedance_values: array of floats, optional
soil_class: NZTASoilClass, optional
The soil class to use, if not specified then
this is computed based on the vs30 of the site
Returns
-------
NZTAResult
"""
# Load the required NZTA data
nzta_df = pd.read_csv(NZTA_LOOKUP_FFP, header=0, index_col=0)
soil_class = (
soil_class if soil_class is not None else get_soil_class(site_info.vs30)
)
# Get the return periods
rp_values = 1 / exceedance_values
# Compute PGA and retrieve effective magnitude
C0_1000, nearest_town = get_C0_1000(
site_info.lat, site_info.lon, soil_class, nzta_df=nzta_df
)
pga_values, M_eff = [], None
for cur_rp in rp_values:
cur_pga, M_eff = get_pga_meff(C0_1000, nearest_town, cur_rp, nzta_df=nzta_df)
pga_values.append(cur_pga)
if im_component != IMComponent.Larger:
ratio = sha.get_computed_component_ratio(
str(IMComponent.Larger),
str(im_component),
# Using period of 0.01 for PGA IM
0.01,
)
pga_values = [value * ratio for value in pga_values]
return NZTAResult(
ensemble,
site_info,
soil_class,
pd.Series(index=exceedance_values, data=pga_values),
M_eff,
C0_1000,
nearest_town,
)
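# Example (illustrative sketch; `ens` and `site` are assumed to have been built elsewhere with the
# gm_data / site helpers):
#   result = run_ensemble_nzta(ens, site, exceedance_values=1 / np.array([250, 500, 1000]))
#   # `result` is an NZTAResult holding the PGA hazard series, effective magnitude, C0_1000 and nearest town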
def get_C0_1000(
lat: float,
lon: float,
soil_class: const.NZTASoilClass,
nzta_df: pd.DataFrame = None,
):
"""
Returns
-------
1. C_0,1000 value for the given vs30 value at the closest location
2. the name of the closest town
"""
nzta_df = (
| pd.read_csv(NZTA_LOOKUP_FFP, header=0, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
import app.config.env as env
import pandas as pd
class Capacity:
def __init__(self, capacity=0., unit=None, tenors=[], start=-float("inf"), end=float("inf")):
self.capacity = capacity
self.unit = unit
self.start = start
self.end = end
self.capacities = None
capacities = []
for tenor in tenors:
            capacities.append(capacity if (tenor >= start and tenor < end) else 0.)
self.capacities = | pd.Series(capacities, index=tenors) | pandas.Series |
'''
Python Client for generic Biothings API services
'''
from __future__ import print_function
import os
import sys
import platform
import time
import warnings
from itertools import islice
import requests
import logging
from .utils import str_types
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
try:
from pandas import DataFrame, json_normalize
df_avail = True
except ImportError:
df_avail = False
try:
import requests_cache
caching_avail = True
except ImportError:
caching_avail = False
__version__ = '0.2.5'
# setting up the logging logger
_DEBUG_ = logging.DEBUG
logger = logging.getLogger("biothings_client")
logger.setLevel(_DEBUG_)
# creating the handler to output to stdout
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(_DEBUG_)
# setting up the logging formatter
# this formatter contains time, but will use without time for now
# formatter = logging.Formatter("[%(levelname)s %(asctime)s %(name)s:%(lineno)s] - %(message)s ")
formatter = logging.Formatter("%(levelname)-8s [%(name)s:%(lineno)s] - %(message)s")
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
class ScanError(Exception):
# for errors in scan search type
pass
def alwayslist(value):
    '''If input value is not a list/tuple type, return it as a single value list.
Example:
>>> x = 'abc'
>>> for xx in alwayslist(x):
    ...     print(xx)
>>> x = ['abc', 'def']
>>> for xx in alwayslist(x):
    ...     print(xx)
'''
if isinstance(value, (list, tuple)):
return value
else:
return [value]
def safe_str(s, encoding='utf-8'):
    '''Perform proper encoding if input is a unicode string.'''
try:
_s = str(s)
except UnicodeEncodeError:
_s = s.encode(encoding)
return _s
def list_itemcnt(li):
    '''Return the number of occurrences of each distinct item in the input list.'''
x = {}
for item in li:
if item in x:
x[item] += 1
else:
x[item] = 1
return [(i, x[i]) for i in x]
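# Example (illustrative): list_itemcnt(['a', 'b', 'a']) -> [('a', 2), ('b', 1)]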
def iter_n(iterable, n, with_cnt=False):
'''
Iterate an iterator by chunks (of n)
if with_cnt is True, return (chunk, cnt) each time
'''
it = iter(iterable)
if with_cnt:
cnt = 0
while True:
chunk = tuple(islice(it, n))
if not chunk:
return
if with_cnt:
cnt += len(chunk)
yield (chunk, cnt)
else:
yield chunk
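# Example (illustrative): list(iter_n(range(5), 2)) -> [(0, 1), (2, 3), (4,)];
# with with_cnt=True each chunk is paired with the running count:
# [((0, 1), 2), ((2, 3), 4), ((4,), 5)].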
class BiothingClient(object):
'''This is the client for a biothing web service.'''
def __init__(self, url=None):
if url is None:
url = self._default_url
self.url = url
if self.url[-1] == '/':
self.url = self.url[:-1]
self.max_query = self._max_query
# delay and step attributes are for batch queries.
self.delay = self._delay # delay is ignored when requests made from cache.
self.step = self._step
self.scroll_size = self._scroll_size
# raise requests.exceptions.HTTPError for status_code > 400
# but not for 404 on getvariant
        # set to False to suppress the exceptions.
self.raise_for_status = True
self.default_user_agent = ("{package_header}/{client_version} ("
"python:{python_version} "
"requests:{requests_version}"
")").format(**{
'package_header': self._pkg_user_agent_header,
'client_version': __version__,
'python_version': platform.python_version(),
'requests_version': requests.__version__
})
self._cached = False
@staticmethod
def _dataframe(obj, dataframe, df_index=True):
'''Converts object to DataFrame (pandas)'''
if not df_avail:
# print("Error: pandas module must be installed for as_dataframe option.")
logger.error("Error: pandas module must be installed for as_dataframe option.")
return
# if dataframe not in ["by_source", "normal"]:
if dataframe not in [1, 2]:
raise ValueError(
"dataframe must be either 1 (using json_normalize) or 2 (using DataFrame.from_dict")
if 'hits' in obj:
if dataframe == 1:
df = json_normalize(obj['hits'])
else:
df = DataFrame.from_dict(obj)
else:
if dataframe == 1:
df = json_normalize(obj)
else:
df = | DataFrame.from_dict(obj) | pandas.DataFrame.from_dict |
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rcParams
params = {
# 'text.latex.preamble': ['\\usepackage{gensymb}'],
# 'text.usetex': True,
'font.family': 'Helvetica',
'lines.solid_capstyle':'butt',
'lines.markeredgewidth': 1,
}
rcParams.update(params)
sns.set_context("paper", font_scale=1.6, rc={"lines.linewidth": 2})
sns.set_style('white')
sns.set_palette("cividis")
dir_path = os.path.dirname(os.path.realpath(__file__))
def main():
sens = pd.read_csv('model_sensitivities.csv',header=0,index_col=0) # ,low_memory=False)
occu = pd.read_csv('sc_occurrence_data.csv',header=0,index_col=0)
    occu.index = [val if val[6] != '_' else val[:6] + val[7:] for val in occu.index.values] # oops, used two naming conventions
occu['cluster'] = [sens.loc[((sens['model_id']==i) & (sens['experiment']=='classification')),'cluster'].values[0] for i in occu.index.values]
clust_list = ['Dominated Cluster','Overfit Cluster','Parsimonious Cluster',]
occu = occu[[True if i in [1,2,3] else False for i in occu['cluster'].values]]
occu['Cluster'] = [clust_list[i-1] for i in occu['cluster']]
occu = occu.drop(['training_error', 'complexity', 'test_error','cluster'],axis=1)
occu[occu.columns[:-1]] = occu[occu.columns[:-1]] > 0
occu = occu.groupby(['Cluster']).sum()
inpu = occu[occu.columns[:-9]].stack()
inputs = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import sys
base_dir = "dir/to/Clipper"
#base_dir = "/Users/gexinzhou/Dropbox/clipper/Github/Clipper_python/"
sys.path.append(base_dir)
import Clipper
exp_d = pd.read_csv('./data/exp_d.csv',sep='\t',index_col=0).values.tolist()
back_d = pd.read_csv('./data/back_d.csv', sep='\t',index_col=0).values.tolist()
exp_e = pd.read_csv('./data/exp_e.csv', sep='\t',index_col=0).values.tolist()
back_e = pd.read_csv('./data/back_e.csv', sep='\t',index_col=0).values.tolist()
print(f"...starting the first test on sample differential analysis")
re1 = Clipper.clipper(score_exp=exp_d, score_back=back_d, analysis="differential", FDR=[0.01, 0.05, 0.1])
trueid = np.arange(2000)
discoveries = | pd.DataFrame(re1["discoveries"][0]) | pandas.DataFrame |
from datetime import datetime
from collections import Counter
from functools import partial
import pandas as pd
import mongoengine
import xlrd
import os
import re
def create_regex(s: str, initials: bool = True) -> str:
"""
Given a string representation of either a channel or marker, generate a standard
regex string to be used in a panel template
Parameters
----------
s: str
String value of channel or marker to generate regex term for
initials: bool, (default=True)
If True, account for use of initials to represent a marker/channel name
Returns
-------
str
Formatted regex string
"""
def has_numbers(inputString):
return any(char.isdigit() for char in inputString)
s = [i for ls in [_.split('-') for _ in s.split(' ')] for i in ls]
s = [i for ls in [_.split('.') for _ in s] for i in ls]
s = [i for ls in [_.split('/') for _ in s] for i in ls]
new_string = list()
for i in s:
if not has_numbers(i) and len(i) > 2 and initials:
new_string.append(f'{i[0]}({i[1:]})*')
else:
new_string.append(i)
    new_string = r'[\s.-]+'.join(new_string)
    new_string = r'<*\s*' + new_string + r'\s*>*'
return new_string
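# Example (illustrative): create_regex("CD4-FITC") returns '<*\s*CD4[\s.-]+F(ITC)*\s*>*':
# "CD4" is kept verbatim (it contains a digit) while "FITC" may be abbreviated to its initial,
# and optional angle brackets / surrounding whitespace are tolerated around the term.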
def create_template(channel_mappings: list, file_name: str,
case_sensitive: bool = False, initials: bool = True):
"""
Given a list of channel mappings from an fcs file, create an excel template for Panel creation.
Parameters
----------
channel_mappings: list
List of channel mappings (list of dictionaries)
file_name: str
File name for saving excel template
case_sensitive: bool, (default=False)
If True, search terms for channels/markers will be case sensitive
initials: bool, (default=True)
If True, search terms for channels/markers will account for the use of initials of channels/markers
Returns
-------
None
"""
try:
assert file_name.split('.')[1] == 'xlsx', 'Invalid file name, must be of format "NAME.xlsx"'
except IndexError:
raise Exception('Invalid file name, must be of format "NAME.xlsx"')
mappings = pd.DataFrame()
mappings['channel'] = [cm['channel'] for cm in channel_mappings]
mappings['marker'] = [cm['marker'] for cm in channel_mappings]
nomenclature = | pd.DataFrame() | pandas.DataFrame |
"""
Unit test for smart explainer
"""
import unittest
from unittest.mock import patch, Mock
import os
from os import path
from pathlib import Path
import types
import pandas as pd
import numpy as np
import catboost as cb
from sklearn.linear_model import LinearRegression
from shapash.explainer.smart_explainer import SmartExplainer
from shapash.explainer.multi_decorator import MultiDecorator
from shapash.explainer.smart_state import SmartState
import category_encoders as ce
import shap
def init_sme_to_pickle_test():
"""
Init sme to pickle test
TODO: Docstring
Returns
-------
[type]
[description]
"""
current = Path(path.abspath(__file__)).parent.parent.parent
pkl_file = path.join(current, 'data/xpl.pkl')
xpl = SmartExplainer()
contributions = pd.DataFrame([[-0.1, 0.2, -0.3], [0.1, -0.2, 0.3]])
y_pred = pd.DataFrame(data=np.array([1, 2]), columns=['pred'])
dataframe_x = pd.DataFrame([[1, 2, 3], [1, 2, 3]])
xpl.compile(contributions=contributions, x=dataframe_x, y_pred=y_pred, model=LinearRegression())
xpl.filter(max_contrib=2)
return pkl_file, xpl
class TestSmartExplainer(unittest.TestCase):
"""
Unit test smart explainer
TODO: Docstring
"""
def test_init(self):
"""
test init smart explainer
"""
xpl = SmartExplainer()
assert hasattr(xpl, 'plot')
def assertRaisesWithMessage(self, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
            self.fail()
except Exception as inst:
self.assertEqual(inst.args[0]['message'], msg)
@patch('shapash.explainer.smart_explainer.SmartState')
def test_choose_state_1(self, mock_smart_state):
"""
Unit test choose state 1
Parameters
----------
mock_smart_state : [type]
[description]
"""
xpl = SmartExplainer()
xpl.choose_state('contributions')
mock_smart_state.assert_called()
@patch('shapash.explainer.smart_explainer.MultiDecorator')
def test_choose_state_2(self, mock_multi_decorator):
"""
Unit test choose state 2
Parameters
----------
mock_multi_decorator : [type]
[description]
"""
xpl = SmartExplainer()
xpl.choose_state([1, 2, 3])
mock_multi_decorator.assert_called()
def test_validate_contributions_1(self):
"""
Unit test validate contributions 1
"""
xpl = SmartExplainer()
contributions = [
np.array([[2, 1], [8, 4]]),
np.array([[5, 5], [0, 0]])
]
model = Mock()
model._classes = np.array([1, 3])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
xpl.model = model
xpl._case = "classification"
xpl._classes = list(model._classes)
xpl.state = xpl.choose_state(contributions)
xpl.x_init = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
expected_output = [
pd.DataFrame(
[[2, 1], [8, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
),
pd.DataFrame(
[[5, 5], [0, 0]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
]
output = xpl.validate_contributions(contributions)
assert len(expected_output) == len(output)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in zip(expected_output, output)]
assert all(x is None for x in test_list)
def test_apply_preprocessing_1(self):
"""
Unit test apply preprocessing 1
"""
xpl = SmartExplainer()
contributions = [1, 2, 3]
output = xpl.apply_preprocessing(contributions)
expected = contributions
self.assertListEqual(output, expected)
def test_apply_preprocessing_2(self):
"""
Unit test apply preprocessing 2
"""
xpl = SmartExplainer()
xpl.state = Mock()
preprocessing = Mock()
contributions = [1, 2, 3]
xpl.apply_preprocessing(contributions, preprocessing)
xpl.state.inverse_transform_contributions.assert_called()
def test_modify_postprocessing_1(self):
"""
Unit test modify postprocessing 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1:'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {0: {'type' : 'suffix', 'rule':' t'},
'Column2': {'type' : 'prefix', 'rule' : 'test'}}
expected_output = {
'Col1': {'type' : 'suffix', 'rule':' t'},
'Col2': {'type' : 'prefix', 'rule' : 'test'}
}
output = xpl.modify_postprocessing(postprocessing)
assert output == expected_output
def test_modify_postprocessing_2(self):
"""
Unit test modify postprocessing 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {'Error': {'type': 'suffix', 'rule': ' t'}}
with self.assertRaises(ValueError):
xpl.modify_postprocessing(postprocessing)
def test_check_postprocessing_1(self):
"""
Unit test check_postprocessing
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing1 = {0: {'Error': 'suffix', 'rule': ' t'}}
postprocessing2 = {0: {'type': 'Error', 'rule': ' t'}}
postprocessing3 = {0: {'type': 'suffix', 'Error': ' t'}}
postprocessing4 = {0: {'type': 'suffix', 'rule': ' '}}
postprocessing5 = {0: {'type': 'case', 'rule': 'lower'}}
postprocessing6 = {0: {'type': 'case', 'rule': 'Error'}}
with self.assertRaises(ValueError):
xpl.check_postprocessing(postprocessing1)
xpl.check_postprocessing(postprocessing2)
xpl.check_postprocessing(postprocessing3)
xpl.check_postprocessing(postprocessing4)
xpl.check_postprocessing(postprocessing5)
xpl.check_postprocessing(postprocessing6)
def test_apply_postprocessing_1(self):
"""
Unit test apply_postprocessing 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
assert np.array_equal(xpl.x_pred, xpl.apply_postprocessing())
def test_apply_postprocessing_2(self):
"""
Unit test apply_postprocessing 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {'Col1': {'type': 'suffix', 'rule': ' t'},
'Col2': {'type': 'prefix', 'rule': 'test'}}
expected_output = pd.DataFrame(
data=[['1 t', 'test2'],
['3 t', 'test4']],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
output = xpl.apply_postprocessing(postprocessing)
assert np.array_equal(output, expected_output)
def test_check_contributions_1(self):
"""
Unit test check contributions 1
"""
xpl = SmartExplainer()
xpl.contributions, xpl.x_pred = Mock(), Mock()
xpl.state = Mock()
xpl.check_contributions()
xpl.state.check_contributions.assert_called_with(xpl.contributions, xpl.x_pred)
def test_check_contributions_2(self):
"""
Unit test check contributions 2
"""
xpl = SmartExplainer()
xpl.contributions, xpl.x_pred = Mock(), Mock()
mock_state = Mock()
mock_state.check_contributions.return_value = False
xpl.state = mock_state
with self.assertRaises(ValueError):
xpl.check_contributions()
def test_check_label_dict_1(self):
"""
Unit test check label dict 1
"""
xpl = SmartExplainer(label_dict={1: 'Yes', 0: 'No'})
xpl._classes = [0, 1]
xpl._case = 'classification'
xpl.check_label_dict()
def test_check_label_dict_2(self):
"""
Unit test check label dict 2
"""
xpl = SmartExplainer()
xpl._case = 'regression'
xpl.check_label_dict()
def test_check_features_dict_1(self):
"""
Unit test check features dict 1
"""
xpl = SmartExplainer(features_dict={'Age': 'Age (Years Old)'})
xpl.columns_dict = {0: 'Age', 1: 'Education', 2: 'Sex'}
xpl.check_features_dict()
assert xpl.features_dict['Age'] == 'Age (Years Old)'
assert xpl.features_dict['Education'] == 'Education'
@patch('shapash.explainer.smart_explainer.SmartExplainer.choose_state')
@patch('shapash.explainer.smart_explainer.SmartExplainer.apply_preprocessing')
def test_compile_0(self, mock_apply_preprocessing, mock_choose_state):
"""
Unit test compile
Parameters
----------
mock_apply_preprocessing : [type]
[description]
mock_choose_state : [type]
[description]
"""
xpl = SmartExplainer()
mock_state = Mock()
mock_choose_state.return_value = mock_state
model = lambda: None
model.predict = types.MethodType(self.predict, model)
mock_state.rank_contributions.return_value = 1, 2, 3
contributions = pd.DataFrame([[-0.1, 0.2, -0.3], [0.1, -0.2, 0.3]])
mock_state.validate_contributions.return_value = contributions
mock_apply_preprocessing.return_value = contributions
x_pred = pd.DataFrame([[1, 2, 3], [1, 2, 3]])
xpl.compile(x=x_pred, model=model, contributions=contributions)
assert hasattr(xpl, 'state')
assert xpl.state == mock_state
assert hasattr(xpl, 'x_pred')
pd.testing.assert_frame_equal(xpl.x_pred, x_pred)
assert hasattr(xpl, 'contributions')
pd.testing.assert_frame_equal(xpl.contributions, contributions)
mock_choose_state.assert_called()
mock_state.validate_contributions.assert_called()
mock_apply_preprocessing.assert_called()
mock_state.rank_contributions.assert_called()
assert xpl._case == "regression"
def test_compile_1(self):
"""
Unit test compile 1
checking compile method without model
"""
df = pd.DataFrame(range(0, 21), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 10 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = np.random.randint(1, 3, df.shape[0])
df = df.set_index('id')
clf = cb.CatBoostClassifier(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
xpl = SmartExplainer()
xpl.compile(model=clf, x=df[['x1', 'x2']])
assert xpl._case == "classification"
self.assertListEqual(xpl._classes, [0, 1])
def test_compile_2(self):
"""
Unit test compile 2
checking new attributes added to the compile method
"""
df = pd.DataFrame(range(0, 5), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 2 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = ["S", "M", "S", "D", "M"]
df = df.set_index('id')
encoder = ce.OrdinalEncoder(cols=["x2"], handle_unknown="None")
encoder_fitted = encoder.fit(df)
df_encoded = encoder_fitted.transform(df)
output = df[["x1", "x2"]].copy()
output["x2"] = ["single", "married", "single", "divorced", "married"]
clf = cb.CatBoostClassifier(n_estimators=1).fit(df_encoded[['x1', 'x2']], df_encoded['y'])
postprocessing_1 = {"x2": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
postprocessing_2 = {
"family_situation": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
xpl_postprocessing1 = SmartExplainer()
xpl_postprocessing2 = SmartExplainer(features_dict={"x1": "age",
"x2": "family_situation"}
)
xpl_postprocessing3 = SmartExplainer()
xpl_postprocessing1.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=encoder_fitted,
postprocessing=postprocessing_1)
xpl_postprocessing2.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=encoder_fitted,
postprocessing=postprocessing_2)
xpl_postprocessing3.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=None,
postprocessing=None)
assert hasattr(xpl_postprocessing1, "preprocessing")
assert hasattr(xpl_postprocessing1, "postprocessing")
assert hasattr(xpl_postprocessing2, "preprocessing")
assert hasattr(xpl_postprocessing2, "postprocessing")
assert hasattr(xpl_postprocessing3, "preprocessing")
assert hasattr(xpl_postprocessing3, "postprocessing")
pd.testing.assert_frame_equal(xpl_postprocessing1.x_pred, output)
pd.testing.assert_frame_equal(xpl_postprocessing2.x_pred, output)
assert xpl_postprocessing1.preprocessing == encoder_fitted
assert xpl_postprocessing2.preprocessing == encoder_fitted
assert xpl_postprocessing1.postprocessing == postprocessing_1
assert xpl_postprocessing2.postprocessing == postprocessing_1
def test_compile_3(self):
"""
Unit test compile 3
checking compile method without model
"""
df = pd.DataFrame(range(0, 21), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 10 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = np.random.randint(1, 3, df.shape[0])
df = df.set_index('id')
clf = cb.CatBoostClassifier(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
clf_explainer = shap.TreeExplainer(clf)
contrib = pd.DataFrame(
[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
columns=['contribution_0', 'contribution_1', 'contribution_2', 'contribution_3'],
index=[0, 1, 2]
)
xpl = SmartExplainer()
with self.assertRaises(ValueError):
xpl.compile(model=clf, x=df[['x1', 'x2']], explainer=clf_explainer, contributions=contrib)
def test_filter_0(self):
"""
Unit test filter 0
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter()
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
@patch('shapash.explainer.smart_explainer.SmartExplainer.check_features_name')
def test_filter_1(self, mock_check_features_name):
"""
Unit test filter 1
Parameters
----------
mock_check_features_name : [type]
[description]
"""
xpl = SmartExplainer()
mock_check_features_name.return_value = [1, 2]
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(features_to_hide=['X1', 'X2'])
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_2(self):
"""
Unit test filter 2
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(threshold=0.1)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_3(self):
"""
Unit test filter 3
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(positive=True)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_4(self):
"""
Unit test filter 4
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(max_contrib=10)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_5(self):
"""
Unit test filter 5
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter(positive=True, max_contrib=10)
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_6(self):
"""
Unit test filter 6
"""
xpl = SmartExplainer()
mock_data = {'var_dict': 1, 'contrib_sorted': 2, 'x_sorted': 3}
xpl.data = mock_data
mock_state = Mock()
xpl.state = mock_state
xpl.filter()
mock_state.init_mask.assert_called()
mock_state.hide_contributions.assert_not_called()
mock_state.cap_contributions.assert_not_called()
mock_state.sign_contributions.assert_not_called()
mock_state.combine_masks.assert_called()
mock_state.cutoff_contributions.assert_not_called()
assert hasattr(xpl, 'mask')
mock_state.compute_masked_contributions.assert_called()
assert hasattr(xpl, 'masked_contributions')
def test_filter_7(self):
"""
Unit test filter 7
"""
xpl = SmartExplainer()
contributions = [
pd.DataFrame(
data=[[0.5, 0.4, 0.3], [0.9, 0.8, 0.7]],
columns=['Col1', 'Col2', 'Col3']
),
pd.DataFrame(
data=[[0.3, 0.2, 0.1], [0.6, 0.5, 0.4]],
columns=['Col1', 'Col2', 'Col3']
)
]
xpl.data = {'var_dict': 1, 'contrib_sorted': contributions, 'x_sorted': 3}
xpl.state = MultiDecorator(SmartState())
xpl.filter(threshold=0.5, max_contrib=2)
expected_mask = [
pd.DataFrame(
data=[[True, False, False], [True, True, False]],
columns=['contrib_1', 'contrib_2', 'contrib_3']
),
pd.DataFrame(
data=[[False, False, False], [True, True, False]],
columns=['contrib_1', 'contrib_2', 'contrib_3']
)
]
assert len(expected_mask) == len(xpl.mask)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in zip(expected_mask, xpl.mask)]
assert all(x is None for x in test_list)
expected_masked_contributions = [
pd.DataFrame(
data=[[0.0, 0.7], [0.0, 0.7]],
columns=['masked_neg', 'masked_pos']
),
pd.DataFrame(
data=[[0.0, 0.6], [0.0, 0.4]],
columns=['masked_neg', 'masked_pos']
)
]
assert len(expected_masked_contributions) == len(xpl.masked_contributions)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in
zip(expected_masked_contributions, xpl.masked_contributions)]
assert all(x is None for x in test_list)
expected_param_dict = {
'features_to_hide': None,
'threshold': 0.5,
'positive': None,
'max_contrib': 2
}
self.assertDictEqual(expected_param_dict, xpl.mask_params)
def test_check_label_name_1(self):
"""
Unit test check label name 1
"""
label_dict = {1: 'Age', 2: 'Education'}
xpl = SmartExplainer(label_dict=label_dict)
xpl.inv_label_dict = {v: k for k, v in xpl.label_dict.items()}
xpl._classes = [1, 2]
entry = 'Age'
expected_num = 0
expected_code = 1
expected_value = 'Age'
label_num, label_code, label_value = xpl.check_label_name(entry, 'value')
assert expected_num == label_num
assert expected_code == label_code
assert expected_value == label_value
def test_check_label_name_2(self):
"""
Unit test check label name 2
"""
xpl = SmartExplainer(label_dict = None)
xpl._classes = [1, 2]
entry = 1
expected_num = 0
expected_code = 1
expected_value = 1
label_num, label_code, label_value = xpl.check_label_name(entry, 'code')
assert expected_num == label_num
assert expected_code == label_code
assert expected_value == label_value
def test_check_label_name_3(self):
"""
Unit test check label name 3
"""
label_dict = {1: 'Age', 2: 'Education'}
xpl = SmartExplainer(label_dict=label_dict)
xpl.inv_label_dict = {v: k for k, v in xpl.label_dict.items()}
xpl._classes = [1, 2]
entry = 0
expected_num = 0
expected_code = 1
expected_value = 'Age'
label_num, label_code, label_value = xpl.check_label_name(entry, 'num')
assert expected_num == label_num
assert expected_code == label_code
assert expected_value == label_value
def test_check_label_name_4(self):
"""
Unit test check label name 4
"""
xpl = SmartExplainer()
label = 0
origin = 'error'
expected_msg = "Origin must be 'num', 'code' or 'value'."
self.assertRaisesWithMessage(expected_msg, xpl.check_label_name, **{'label': label, 'origin': origin})
def test_check_label_name_5(self):
"""
Unit test check label name 5
"""
label_dict = {1: 'Age', 2: 'Education'}
xpl = SmartExplainer(label_dict=label_dict)
xpl.inv_label_dict = {v: k for k, v in xpl.label_dict.items()}
xpl._classes = [1, 2]
label = 'Absent'
expected_msg = f"Label (Absent) not found for origin (value)"
origin = 'value'
self.assertRaisesWithMessage(expected_msg, xpl.check_label_name, **{'label': label, 'origin': origin})
def test_check_features_name_1(self):
"""
Unit test check features name 1
"""
xpl = SmartExplainer()
xpl.features_dict = {'tech_0': 'domain_0', 'tech_1': 'domain_1', 'tech_2': 'domain_2'}
xpl.inv_features_dict = {v: k for k, v in xpl.features_dict.items()}
xpl.columns_dict = {0: 'tech_0', 1: 'tech_1', 2: 'tech_2'}
xpl.inv_columns_dict = {v: k for k, v in xpl.columns_dict.items()}
feature_list_1 = ['domain_0', 'tech_1']
feature_list_2 = ['domain_0', 0]
self.assertRaises(ValueError, xpl.check_features_name, feature_list_1)
self.assertRaises(ValueError, xpl.check_features_name, feature_list_2)
def test_check_features_name_2(self):
"""
Unit test check features name 2
"""
xpl = SmartExplainer()
xpl.features_dict = {'tech_0': 'domain_0', 'tech_1': 'domain_1', 'tech_2': 'domain_2'}
xpl.inv_features_dict = {v: k for k, v in xpl.features_dict.items()}
xpl.columns_dict = {0: 'tech_0', 1: 'tech_1', 2: 'tech_2'}
xpl.inv_columns_dict = {v: k for k, v in xpl.columns_dict.items()}
feature_list = ['domain_0', 'domain_2']
output = xpl.check_features_name(feature_list)
expected_output = [0, 2]
np.testing.assert_array_equal(output, expected_output)
def test_check_features_name_3(self):
"""
Unit test check features name 3
"""
xpl = SmartExplainer()
xpl.columns_dict = {0: 'tech_0', 1: 'tech_1', 2: 'tech_2'}
xpl.inv_columns_dict = {v: k for k, v in xpl.columns_dict.items()}
feature_list = ['tech_2']
output = xpl.check_features_name(feature_list)
expected_output = [2]
np.testing.assert_array_equal(output, expected_output)
def test_check_features_name_4(self):
"""
Unit test check features name 4
"""
xpl = SmartExplainer()
xpl.columns_dict = None
xpl.features_dict = None
feature_list = [1, 2, 4]
output = xpl.check_features_name(feature_list)
expected_output = feature_list
np.testing.assert_array_equal(output, expected_output)
def test_save_1(self):
"""
Unit test save 1
"""
pkl_file, xpl = init_sme_to_pickle_test()
xpl.save(pkl_file)
assert path.exists(pkl_file)
os.remove(pkl_file)
def test_load_1(self):
"""
Unit test load 1
"""
temp, xpl = init_sme_to_pickle_test()
xpl2 = SmartExplainer()
current = Path(path.abspath(__file__)).parent.parent.parent
pkl_file = path.join(current, 'data/xpl_to_load.pkl')
xpl2.load(pkl_file)
attrib_xpl = [element for element in xpl.__dict__.keys()]
attrib_xpl2 = [element for element in xpl2.__dict__.keys()]
assert all(attrib in attrib_xpl2 for attrib in attrib_xpl)
assert all(attrib2 in attrib_xpl for attrib2 in attrib_xpl2)
def test_check_y_pred_1(self):
"""
Unit test check y pred
"""
xpl = SmartExplainer()
xpl.y_pred = None
xpl.x_pred = None
xpl.check_y_pred()
def test_check_y_pred_2(self):
"""
Unit test check y pred 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
xpl.y_pred = pd.DataFrame(
data=np.array(['1', 0]),
columns=['Y']
)
with self.assertRaises(ValueError):
xpl.check_y_pred(xpl.y_pred)
def test_check_y_pred_3(self):
"""
Unit test check y pred 3
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
xpl.y_pred = pd.DataFrame(
data=np.array([0]),
columns=['Y']
)
with self.assertRaises(ValueError):
xpl.check_y_pred(xpl.y_pred)
def test_check_y_pred_4(self):
"""
Unit test check y pred 4
"""
xpl = SmartExplainer()
xpl.y_pred = [0, 1]
self.assertRaises(AttributeError, xpl.check_y_pred)
def test_check_y_pred_5(self):
"""
Unit test check y pred 5
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
xpl.y_pred = pd.Series(
data=np.array(['0'])
)
with self.assertRaises(ValueError):
xpl.check_y_pred(xpl.y_pred)
def test_check_model_1(self):
"""
Unit test check model 1
"""
model = lambda: None
model.predict = types.MethodType(self.predict, model)
xpl = SmartExplainer()
xpl.model = model
xpl._case, xpl._classes = xpl.check_model()
assert xpl._case == 'regression'
assert xpl._classes is None
def test_check_model_2(self):
"""
Unit test check model 2
"""
xpl = SmartExplainer()
df1 = pd.DataFrame([1, 2])
df2 = pd.DataFrame([3, 4])
xpl.contributions = [df1, df2]
xpl.state = xpl.choose_state(xpl.contributions)
model = lambda: None
model._classes = np.array([1, 2])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
xpl.model = model
xpl._case, xpl._classes = xpl.check_model()
assert xpl._case == 'classification'
self.assertListEqual(xpl._classes, [1, 2])
def test_check_features_desc_1(self):
"""
Unit test check features desc 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[0.12, 0, 13, 1],
[0.13, 1, 14, 1],
[0.14, 1, 15, 1],
[0.15, np.NaN, 13, 1]],
columns=['col1', 'col2', 'col3', 'col4']
)
expected = {
'col1' : 4,
'col2' : 2,
'col3' : 3,
'col4' : 1
}
assert xpl.check_features_desc() == expected
@patch('shapash.explainer.smart_explainer.SmartExplainer.check_y_pred')
def test_add_1(self, mock_check_y_pred):
"""
Unit test add 1
Parameters
----------
mock_check_y_pred : [type]
[description]
"""
xpl = SmartExplainer()
dataframe_yp = pd.DataFrame([1, 3, 1], columns=['pred'], index=[0, 1, 2])
mock_y_pred = Mock(return_value=dataframe_yp)
mock_check_y_pred.return_value = mock_y_pred()
xpl.x_pred = dataframe_yp
xpl.add(y_pred=dataframe_yp)
expected = SmartExplainer()
expected.y_pred = dataframe_yp
assert not pd.testing.assert_frame_equal(xpl.y_pred, expected.y_pred)
mock_check_y_pred.assert_called()
def test_add_2(self):
"""
Unit test add 2
"""
xpl = SmartExplainer()
xpl._classes = [0, 1]
xpl._case = "classification"
xpl.add(label_dict={0: 'Zero', 1: 'One'})
assert xpl.label_dict[0] == 'Zero'
assert xpl.label_dict[1] == 'One'
def test_add_3(self):
"""
Unit test add 3
"""
xpl = SmartExplainer()
xpl.columns_dict = {0: 'Age', 1: 'Education', 2: 'Sex'}
xpl.add(features_dict={'Age': 'Age (Years Old)'})
assert xpl.features_dict['Age'] == 'Age (Years Old)'
assert xpl.features_dict['Education'] == 'Education'
def test_to_pandas_1(self):
"""
Unit test to pandas 1
"""
xpl = SmartExplainer()
xpl.state = SmartState()
data = {}
data['contrib_sorted'] = pd.DataFrame(
[[0.32230754, 0.1550689, 0.10183475, 0.05471339],
[-0.58547512, -0.37050409, -0.07249285, 0.00171975],
[-0.48666675, 0.25507156, -0.16968889, 0.0757443]],
columns=['contribution_0', 'contribution_1', 'contribution_2', 'contribution_3'],
index=[0, 1, 2]
)
data['var_dict'] = pd.DataFrame(
[[1, 0, 2, 3],
[1, 0, 3, 2],
[1, 0, 2, 3]],
columns=['feature_0', 'feature_1', 'feature_2', 'feature_3'],
index=[0, 1, 2]
)
data['x_sorted'] = pd.DataFrame(
[[1., 3., 22., 1.],
[2., 1., 2., 38.],
[2., 3., 26., 1.]],
columns=['feature_0', 'feature_1', 'feature_2', 'feature_3'],
index=[0, 1, 2]
)
xpl.data = data
xpl.columns_dict = {0: 'Pclass', 1: 'Sex', 2: 'Age', 3: 'Embarked'}
xpl.features_dict = {'Pclass': 'Pclass', 'Sex': 'Sex', 'Age': 'Age', 'Embarked': 'Embarked'}
xpl.x = pd.DataFrame(
[[3., 1., 22., 1.],
[1., 2., 38., 2.],
[3., 2., 26., 1.]],
columns=['Pclass', 'Sex', 'Age', 'Embarked'],
index=[0, 1, 2]
)
xpl.x_pred = xpl.x
xpl.contributions = data['contrib_sorted']
xpl.y_pred = pd.DataFrame([1, 2, 3], columns=['pred'], index=[0, 1, 2])
model = lambda : None
model.predict = types.MethodType(self.predict, model)
xpl.model = model
xpl._case, xpl._classes = xpl.check_model()
xpl.state = xpl.choose_state(xpl.contributions)
output = xpl.to_pandas(max_contrib=2)
expected = pd.DataFrame(
[[1, 'Sex', 1.0, 0.32230754, 'Pclass', 3.0, 0.1550689],
[2, 'Sex', 2.0, -0.58547512, 'Pclass', 1.0, -0.37050409],
[3, 'Sex', 2.0, -0.48666675, 'Pclass', 3.0, 0.25507156]],
columns=['pred',
'feature_1',
'value_1',
'contribution_1',
'feature_2',
'value_2',
'contribution_2'],
index=[0, 1, 2],
dtype=object
)
expected['pred'] = expected['pred'].astype(int)
assert not | pd.testing.assert_frame_equal(expected, output) | pandas.testing.assert_frame_equal |
import numpy as np
import arcpy
from arcpy import env
from arcpy.sa import *
from arcgis.features import GeoAccessor, GeoSeriesAccessor
import sys
import os
import csv
import pandas as pd
import tempfile
import copy
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from basinmaker.func.arcgis import *
from basinmaker.func.pdtable import *
arcpy.env.overwriteOutput = True
arcpy.CheckOutExtension("Spatial")
def Locate_subid_needsbyuser_arcgis(
Path_Points="#", Gauge_NMS="#", Path_products="#"
):
"""Get Subbasin Ids
Function that used to obtain subbasin ID of certain gauge.
or subbasin ID of the polygon that includes the given point
shapefile.
Parameters
----------
Path_Points : string (Optional)
It is the path of the point shapefile. If the point shapefile is
provided. The function will return subids of those catchment
polygons that includes these point in the point shapefile
Gauge_NMS : list
Name of the streamflow gauges, such as ['09PC019'], if the gauge
name is provided, the subbasin ID that contain this gauge will be
returned
Path_products : string
The path of the subbasin polygon shapefile.
The shapefile should at least contains following columns
##############Subbasin related attributes###########################
SubID - integer, The subbasin Id
DowSubId - integer, The downstream subbasin ID of this
subbasin
        Obs_NM - The streamflow observation gauge name.
Notes
-------
    Only one of Path_Points or Gauge_NMS should be provided
    per call to this function.
Returns:
-------
SubId_Selected : list
It is a list contains the selected subid based on provided
streamflow gauge name or provided point shapefile
Examples
-------
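    A minimal, hypothetical usage sketch (paths and gauge name are placeholders):
        subids = Locate_subid_needsbyuser_arcgis(
            Gauge_NMS=['09PC019'],
            Path_products='C:/routing_product/finalcat_info.shp',
        )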
"""
tempfolder = os.path.join(
tempfile.gettempdir(),
"basinmaker_locsubid" + str(np.random.randint(1, 10000 + 1)),
)
if not os.path.exists(tempfolder):
os.makedirs(tempfolder)
arcpy.env.workspace = tempfolder
SubId_Selected = -1
if Gauge_NMS[0] != "#":
hyshdinfo2 = pd.DataFrame.spatial.from_featureclass(Path_products)
hyshdinfo2 = hyshdinfo2.loc[hyshdinfo2["Obs_NM"] != "-9999.0"]
hyshdinfo2 = hyshdinfo2.loc[hyshdinfo2["Obs_NM"].isin(Gauge_NMS)]
hyshdinfo2 = hyshdinfo2[["Obs_NM", "SubId"]]
# hyshdinfo2.to_csv(os.path.join(self.OutputFolder,'SubIds_Selected.csv'),sep=',', index = None)
SubId_Selected = hyshdinfo2["SubId"].values
if Path_Points != "#":
SpRef_in = arcpy.Describe(Path_products).spatialReference
        # project the user-supplied points onto the routing product's spatial reference before the join
        arcpy.Project_management(Path_Points, "Obspoint_project2.shp", SpRef_in)
        arcpy.SpatialJoin_analysis("Obspoint_project2.shp", Path_products, 'Sub_Selected_by_Points')
        hyshdinfo2 = pd.DataFrame.spatial.from_featureclass(
            os.path.join(tempfolder, "Sub_Selected_by_Points.shp")
        )
SubId_Selected = hyshdinfo2["SubId"].values
SubId_Selected = SubId_Selected[SubId_Selected > 0]
return SubId_Selected
def Select_Routing_product_based_SubId_arcgis(
OutputFolder,
Routing_Product_Folder,
mostdownid=-1,
mostupstreamid=-1,
):
"""Extract region of interest based on provided Subid
Function that used to obtain the region of interest from routing
product based on given SubId
Parameters
----------
OutputFolder : string
Folder path that stores extracted routing product
Path_Catchment_Polygon : string
Path to the catchment polygon
Path_River_Polyline : string (optional)
Path to the river polyline
Path_Con_Lake_ply : string (optional)
Path to a connected lake polygon. Connected lakes are lakes that
are connected by Path_final_cat_riv or Path_final_riv.
Path_NonCon_Lake_ply : string (optional)
        Path to a non-connected lake polygon. Non-connected lakes are lakes
        that are not connected to the river network by Path_final_cat_riv or Path_final_riv.
mostdownid : integer
It is the most downstream subbasin ID in the region of interest
mostupstreamid : integer (optional)
It is the most upstream subbasin ID in the region of interest.
Normally it is -1, indicating all subbasin drainage to mostdownid
is needed. In some case, if not all subbasin drainage to mostdownid
        is needed, then the most upstream subbasin ID needs to be provided
here.
Notes
-------
    This function has no return values; instead, the following files will be
    generated. The output files are the same as the inputs except that their
    extents are different.
os.path.join(OutputFolder,os.path.basename(Path_Catchment_Polygon))
os.path.join(OutputFolder,os.path.basename(Path_River_Polyline))
os.path.join(OutputFolder,os.path.basename(Path_Con_Lake_ply))
os.path.join(OutputFolder,os.path.basename(Path_NonCon_Lake_ply))
Returns:
-------
None
Examples
-------
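    A minimal, hypothetical sketch (IDs and paths are placeholders; note that the most
    downstream/upstream IDs are passed as lists):
        Select_Routing_product_based_SubId_arcgis(
            OutputFolder='C:/extracted_product',
            Routing_Product_Folder='C:/routing_product',
            mostdownid=[1003],
            mostupstreamid=[-1],
        )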
"""
tempfolder = os.path.join(
tempfile.gettempdir(),
"basinmaker_locsubid" + str(101),#np.random.randint(1, 10000 + 1)),
)
if not os.path.exists(tempfolder):
os.makedirs(tempfolder)
arcpy.env.workspace = tempfolder
Path_Catchment_Polygon="#"
Path_River_Polyline="#"
Path_Con_Lake_ply="#"
Path_NonCon_Lake_ply="#"
Path_obs_gauge_point="#"
Path_final_cat_ply="#"
Path_final_cat_riv="#"
## define input files from the routing product
for file in os.listdir(Routing_Product_Folder):
if file.endswith(".shp"):
if 'catchment_without_merging_lakes' in file:
Path_Catchment_Polygon = os.path.join(Routing_Product_Folder, file)
if 'river_without_merging_lakes' in file:
Path_River_Polyline = os.path.join(Routing_Product_Folder, file)
if 'sl_connected_lake' in file:
Path_Con_Lake_ply = os.path.join(Routing_Product_Folder, file)
if 'sl_non_connected_lake' in file:
Path_NonCon_Lake_ply = os.path.join(Routing_Product_Folder, file)
if 'obs_gauges' in file:
Path_obs_gauge_point = os.path.join(Routing_Product_Folder, file)
if 'finalcat_info' in file:
Path_final_cat_ply = os.path.join(Routing_Product_Folder, file)
if 'finalcat_info_riv' in file:
Path_final_cat_riv = os.path.join(Routing_Product_Folder, file)
if Path_Catchment_Polygon == '#' or Path_River_Polyline =='#':
print("Invalid routing product folder ")
arcpy.AddMessage(Path_Catchment_Polygon)
return()
sub_colnm = "SubId"
down_colnm = "DowSubId"
cat_ply = pd.DataFrame.spatial.from_featureclass(Path_Catchment_Polygon)
hyshdinfo = cat_ply[[sub_colnm, down_colnm]].astype("int32").values
if not os.path.exists(OutputFolder):
os.makedirs(OutputFolder)
for i_down in range(0,len(mostdownid)):
### Loop for each downstream id
OutHyID = mostdownid[i_down]
OutHyID2 = mostupstreamid[i_down]
## find all subid control by this subid
HydroBasins1 = defcat(hyshdinfo, OutHyID)
if OutHyID2 > 0:
HydroBasins2 = defcat(hyshdinfo, OutHyID2)
### exclude the IDs in HydroBasins2 from HydroBasins1
for i in range(len(HydroBasins2)):
rows = np.argwhere(HydroBasins1 == HydroBasins2[i])
HydroBasins1 = np.delete(HydroBasins1, rows)
HydroBasins = HydroBasins1
else:
HydroBasins = HydroBasins1
if i_down == 0:
HydroBasins_All = HydroBasins
else:
HydroBasins_All = np.concatenate((HydroBasins_All, HydroBasins), axis=0)
Outputfilename_cat = os.path.join(
OutputFolder, os.path.basename(Path_Catchment_Polygon)
)
cat_ply_select = cat_ply.loc[cat_ply['SubId'].isin(HydroBasins_All)]
cat_ply_select.spatial.to_featureclass(location=Outputfilename_cat,overwrite=True,sanitize_columns=False)
Outputfilename_cat_riv = os.path.join(
OutputFolder, os.path.basename(Path_River_Polyline)
)
cat_riv = pd.DataFrame.spatial.from_featureclass(Path_River_Polyline)
cat_riv_select = cat_riv.loc[cat_riv['SubId'].isin(HydroBasins_All)]  # use the accumulated set so rivers match the selected catchments
cat_riv_select.spatial.to_featureclass(location=Outputfilename_cat_riv,overwrite=True,sanitize_columns=False)
cat_ply_select = pd.DataFrame.spatial.from_featureclass(Outputfilename_cat)
Connect_Lake_info = cat_ply_select.loc[cat_ply_select["Lake_Cat"] == 1]
Connect_Lakeids = np.unique(Connect_Lake_info["HyLakeId"].values)
Connect_Lakeids = Connect_Lakeids[Connect_Lakeids > 0]
NConnect_Lake_info = cat_ply_select.loc[cat_ply_select["Lake_Cat"] == 2]
NonCL_Lakeids = np.unique(NConnect_Lake_info["HyLakeId"].values)
NonCL_Lakeids = NonCL_Lakeids[NonCL_Lakeids > 0]
if len(Connect_Lakeids) > 0 and Path_Con_Lake_ply != "#":
sl_con_lakes = pd.DataFrame.spatial.from_featureclass(Path_Con_Lake_ply)
sl_con_lakes = sl_con_lakes.loc[sl_con_lakes['Hylak_id'].isin(Connect_Lakeids)]
sl_con_lakes.spatial.to_featureclass(location=os.path.join(OutputFolder,os.path.basename(Path_Con_Lake_ply)),overwrite=True,sanitize_columns=False)
if len(NonCL_Lakeids) > 0 and Path_NonCon_Lake_ply != "#":
sl_non_con_lakes = | pd.DataFrame.spatial.from_featureclass(Path_NonCon_Lake_ply) | pandas.DataFrame.spatial.from_featureclass |
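# Illustrative sketch: a hypothetical call to Select_Routing_product_based_SubId_arcgis
# as defined above. Although mostdownid/mostupstreamid default to -1, the body iterates
# over them, so equal-length lists are assumed here; -1 as the upstream entry means
# "take every subbasin draining to the downstream SubId". Paths and SubIds are
# placeholders only.
def _example_extract_region_of_interest():
    Select_Routing_product_based_SubId_arcgis(
        OutputFolder=r"C:/temp/extracted_product",          # placeholder path
        Routing_Product_Folder=r"C:/data/routing_product",  # placeholder path
        mostdownid=[1234],       # most downstream SubId of the region of interest
        mostupstreamid=[-1],     # -1: include everything draining to SubId 1234
    )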
import pandas as pd
import torch
from pytorch_tabnet.tab_model import TabNetRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import LabelEncoder
if __name__ == '__main__':
root_path = './data/processed_data/house/'
train_data = pd.read_csv(root_path + 'train_data.csv')
len_train = len(train_data)
test_data = | pd.read_csv(root_path + 'test_data.csv') | pandas.read_csv |
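# Illustrative sketch: a minimal, hypothetical continuation of the TabNet script
# above, showing the usual fit/predict flow of pytorch_tabnet's TabNetRegressor.
# The target column name ('target'), the train/validation split, and the
# assumption that all feature columns are already numeric (e.g. label-encoded)
# are not taken from the original data.
def _example_train_tabnet(train_df, test_df, target_col='target'):
    import numpy as np
    from sklearn.model_selection import train_test_split

    features = [c for c in train_df.columns if c != target_col]
    X = train_df[features].values.astype(np.float32)
    y = train_df[target_col].values.reshape(-1, 1)  # TabNetRegressor expects 2-D targets
    X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=0)

    model = TabNetRegressor()
    model.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], max_epochs=100, patience=10)

    val_pred = model.predict(X_val)
    print('valid RMSE:', mean_squared_error(y_val, val_pred) ** 0.5,
          'valid R2:', r2_score(y_val, val_pred))
    return model.predict(test_df[features].values.astype(np.float32))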
import okex.account_api as account
import okex.futures_api as future
import okex.spot_api as spot
import pandas as pd
import datetime
import time
from tools.DBTool import DBTool
from tools.ReadConfig import ReadConfig
from sqlalchemy import create_engine
from sqlalchemy.types import DECIMAL, TEXT, Date, DateTime
class Account():
def __init__(self,spotAPI,accountAPI,engine):
self.spotAPI =spotAPI
self.accountAPI = accountAPI
self.engine= engine
def get_timestamp(self,time):
# now = datetime.datetime.now()
t = time.isoformat("T", "milliseconds")
return t + "Z"
def get_okex_spot_accounts(self,time):
# OKEx spot account information
result = self.spotAPI.get_account_info()
spotAccount = | pd.DataFrame(result, columns=['frozen', 'hold', 'id', 'currency', 'balance', 'available', 'holds']) | pandas.DataFrame |
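# Illustrative sketch: a hypothetical helper for persisting the spot-account
# DataFrame built above through the SQLAlchemy engine the class is constructed
# with. The table name and the column/type mapping are assumptions, not part
# of the original script.
def _example_persist_spot_accounts(spot_df, engine, snapshot_time):
    spot_df = spot_df.copy()
    spot_df['timestamp'] = snapshot_time
    spot_df.to_sql(
        'okex_spot_account',      # hypothetical table name
        con=engine,
        if_exists='append',
        index=False,
        dtype={'currency': TEXT, 'balance': DECIMAL(20, 8), 'timestamp': DateTime},
    )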
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care about
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
# placeholder names (the original values were redacted); rows 0 and 2 must
# match so that drop_duplicates below keeps only rows 0 and 1
x = pd.DataFrame([[1, 'Person A'], [2, 'Person B'],
[1, 'Person A']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
"""Pypiplot."""
from d3heatmap import d3heatmap as d3
import pypistats
import requests
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import argparse
import pandas as pd
import numpy as np
import os
from calplot import calplot, yearplot
import tempfile
# %%
class Pypiplot:
"""Class pypiplot."""
def __init__(self, username, category=['with_mirrors', 'without_mirrors'], sep=';', savepath=None, verbose=3):
"""Initialize pypiplot.
Parameters
----------
username : String
Github user account name.
category : list, optional
Downloads are counted for one or both of these categories ['with_mirrors', 'without_mirrors'].
sep : str, (Default: ';')
Separator used to store data in the csv file.
savepath : String, (Default: None)
Storage of the csv files containing download statistics.
verbose : int, (Default: 3)
Verbosity message.
Returns
-------
None.
"""
self.username = username
self.repo_link = 'https://api.github.com/users/' + username + '/repos'
self.sep = sep
self.category = category
self.curpath = os.path.dirname(os.path.abspath(__file__))
self.tempdir = os.path.abspath(tempfile.gettempdir())
# self.curpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
if savepath is None:
self.savepath = os.path.join(self.curpath, 'pypi_data')
if not os.path.exists(self.savepath): os.makedirs(self.savepath)
else:
self.savepath = savepath
self.verbose=verbose
def update(self, repo=None):
"""Update repo download file(s).
Description
-----------
Update the locally stored file(s) with daily downloads for the specified repos.
Parameters
----------
repo : list of Strings, (Default: None)
None : Take all available pypi repos for the username.
Returns
-------
None.
"""
if (repo is not None) and ('str' in str(type(repo))):
repo = [repo]
# Extract all repos
repos = self._get_repo_names_from_git()
if (repo is not None):
repos = repo
if not np.any(np.isin(repos, repo)):
raise ValueError('[pypiplot] >Error: repo [%s] does not exist or is private.' %(repo))
if self.verbose>=3: print('[pypiplot] >Start updating..')
for repo in repos:
try:
if self.verbose>=3: print('[pypiplot] >[%s]' %(repo))
status = True
df = pypistats.overall(repo, total=True, format="pandas")
df.dropna(inplace=True)
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
df = df.sort_values("date")
df.reset_index(drop=True, inplace=True)
del df['percent']
# Merge with any on disk
pathname = os.path.join(self.savepath, repo + '.csv')
if os.path.isfile(pathname):
# Read repo from disk
df_disk = read_repo_counts_from_disk(pathname, self.sep)
# Merge with latest counts
df, status = add_new_counts_to_repo(df, df_disk, repo, verbose=self.verbose)
# Write to disk
if status:
if self.verbose>=3: print('[pypiplot] >Write to disk: [%s]' %(pathname))
df.to_csv(pathname, index=False, sep=self.sep)
except:
if self.verbose>=1: print('[pypiplot] >Skip [%s] because statistics could not be retrieved from Pypi.' %(repo))
def stats(self, repo=None):
"""Compute statistics for the specified repo(s).
Description
-----------
Compute and summarize statistics for the libraries.
Parameters
----------
repo : list of Strings, (Default: None)
None : Take all available pypi repos for the username.
Returns
-------
dict()
* data : Download statistics for the repo(s).
* heatmap : DataFrame containing (summarized) data statistics.
* repos : Number of repos.
* n_libraries : Number of libraries processed.
"""
# Retrieve all repos for the username
status, repos, filenames, pathnames = self._get_repos()
if (repo is not None) and ('str' in str(type(repo))):
repo = [repo]
# Check whether specific repo exists.
if repo is not None:
Iloc = np.isin(repos, repo)
if not np.any(Iloc): raise ValueError('[pypiplot] >Error: repo [%s] does not exist or is private. Tip: run .update() first.' %(repo))
# repos = [repo]
repos = repos[Iloc]
filenames = filenames[Iloc]
pathnames = pathnames[Iloc]
if not status:
if self.verbose>=3: print('[pypiplot] >No repos could be retrieved from git or from disk <return>')
return None
out = pd.DataFrame()
for repo, pathname in zip(repos, pathnames):
df = read_repo_counts_from_disk(pathname, self.sep)
# Take desired categories
Iloc = np.isin(df['category'], self.category)
df = df.loc[Iloc, :]
# Group by date
df = df.groupby("date").sum().sort_values("date")
df.reset_index(drop=False, inplace=True)
dftmp = df.groupby("date").sum()
dftmp.rename(columns={'downloads': repo}, inplace=True)
out = pd.concat([out, dftmp], axis=0)
out.fillna(value=0, inplace=True)
out.reset_index(drop=False, inplace=True)
out = out.groupby("date").sum()
# Make heatmap
heatmap = _compute_history_heatmap(out, verbose=self.verbose)
self.results = {}
self.results['data'] = out
self.results['heatmap'] = heatmap
self.results['n_libraries'] = out.shape[1]
self.results['repos'] = repos
return self.results
def _get_repo_names_from_git(self):
# Extract repos for user
if self.verbose>=3: print('[pypiplot] >Extracting repo names for [%s]..' %(self.username))
r = requests.get(self.repo_link)
data = r.json()
# Extract the repo names
repos = []
for rep in data:
# full_names.insert(0, rep['full_name'])
repos.insert(0, rep['name'])
if self.verbose>=3: print('[pypiplot] >[%.0d] repos found for [%s]' %(len(repos), self.username))
# Return
return np.array(repos)
def _get_repos(self):
status = True
# Retrieve all downloads from disk
repos, filenames, pathnames = get_files_on_disk(self.savepath, verbose=self.verbose)
# Update and retrieve if needed
if len(repos)==0:
if self.verbose>=3: print('[pypiplot] >No files found on disk. Updating first..')
# Update all repos
self.update()
# Retrieve all downloads from disk
repos, filenames, pathnames = get_files_on_disk(self.savepath, verbose=0)
if len(repos)==0:
status = False
# Return
return status, repos, filenames, pathnames
def plot_cal(self, method='mean', vmin=None, vmax=None, cmap='YlGn', norm=False):
X = self.results['data'].copy()
if vmin is not None:
X[X<=vmin]=vmin
if vmax is not None:
X[X>=vmax]=vmax
if norm:
print('[pypiplot]> Normalizing..')
X = (X-X.mean(axis=0)) / X.std(axis=0)
print('[pypiplot]> Method: [%s]' %(method))
if method=='median':
events=X.median(axis=1)
elif method=='mean':
events=X.mean(axis=1)
else:
events=X.sum(axis=1)
# Make the calender
plt.figure()
calplot(events, cmap=cmap, colorbar=False, figsize=None, suptitle=None)
def plot_year(self, title=None, description=None, path='d3heatmap.html', vmin=10, vmax=None, cmap='interpolateGreens', visible=True, overwrite=False):
"""Plot heatmap across all repos.
Description
-----------
Plot heatmap of all the repos combined with weeks vs day-name
Parameters
----------
title : String, (Default: None)
Title of the heatmap.
description : String, (Default: None)
Description of the heatmap.
path : String, (Default: 'd3heatmap.html'.)
Full pathname or filename to store the file. If None is used, the system tempdir is used.
vmin : int, (Default: 10)
Minimum color: Used for colorscheme.
None: Take the minimum value in the matrix.
vmax : int, (Default: None)
Maximum color: Used for colorscheme.
None: Take the maximum value in the matrix.
cmap : String, (default: 'interpolateInferno').
The colormap scheme. This can be found at: https://github.com/d3/d3-scale-chromatic.
visible : Bool, (default: True).
Open the browser.
overwrite : Bool, (default: False).
Overwrite the output file if it already exists.
Returns
-------
None.
"""
if description is None:
if self.results['n_libraries']>1:
description = '%.0d Total Pypi downloads across %.0d libraries' %(self.results['heatmap'].sum().sum(), self.results['n_libraries'])
else:
description = '%.0d Total Pypi downloads for %s' %(self.results['heatmap'].sum().sum(), self.results['repos'][0])
if title is None:
title = ''
# Make heatmap with d3js.
d3.matrix(self.results['heatmap'], fontsize=9, title=title, description=description, path=path, width=700, height=200, cmap=cmap, vmin=vmin, vmax=vmax, stroke='black', showfig=visible, overwrite=overwrite)
def plot(self, title=None, method='mean', legend=True, figsize=(25, 15)):
plt.figure()
if method=='median':
self.results['data'].rolling(window=30).median().plot(figsize=figsize, legend=legend)
elif method=='sum':
self.results['data'].rolling(window=30).sum().plot(figsize=figsize, legend=legend)
else:
self.results['data'].rolling(window=30).mean().plot(figsize=figsize, legend=legend)
plt.xlabel('Date')
plt.ylabel('Average number of downloads in a rolling window of 30 days')
plt.grid(True)
plt.title(title)
def plot_heatmap(self, title=None, description=None, path='d3_heatmap_repos.html', vmin=10, vmax=None, width=700, height=None, cmap='interpolateGreens'):
"""Plot heatmap across all repos.
Description
-----------
Plot heatmap of all the repos combined with weeks vs day-name
Parameters
----------
title : String, (Default: None)
Title of the heatmap.
description : String, (Default: None)
Description of the heatmap.
path : String, (Default: 'd3_heatmap_repos.html'.)
Full pathname or filename to store the file. If None is used, the system tempdir is used.
vmin : int, (Default: 10)
Minimum color: Used for colorscheme.
None: Take the minimum value in the matrix.
vmax : int, (Default: None)
Maximum color: Used for colorscheme.
None: Take the maximum value in the matrix.
cmap : String, (default: 'interpolateInferno').
The colormap scheme. This can be found at: https://github.com/d3/d3-scale-chromatic
'interpolateOranges'
width : int, (default: 700).
Width of the window.
height : int, (default: None).
None: Determine based on number of repos.
Returns
-------
None.
"""
heatmap = pd.DataFrame()
cols = self.results['data'].columns.values
for col in cols:
heatmap[col] = _compute_history_heatmap(pd.DataFrame(self.results['data'][col])).sum(axis=0)
if title is None:
title = ''
if description is None:
if self.results['n_libraries']>1:
description = '%.0d Pypi downloads last year across %.0d libraries' %(self.results['heatmap'].sum().sum(), self.results['n_libraries'])
else:
description = '%.0d Pypi downloads last year for %s' %(self.results['heatmap'].sum().sum(), self.results['repos'][0])
if height is None:
height = np.maximum(np.minimum(40 * heatmap.shape[1], 550), 200)
# Make heatmap with d3js.
        d3.matrix(heatmap.T, fontsize=9, title=title, description=description, path=path, width=width, height=height, cmap=cmap, vmin=vmin, vmax=vmax, stroke='black', overwrite=True)
# %%
def _compute_history_heatmap(df, duration=360, nr_days=7, verbose=3):
df = df.sum(axis=1).copy()
datetimeformat='%Y-%m-%d'
if verbose>=3: print('[pypiplot] >Computing heatmap across the last %.0d days.' %(duration))
# Make sure the duration is tops 365 from now
extend_days = datetime.now() - timedelta(duration)
dates_start = pd.to_datetime(pd.date_range(start=extend_days, end=df.index[0]).strftime(datetimeformat), format=datetimeformat)
df_start = pd.DataFrame(np.zeros((len(dates_start), 1)), dtype=int, index=dates_start)
# Fill the gap between now and the latest point of the date in the data
dates_end = pd.to_datetime(pd.date_range(start=df.index[-1] + timedelta(1), end=datetime.now()).strftime(datetimeformat), format=datetimeformat)
df_end = pd.DataFrame(np.zeros((len(dates_end), 1)), dtype=int, index=dates_end)
# dataframe containing 365 days of data
df_365 = pd.concat([df_start, df, df_end], axis=0)
# To make sure we can break the dataframe into columns of 7 days, we need to extend a bit more.
extend_days = float(nr_days - np.mod(df_365.shape[0], nr_days))
start = df_365.index[0] - timedelta(extend_days)
dates_start = pd.to_datetime(pd.date_range(start=start, end=df_365.index[0] - timedelta(1)).strftime(datetimeformat), format=datetimeformat)
df_start = pd.DataFrame(np.zeros((len(dates_start), 1)), dtype=int, index=dates_start)
# Final
df_fin = pd.concat([df_start, df_365], axis=0)
df_values = df_fin.values.reshape((-1, nr_days))
# Column names
colnames = df_fin.index.isocalendar().week.astype(str).values
# colnames = pd.Int64Index(idx.isocalendar().week
colnames = colnames.reshape((-1, nr_days))[:, -1]
rownames = df_fin.index.day_name().values.reshape((-1, nr_days))[0, :]
rownames = np.array(list(map(lambda x: x[0:3], rownames))).astype('O')
# Flip matrix up down to make sure that sunday is on top
rownames=rownames[::-1]
df_values = np.flipud(df_values.T)
# Output
df_heatmap = | pd.DataFrame(columns=colnames, data=df_values, index=rownames) | pandas.DataFrame |
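# --- Editor's illustration (not part of the sample above): a minimal, self-contained
# sketch of the week-by-day reshape used in _compute_history_heatmap, with made-up values.
import numpy as np
import pandas as pd

_daily = pd.Series(np.arange(10), index=pd.date_range('2021-01-01', periods=10, freq='D'))
_pad = (-len(_daily)) % 7                                    # days needed to reach a multiple of 7
_padded = pd.concat([pd.Series(np.zeros(_pad, dtype=int)), _daily], ignore_index=True)
_weeks_by_day = _padded.values.reshape((-1, 7)).T            # (7 day-rows, n week-columns)
print(_weeks_by_day.shape)                                   # -> (7, 2)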
# -*- coding: utf-8 -*-
# System modules
import sys
# Data processing modules
import pandas as pd
# Import external modules
# Data preparation
from predict_prepare import Predict_Prepare as Prepare
# Get the price prediction results
from predict_predict import Predict_Predict as Predict
class Predict_Lead:
def __init__(self):
pass
    # Function called by other packages
def predict_result(self):
        # The model predicts over two time periods
period = [1, 2]
        # Instantiate the preparation module and the prediction module
PrePare_Data = Prepare()
Predict_Data = Predict()
        # Get the predictions for the first time period
        # Prepare the sample dataset for model prediction
        # History_Model11, Predict_Model11: independent and dependent variables used by the hog price model
        # Last_data_model11: date of the last hog price record in the raw dataset
        # History_Model21, Predict_Model21: independent and dependent variables used by the corn price model
        # Last_data_model21: date of the last corn price record in the raw dataset
History_Model11, Predict_Model11, Last_data_model11, History_Model21, Predict_Model21, Last_data_model21 = PrePare_Data.variables_prepar(period[0])
        # Get the prediction results
        # predict_result1: predicted hog and corn prices
        # y_test_compare11: hog model fitted values vs. actual prices for the first period
        # y_test_compare12: corn model fitted values vs. actual prices for the first period
predict_result1, y_test_compare11, y_test_compare12 = Predict_Data.predict_result(History_Model11, Last_data_model11, Predict_Model11, History_Model21, Last_data_model21, Predict_Model21, period[0])
        # Get the predictions for the second time period
        # Prepare the sample dataset for model prediction
History_Model12, Predict_Model12, Last_data_model12, History_Model22, Predict_Model22, Last_data_model22 = PrePare_Data.variables_prepar(period[1])
        # Get the prediction results
predict_result2, y_test_compare21, y_test_compare22 = Predict_Data.predict_result(History_Model12, Last_data_model12, Predict_Model12, History_Model22, Last_data_model22, Predict_Model22, period[1])
        # Combine the prediction results of the two time periods
predict_result = | pd.concat([predict_result1, predict_result2]) | pandas.concat |
### imports
import sys
from pathlib import Path
import pickle
import numpy as np
import pandas as pd
util_dir = Path.cwd().parent.parent.parent.joinpath('Utility')
sys.path.insert(1, str(util_dir))
from Abstract import *
from Geometry import intersection, union, iou
from Information import *
from Configuration import frcnn_config
from mean_average_precision import MetricBuilder
# Load configuration object
cwd = Path.cwd()
pickle_path = cwd.joinpath('frcnn.test.config.pickle')
C = pickle.load(open(pickle_path,'rb'))
### setup convenient directories
prediction_dir = cwd.joinpath('predictions')
performance_dir = cwd.joinpath('performances')
performance_dir.mkdir(parents=True, exist_ok=True)
### object detection analysis function
# returns common metrics per IoU cut: precision, recall, and average precision
# (the older degeneracy computation is kept below, commented out)
def od_analysis(ref_df, pred_df, IoU_cuts):
### preparing the result dataframe
columns = ['Metric/IoU_cuts']+IoU_cuts
result_df = pd.DataFrame(columns = columns)
precision_row = ['precision']
recall_row = ['recall']
ap_row = ['AP']
grading_grids = []
imgs = ref_df['FileName'].unique()
precision_cols = [ [] for i in IoU_cuts ]
recall_cols = [ [] for i in IoU_cuts]
ap_cols = [ [] for i in IoU_cuts] # every sub [] contains aps for every IoU
for img_idx, img in enumerate(imgs):
sys.stdout.write(t_info(f"Parsing image: {img_idx+1}/{len(imgs)}", '\r'))
if img_idx+1 == len(imgs):
sys.stdout.write('\n')
sys.stdout.flush()
ref_slice = ref_df[ref_df['FileName']==img]
pred_slice = pred_df[pred_df['FileName']==img]
ref_bboxes = [ [row['XMin'], row['XMax'], row['YMin'], row['YMax']] for index, row in ref_slice.iterrows() ]
pred_bboxes = [ [row['XMin'], row['XMax'], row['YMin'], row['YMax'], row['Score']] for index, row in pred_slice.iterrows() ]
gt = np.array([ [r['XMin'], r['YMin'], r['XMax'], r['YMax'], 0, 0, 0] for index,r in ref_slice.iterrows()])
gt *= 512.0
preds = np.array([ [b[0], b[2], b[1], b[3], 0, b[4]]\
for b in pred_bboxes])
preds *= 512.0
pickle.dump(gt, open('gt', 'wb'))
pickle.dump(preds, open('preds', 'wb'))
recall_dict1 = {}
if len(pred_bboxes)==0:
for ap_col in ap_cols:
ap_col.append(0)
else:
# create metric_fn
metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=1)
# add some samples to evaluation
metric_fn.add(preds, gt)
result = metric_fn.value(iou_thresholds=IoU_cuts, recall_thresholds=np.arange(0., 1.1, 0.1))
for col_idx, iou_val in enumerate(IoU_cuts):
ap = result[iou_val][0]['ap']
final_precision = result[iou_val][0]['precision'][-1]
final_recall = result[iou_val][0]['recall'][-1]
precision_cols[col_idx].append(final_precision)
recall_cols[col_idx].append(final_recall)
ap_cols[col_idx].append(ap)
# calculate precision, recall, and degeneracy
# iNum = len(ref_bboxes)
# jNum = len(pred_bboxes)
# grading_grid = np.zeros(shape=(iNum, jNum))
#
# for i, j in np.ndindex(iNum, jNum):
# grading_grid[i,j] = iou(ref_bboxes[i], pred_bboxes[j])
#
# grading_grids.append(grading_grid)
for precision_col, recall_col, ap_col in zip(precision_cols, recall_cols, ap_cols):
precision_row.append(np.array(precision_col).mean())
recall_row.append(np.array(recall_col).mean())
ap_row.append(np.array(ap_col).mean())
### Grading the RPN prediction after NMS
# for iou_idx, iou_limit in enumerate(IoU_cuts):
# sys.stdout.write(t_info(f"Processing iou cuts: {iou_idx+1}/{len(IoU_cuts)}", '\r'))
# if iou_idx+1 == len(IoU_cuts):
# sys.stdout.write('\n')
# sys.stdout.flush()
#
#
# precisions = []
# recalls = []
# degeneracies = []
#
# for grading_grid in grading_grids:
# iNum, jNum = grading_grid.shape
#
# tp = 0
# fp = 0
# fn = 0
#
# for i in range(iNum):
# row = grading_grid[i]
# if np.any(row>=iou_limit):
# degeneracy = np.count_nonzero(row>=iou_limit)
# degeneracies.append(degeneracy)
#
# else:
# degeneracies.append(0)
# fn += 1
#
# for j in range(jNum):
# col = grading_grid[:,j]
# if np.any(col>=iou_limit):
# tp += 1
# else:
# fp += 1
#
# dom1 = tp+fp
#
# # dom1 == 0 when no ROI is proposed
# if dom1!=0:
# precision = tp/dom1
# precisions.append(precision)
#
# recall = tp/(tp+fn)
# recalls.append(recall)
#
# if len(precisions)!=0:
# precision_entry = np.array(precisions).mean()
# precision_row.append(precision_entry)
# else:
# precision_row.append([])
#
# if len(recalls)!=0:
# recall_entry = np.array(recalls).mean()
# recall_row.append(recall_entry)
# else:
# recall_row.append([])
#
# if len(degeneracies)!=0:
# degeneracy_entry = np.array(degeneracies).mean()
# degeneracy_row.append(degeneracy_entry)
# else:
# degeneracy_row.append([])
precision_df = pd.DataFrame([precision_row], columns=columns)
recall_df = pd.DataFrame([recall_row], columns=columns)
ap_df = pd.DataFrame([ap_row], columns=columns)
result_df = result_df.append(precision_df, ignore_index=True)
result_df = result_df.append(recall_df, ignore_index=True)
result_df = result_df.append(ap_df, ignore_index=True)
return result_df
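# Editor's note (sketch, not part of the original script): od_analysis relies on the box
# layouts expected by the `mean_average_precision` package, mirrored from the arrays
# built above:
#   gt row:   [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
#   pred row: [xmin, ymin, xmax, ymax, class_id, confidence]
# A minimal single-image use of the same calls would be:
#   metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=1)
#   metric_fn.add(np.array([[10, 10, 50, 50, 0, 0.9]]), np.array([[12, 12, 48, 48, 0, 0, 0]]))
#   metric_fn.value(iou_thresholds=[0.5])[0.5][0]['ap']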
# load real bboxes and predicted bboxes
IoU_cuts = [0.5, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95]
ref_df = pd.read_csv(C.train_bbox_reference_file, index_col=0)
pred_files = [f for f in prediction_dir.glob('*.csv')]
for i, pred_file in enumerate(pred_files):
pinfo(f'Evaluating predictions {i+1}/{len(pred_files)}: {pred_file.name}')
pred_df = pd.read_csv(pred_file, index_col=0)
fileName = pred_file.name[::-1].split('_',1)[1][::-1]+'_performance.csv'
file = performance_dir.joinpath(fileName)
if file.exists():
read_df = | pd.read_csv(file, index_col=0) | pandas.read_csv |
import json
import itertools
import pandas as pd
import numpy as np
def prepare_data():
users_data = | pd.read_json("proper_data/users2.json") | pandas.read_json |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index= | pd.Index(['a', 'b', 'c'], name='id') | pandas.Index |
# Copyright 2017 Sidewalk Labs | https://www.apache.org/licenses/LICENSE-2.0
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pandas as pd
import numpy as np
from collections import OrderedDict
import os
from enum import Enum
from doppelganger import marginals
FILE_PATTERN = 'state_{}_puma_{}_{}'
class ErrorStat(Enum):
ROOT_MEAN_SQUARED_ERROR = 1
ROOT_SQUARED_ERROR = 2
ABSOLUTE_PCT_ERROR = 3
class AccuracyException(Exception):
pass
class Accuracy(object):
def __init__(self, person_pums, household_pums, marginal_data,
generated_persons, generated_households, marginal_variables, use_all_marginals):
self.comparison_dataframe = Accuracy._comparison_dataframe(
person_pums,
household_pums,
marginal_data,
generated_persons,
generated_households,
marginal_variables,
use_all_marginals
)
@staticmethod
def from_doppelganger(
cleaned_data_persons,
cleaned_data_households,
marginal_data,
population,
marginal_variables=[],
use_all_marginals=True
):
'''Initialize an accuracy object from doppelganger objects
cleaned_data_persons (doppelgange.DataSource.CleanedData) - pums person data
cleaned_data_households (doppelgange.DataSource.CleanedData) - pums household data
marginal_data (doppelganger.Marginals) - marginal data (usually census)
population (doppelganger.Population) - Uses: population.generated_people and
population.generated_households
marginal_variables (list(str)): list of marginal variables to compute error on.
'''
return Accuracy(
person_pums=cleaned_data_persons.data,
household_pums=cleaned_data_households.data,
marginal_data=marginal_data.data,
generated_persons=population.generated_people,
generated_households=population.generated_households,
marginal_variables=marginal_variables,
use_all_marginals=use_all_marginals
)
@staticmethod
def from_data_dir(state, puma, data_dir, marginal_variables, use_all_marginals):
        '''Helper method for loading data files in the same format as output by the
        download_allocate_generate run script.
Args:
state: state id
puma: puma id
data_dir: directory with stored csv files
marginal_variables (list(str)): list of marginal variables to compute error on.
Return: an initialized Accuracy object
'''
return Accuracy.from_csvs(
state, puma,
data_dir + os.path.sep + FILE_PATTERN.format(state, puma, 'persons_pums.csv'),
data_dir + os.path.sep + FILE_PATTERN.format(state, puma, 'households_pums.csv'),
data_dir + os.path.sep + FILE_PATTERN.format(state, puma, 'marginals.csv'),
data_dir + os.path.sep + FILE_PATTERN.format(state, puma, 'people.csv'),
data_dir + os.path.sep + FILE_PATTERN.format(state, puma, 'households.csv'),
marginal_variables,
use_all_marginals
)
@staticmethod
def from_csvs(
state, puma,
person_pums_filepath,
household_pums_filepath,
marginals_filepath,
generated_persons_filepath,
generated_households_filepath,
marginal_variables,
use_all_marginals
):
'''Load csv files for use in accuracy calcs'''
msg = '''Accuracy's from_data_dir Assumes files of the form:
state_{state_id}_puma_{puma_id}_X
Where X is contained in the set:
{persons_pums.csv, households_pums.csv, marginals.csv, people.csv, households.csv}
'''
try:
df_person = pd.read_csv(person_pums_filepath)
df_household = pd.read_csv(household_pums_filepath)
df_marginal = pd.read_csv(marginals_filepath)
df_gen_persons = | pd.read_csv(generated_persons_filepath) | pandas.read_csv |
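# Editor's note (sketch, not in the original module): from_data_dir above simply expands
# FILE_PATTERN for each expected csv, e.g. for a hypothetical state/puma pair:
#   FILE_PATTERN.format('06', '07506', 'persons_pums.csv')
#   # -> 'state_06_puma_07506_persons_pums.csv'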
import pandas as pd
import numpy as np
import arrow
from datetime import datetime
from caendr.models.sql import Strain
from caendr.services.cloud.postgresql import db
from caendr.services.cloud.datastore import query_ds_entities
from caendr.services.user import get_num_registered_users
from caendr.utils.plots import time_series_plot
def get_strain_collection_plot(df):
return time_series_plot(
df,
x_title='Year',
y_title='Count',
range=[
datetime(1995, 10, 17),
datetime.today()
]
)
def cum_sum_strain_isotype():
"""
Create a time-series plot of strains and isotypes collected over time
Args:
df - the strain dataset
"""
df = pd.read_sql_table(Strain.__tablename__, db.engine)
# Remove strains with issues
df = df[df["issues"] == False]
cumulative_isotype = df[['isotype', 'sampling_date']].sort_values(['sampling_date'], axis=0) \
.drop_duplicates(['isotype']) \
.groupby(['sampling_date'], as_index=True) \
.count() \
.cumsum() \
.reset_index()
cumulative_isotype = cumulative_isotype.append({'sampling_date': np.datetime64(datetime.today().strftime("%Y-%m-%d")),
'isotype': len(df['isotype'].unique())}, ignore_index=True)
cumulative_strain = df[['strain', 'sampling_date']].sort_values(['sampling_date'], axis=0) \
.drop_duplicates(['strain']) \
.dropna(how='any') \
.groupby(['sampling_date']) \
.count() \
.cumsum() \
.reset_index()
cumulative_strain = cumulative_strain.append({'sampling_date': np.datetime64(datetime.today().strftime("%Y-%m-%d")),
'strain': len(df['strain'].unique())}, ignore_index=True)
df = cumulative_isotype.set_index('sampling_date') \
.join(cumulative_strain.set_index('sampling_date')) \
.reset_index()
return df
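# Editor's note (sketch, not in the original module): the cumulative counts above use a
# generic pandas pattern -- keep each entity's first sighting, count sightings per date,
# then take a running total:
#   df[['isotype', 'sampling_date']].sort_values('sampling_date') \
#       .drop_duplicates('isotype').groupby('sampling_date').count().cumsum()
# which yields, for every sampling date, the number of distinct isotypes seen so far.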
def get_report_sumary_plot_legacy(df):
return time_series_plot(
df,
x_title='Date',
y_title='Count',
range=[
datetime(2016, 3, 1),
datetime.today()
],
colors=[
'rgb(149, 150, 255)',
'rgb(81, 151, 35)'
]
)
def get_mappings_summary_legacy():
"""
Generates the cumulative sum of reports and traits mapped.
Cached daily
"""
traits = query_ds_entities('trait')
if traits:
traits = | pd.DataFrame.from_dict(traits) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import logging
import itertools
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
#modify to work with kfold
#def smoteAdataset(Xig, yig, test_size=0.2, random_state=0):
#def smoteAdataset(Xig_train, yig_train, Xig_test, yig_test):
# sm=SMOTE(random_state=2)
# Xig_train_res, yig_train_res = sm.fit_sample(Xig_train, yig_train.ravel())
# return Xig_train_res, pd.Series(yig_train_res), Xig_test, pd.Series(yig_test)
def create_logger():
logger_ = logging.getLogger('main')
logger_.setLevel(logging.DEBUG)
fh = logging.FileHandler('simple_lightgbm.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s]%(asctime)s:%(name)s:%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger_.addHandler(fh)
logger_.addHandler(ch)
def get_logger():
return logging.getLogger('main')
def lgb_multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes), order='F')
y_ohe = pd.get_dummies(y_true)
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
nb_pos = y_ohe.sum(axis=0).values.astype(float)
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False
def multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds
y_ohe = pd.get_dummies(y_true)
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
nb_pos = y_ohe.sum(axis=0).values.astype(float)
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return loss
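# Editor's note (added for clarity; not in the original): both loss functions above
# implement the class-weighted multi-class log loss, with per-class weights w_c,
# one-hot labels y_ic and clipped probabilities p_ic:
#     loss = - sum_c [ w_c * (sum_i y_ic * ln(p_ic)) / N_c ] / sum_c w_c
# where N_c is the number of true samples of class c. Tiny worked example with two
# classes, weights {A: 1, B: 2} and one sample each, whose predicted probability for
# its true class is 0.8 (A) and 0.6 (B):
#     loss = -(1 * ln(0.8) + 2 * ln(0.6)) / (1 + 2) ~= 0.415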
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
def predict_chunk(df_, clfs_, meta_, features, train_mean):
df_, aux_df_ = preprocess_ts_df(df_)
auxs = make_features(df_, aux_df_)
    aggs = get_aggregations()
new_columns = get_new_columns(aggs)
agg_ = df_.groupby('object_id').agg(aggs)
agg_.columns = new_columns
agg_ = add_features_to_agg(df=agg_)
full_test = agg_.reset_index().merge(
right=meta_,
how='left',
on='object_id'
)
for aux in auxs:
full_test = pd.merge(full_test, aux, on='object_id', how='left')
full_test = postprocess_df(full_test)
#full_test = full_test.fillna(train_mean)
preds_ = None
for clf in clfs_:
if preds_ is None:
preds_ = clf.predict_proba(full_test[features]) / len(clfs_)
else:
preds_ += clf.predict_proba(full_test[features]) / len(clfs_)
preds_99 = np.ones(preds_.shape[0])
for i in range(preds_.shape[1]):
preds_99 *= (1 - preds_[:, i])
preds_df_ = pd.DataFrame(preds_, columns=['class_' + str(s) for s in clfs_[0].classes_])
preds_df_['object_id'] = full_test['object_id']
preds_df_['class_99'] = 0.14 * preds_99 / np.mean(preds_99)
print(preds_df_['class_99'].mean())
del agg_, full_test, preds_
gc.collect()
return preds_df_
def save_importances(importances_):
mean_gain = importances_[['gain', 'feature']].groupby('feature').mean()
importances_['mean_gain'] = importances_['feature'].map(mean_gain['gain'])
plt.figure(figsize=(8, 12))
sns.barplot(x='gain', y='feature', data=importances_.sort_values('mean_gain', ascending=False))
plt.tight_layout()
plt.savefig('importances.png')
def train_classifiers(full_train=None, y=None):
folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=123)
clfs = []
importances = pd.DataFrame()
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.03,
'subsample': .9,
'colsample_bytree': .6,
'reg_alpha': .01,
'reg_lambda': .01,
'min_split_gain': 0.02,
'min_child_weight': 5,
'n_estimators': 10000,
'silent': -1,
'verbose': -1,
'max_depth': 3,
'seed': 159
}
oof_preds = np.zeros((len(full_train), np.unique(y).shape[0]))
full_ids = np.zeros(len(full_train))
w = y.value_counts()
ori_weights = {i : np.sum(w) / w[i] for i in w.index}
weights = {i : np.sum(w) / w[i] for i in w.index}
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
for value in classes:
weights[value] = weights[value] * class_weight[value]
for fold_, (trn_, val_) in enumerate(folds.split(y, y)):
lgb_params['seed'] += fold_
trn_x, trn_y = full_train.iloc[trn_], y.iloc[trn_]
val_x, val_y = full_train.iloc[val_], y.iloc[val_]
full_ids[val_] = val_x['object_id']
del val_x['object_id'], trn_x['object_id']
# trn_xa, trn_y, val_xa, val_y=smoteAdataset(trn_x.values, trn_y.values, val_x.values, val_y.values)
# trn_x=pd.DataFrame(data=trn_xa, columns=trn_x.columns)
# val_x=pd.DataFrame(data=val_xa, columns=val_x.columns)
clf = lgb.LGBMClassifier(**lgb_params)
clf.fit(
trn_x, trn_y,
eval_set=[(trn_x, trn_y), (val_x, val_y)],
eval_metric=lgb_multi_weighted_logloss,
verbose=100,
early_stopping_rounds=50,
sample_weight=trn_y.map(weights)
)
oof_preds[val_, :] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)
get_logger().info(multi_weighted_logloss(val_y, clf.predict_proba(val_x, num_iteration=clf.best_iteration_)))
imp_df = pd.DataFrame()
imp_df['feature'] = trn_x.columns
imp_df['gain'] = clf.feature_importances_
imp_df['fold'] = fold_ + 1
importances = pd.concat([importances, imp_df], axis=0, sort=False)
clfs.append(clf)
get_logger().info('MULTI WEIGHTED LOG LOSS : %.5f ' % multi_weighted_logloss(y_true=y, y_preds=oof_preds))
preds_df_ = pd.DataFrame(oof_preds, columns=['class_' + str(s) for s in clfs[0].classes_])
preds_df_['object_id'] = full_ids
print(preds_df_.head())
preds_df_.to_csv("oof_predictions.csv", index=False)
unique_y = np.unique(y)
class_map = dict()
for i,val in enumerate(unique_y):
class_map[val] = i
y_map = np.zeros((y.shape[0],))
y_map = np.array([class_map[val] for val in y])
# Compute confusion matrix
from sklearn.metrics import confusion_matrix
cnf_matrix = confusion_matrix(y_map, np.argmax(oof_preds,axis=-1))
np.set_printoptions(precision=2)
sample_sub = pd.read_csv('../input/sample_submission.csv')
class_names = list(sample_sub.columns[1:-1])
del sample_sub;gc.collect()
# Plot non-normalized confusion matrix
plt.figure(figsize=(12,12))
foo = plot_confusion_matrix(cnf_matrix, classes=class_names,normalize=True,
title='Confusion matrix')
return clfs, importances
def get_aggregations():
return {
'flux': ['min', 'max', 'mean', 'median', 'std', 'skew'],
'flux_err': ['min', 'max', 'mean', 'median', 'std', 'skew'],
'detected': ['sum'],
'flux_ratio_sq': ['sum','skew'],
'flux_by_flux_ratio_sq': ['sum','skew'],
}
def get_new_columns(aggs):
return [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
def add_features_to_agg(df):
df['flux_diff'] = df['flux_max'] - df['flux_min']
df['flux_dif2'] = (df['flux_max'] - df['flux_min']) / df['flux_mean']
df['flux_w_mean'] = df['flux_by_flux_ratio_sq_sum'] / df['flux_ratio_sq_sum']
df['flux_dif3'] = (df['flux_max'] - df['flux_min']) / df['flux_w_mean']
return df
def agg_per_obj_passband(df, col, agg):
aux = df[['object_id','passband']+[col]]
aggs = {col: [agg]}
aux = df.groupby(['object_id','passband']).agg(aggs).reset_index()
new_df = pd.DataFrame()
new_df['object_id'] = aux['object_id'].unique()
for x in range(0,6):
new_aux = aux[aux['passband'] == x]
del new_aux['passband']
new_aux.columns = ['object_id',col+'_'+agg+'_passband_'+str(x)]
new_df = pd.merge(new_df, new_aux, on='object_id', how='left')
new_df = new_df.fillna(0)
return new_df
def mjd_diff_detected(df, col):
mjd_max = df.groupby('object_id')[col].max().reset_index()
mjd_min = df.groupby('object_id')[col].min().reset_index()
mjd_max.columns = ['object_id',col+'_max']
mjd_min.columns = ['object_id',col+'_min']
df = pd.merge(df, mjd_max, on='object_id', how='left')
df = pd.merge(df, mjd_min, on='object_id', how='left')
df[col+'_diff_detected'] = df[col+'_max'] - df[col+'_min']
aux_df = df.groupby('object_id')[col+'_diff_detected'].max().reset_index()
return aux_df
def mjd_diff2_detected(df, col):
mjd_max = df.groupby('object_id')[col].max().reset_index()
mjd_min = df.groupby('object_id')[col].min().reset_index()
mjd_mean = df.groupby('object_id')[col].mean().reset_index()
mjd_max.columns = ['object_id',col+'_max']
mjd_min.columns = ['object_id',col+'_min']
mjd_mean.columns = ['object_id',col+'_mean']
df = pd.merge(df, mjd_max, on='object_id', how='left')
df = pd.merge(df, mjd_min, on='object_id', how='left')
df = pd.merge(df, mjd_mean, on='object_id', how='left')
df[col+'_diff2_detected'] = (df[col+'_max'] - df[col+'_min']) / df[col+'_mean']
aux_df = df.groupby('object_id')[col+'_diff2_detected'].max().reset_index()
return aux_df
def mjd_diff_detected_passband(df, col):
mjd_max = df.groupby(['object_id','passband'])[col].max().reset_index()
mjd_min = df.groupby(['object_id','passband'])[col].min().reset_index()
mjd_max.columns = ['object_id','passband',col+'_max']
mjd_min.columns = ['object_id','passband',col+'_min']
df = pd.merge(df, mjd_max, on=['object_id','passband'], how='left')
df = pd.merge(df, mjd_min, on=['object_id','passband'], how='left')
df[col+'_diff'] = df[col+'_max'] - df[col+'_min']
aux = df.groupby(['object_id','passband'])[col+'_diff'].max().reset_index()
new_df = | pd.DataFrame() | pandas.DataFrame |
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
| tm.assert_categorical_equal(res, exp_cat) | pandas._testing.assert_categorical_equal |
import glob
import numpy as np
import pandas as pd
from datetime import datetime
scriptPath = __file__
path = scriptPath[:-35] + '/data/'
filepath = path+'extractedDFIdata.csv'
filepathMydefichain = path+'mydefichainOperatornodes.xlsx'
dfDFIData = | pd.read_csv(filepath) | pandas.read_csv |
# File path of cleaned_loan_data.csv stored in path
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
sample_size=2000
z_critical = stats.norm.ppf(q = 0.95)
data = pd.read_csv(path)
data_sample = data.sample(n=sample_size, random_state=0)
sample_mean = data_sample['installment'].mean()
sample_std = data_sample['installment'].std()
margin_of_error = (z_critical * sample_std) / math.sqrt(sample_size)
confidence_interval = (sample_mean - margin_of_error, sample_mean + margin_of_error)
true_mean = data['installment'].mean()
print(margin_of_error)
print(true_mean)
print(confidence_interval[0])
print(confidence_interval[1])
sample_size = np.array([20, 50, 100])
fig, (ax_1, ax_2, ax_3) = plt.subplots(nrows=3, ncols=1, figsize=(20, 10))
for i in range(len(sample_size)):
m = []
for j in range(1000):
m.append(data['installment'].sample(n=sample_size[i]).mean())
mean_series = | pd.Series(m) | pandas.Series |
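# --- Editor's illustration (not part of the solution above; numbers are assumed) ---
# The interval above is the usual large-sample z-interval:
#     margin_of_error = z_critical * sample_std / sqrt(sample_size)
import math as _math
import scipy.stats as _stats

_z = _stats.norm.ppf(q=0.95)            # ~1.645
_moe = _z * 200 / _math.sqrt(2000)      # assuming sample_std=200, sample_size=2000
print(round(_moe, 2))                   # -> ~7.36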
# Scheduled DAG for processing the ENADE data
from airflow import DAG
# Import operators
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import datetime, timedelta
import pandas as pd
import zipfile
import pyodbc
import sqlalchemy
data_path = '/root/download'
# Default arguments
default_args = {
    'owner': 'diego.rech', # DAG owner
    'depends_on_past': False, # Whether the DAG depends on the previous run before starting
    'start_date': datetime(2020, 11, 30, 23), # Start date of the DAG runs
    'email': '<EMAIL>', # Email to be notified, if configured
    'email_on_failure': False, # Whether to receive emails on failure
    'email_on_retry': False, # Whether to receive emails when a retry is attempted
    'retries': 1, # How many retries should be attempted
    'retry_delay': timedelta(minutes=1) # How long to wait before the next retry
}
# DAG definition
dag = DAG(
    'treino-04', # DAG name
    description='Uses the ENADE data to demonstrate parallelism', # Description that makes the DAG easier to identify
    default_args=default_args,
    schedule_interval='*/10 * * * *' # Execution interval using cron syntax
)
# Task that marks the start of the process
start_processing = BashOperator(
task_id='start_processing',
bash_command='echo "Starting Preprocessing! Vai!"',
dag=dag
)
# Downloads the ENADE 2019 data from the official website
task_get_data = BashOperator(
task_id='get_data',
    bash_command=f'wget -P /root/download http://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip -O {data_path}/enade_2019.zip',
dag=dag
)
def unzip_data():
with zipfile.ZipFile(f'{data_path}/enade_2019.zip', 'r') as zipped:
zipped.extractall(f'{data_path}')
# Task responsible for unzipping the file
task_unzip_data = PythonOperator(
task_id = 'unzip_data',
python_callable = unzip_data,
dag=dag
)
def apply_filter():
cols = ['CO_GRUPO', 'TP_SEXO', 'NU_IDADE', 'NT_GER', 'NT_FG', 'NT_CE', 'QE_I01', 'QE_I02', 'QE_I04', 'QE_I05', 'QE_I08']
enade = pd.read_csv(f'{data_path}/microdados_enade_2019/2019/3.DADOS/microdados_enade_2019.txt', sep=';', decimal=',', usecols=cols)
enade = enade.loc[
(enade.NU_IDADE > 20) &
(enade.NU_IDADE < 40) &
(enade.NT_GER > 0)
]
enade.to_csv(data_path + '/enade_filtrado_2019.csv', index=False)
task_apply_filter = PythonOperator(
task_id = 'apply_filter',
python_callable = apply_filter,
dag=dag
)
# Age centered on the mean
# Mean-centered age squared
def construct_centralized_age():
age = pd.read_csv(f'{data_path}/enade_filtrado_2019.csv', usecols=['NU_IDADE'])
age['centralized_age'] = age.NU_IDADE - age.NU_IDADE.mean()
age[['centralized_age']].to_csv(data_path + '/centralized_age.csv', index=False)
def construct_centralized_pow():
centralized_age = pd.read_csv(f'{data_path}/centralized_age.csv', sep=';', decimal=',')
centralized_age['centralized_age'] = centralized_age['centralized_age'].astype(float)
centralized_age['centralized_pow'] = centralized_age['centralized_age'] ** 2
centralized_age[['centralized_pow']].to_csv(f'{data_path}/centralized_pow.csv', index=False)
task_construct_centralized_age = PythonOperator(
task_id = 'centralized_age',
python_callable = construct_centralized_age,
dag=dag
)
task_construct_centralized_pow = PythonOperator(
task_id = 'centralized_pow',
python_callable = construct_centralized_pow,
dag=dag
)
def construct_martial_status():
filter = pd.read_csv(f'{data_path}/enade_filtrado_2019.csv', usecols=['QE_I01'])
filter['martial_status'] = filter.QE_I01.replace({
'A': 'Solteiro',
'B': 'Casado',
'C': 'Separado',
'D': 'Viúvo',
'E': 'Outro'
})
filter[['martial_status']].to_csv(f'{data_path}/martial_status.csv', index=False)
task_construct_martial_status = PythonOperator(
task_id = 'construct_martial_status',
python_callable = construct_martial_status,
dag = dag
)
def construct_color():
filter = pd.read_csv(f'{data_path}/enade_filtrado_2019.csv', usecols=['QE_I02'])
filter['color'] = filter.QE_I02.replace({
'A': 'Branca',
'B': 'Preta',
'C': 'Amarela',
'D': 'Parda',
'E': 'Indígena',
'F': '',
' ': ''
})
filter[['color']].to_csv(f'{data_path}/color.csv', index=False)
task_construct_color = PythonOperator(
task_id='construct_color',
python_callable = construct_color,
dag = dag
)
def construct_escopai():
filter = | pd.read_csv(f'{data_path}/enade_filtrado_2019.csv', usecols=['QE_I04']) | pandas.read_csv |
import pandas as pd
import numpy as np
import datetime
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
        # duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
        # append current filters
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
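# Editor's note (illustrative, not part of the original class): get_cf_features above
# only flags which of a fixed set of filters occur in the pipe-separated
# current_filters string, e.g.
#   current_filters = 'Sort by Price|Focus on Rating'
#   # -> cf_sbp=1, cf_fsr=1 and cf_sbd = cf_sbr = cf_fod = cf_bev = 0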
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
# join expaned
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X = pd.concat([X, pnorm_rank_df], axis=1)
del pnorm_rank_df
# calc discount rate
X["price"] = X["price"].astype(float)
prices_df = X[["impression", "price"]].groupby("impression").agg({'price': np.mean}).reset_index()
prices_df.columns = ["impression", "item_price_mean"]
X = pd.merge(X, prices_df, on="impression", how="left")
X["discount_rate"] = X["price"] / X["item_price_mean"]
del prices_df
# append some important props and other props with over 0.2 coverage
sum_item_props_df = dataset["sum_item_props_df"]
item_props = dataset["item_props"]
prop_cols = ["pGood Rating"
, "pVery Good Rating"
, "pExcellent Rating"
, "pSatisfactory Rating"
, "p1 Star"
, "p2 Star"
, "p3 Star"
, "p4 Star"
, "p5 Star"
, "pBusiness Centre"
, "pBusiness Hotel"
, "pConference Rooms"]
c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
prop_cols = prop_cols + c02over_prop_cols
prop_cols = list(set(prop_cols))
X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
X[prop_cols] = X[prop_cols].fillna(0)
return (X, extract_cols)
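# Editor's note (illustrative sketch, not part of the original class): the expansion
# above turns one clickout row with pipe-separated impressions into one row per
# impression before merging back on 'gid'. Minimal version of the same pattern:
#   row = pd.DataFrame({'gid': [0], 'impressions': ['111|222|333']})
#   row['impression'] = row['impressions'].apply(lambda s: s.split('|'))
#   row.set_index('gid').impression.apply(pd.Series).stack()
#   # -> three rows, all with gid=0, holding '111', '222' and '333'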
class DecisionMakingProcess(object):
@classmethod
def detect(cls, X, dataset):
print("... ... Decision Making Process")
print("... ... ... Attention and Perceptual Encoding")
print("... ... ... Information Acquisition and Evaluation")
all_df = dataset["all_df"]
# join pos stats"
copos_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "impressions", "is_y"]].copy()
copos_df = copos_df[copos_df.is_y == 0]
copos_df["impression"] = copos_df[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
copos_df["co_pos"] = copos_df[["impression", "reference"]].apply(
lambda x: x.impression.index(x.reference) + 1 if x.reference in x.impression else 1, axis=1)
copos_df_stats = copos_df[["session_id", "co_pos"]].groupby("session_id").agg(
{'co_pos': [np.min, np.max, np.mean]}).reset_index()
copos_df_stats.columns = ["session_id", "co_pos_min", "co_pos_max", "co_pos_mean"]
X = pd.merge(X, copos_df_stats, on="session_id", how="left")
X["co_pos_min"] = X["co_pos_min"].fillna(1)
X["co_pos_mean"] = X["co_pos_mean"].fillna(1)
X["co_pos_max"] = X["co_pos_max"].fillna(1)
X["co_pos_min_diff"] = X["pos"] - X["co_pos_min"]
X["co_pos_mean_diff"] = X["pos"] - X["co_pos_mean"]
X["clickouted_pos_max_diff"] = X["co_pos_max"] - X["pos"]
del copos_df
del copos_df_stats
# is_last and is_last_elapsed_time
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
lastref_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
lastref_df["is_target"] = 0
lastref_df.loc[lastref_df.is_y == 1, "is_target"] = 1
lastref_df = lastref_df[lastref_df.action_type.isin(action_types)]
lastref_df["last_session_id"] = lastref_df["session_id"].shift(1)
lastref_df["last_reference"] = lastref_df["reference"].shift(1)
lastref_df["last_timestamp"] = lastref_df["timestamp"].shift(1)
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_session_id]
lastref_df = lastref_df[lastref_df.is_target == 1][["session_id", "last_reference", "last_timestamp"]]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_reference"]] = X[["last_reference"]].fillna("-1")
X[["last_timestamp"]] = X[["last_timestamp"]].fillna(-1)
X["is_last"] = X[["impression", "last_reference"]].apply(lambda x: 1 if x.impression == x.last_reference else 0,
axis=1)
X["elapsed_time_between_is_last"] = X[["impression", "last_reference", "timestamp", "last_timestamp"]].apply(
lambda x: int(x.timestamp) - int(x.last_timestamp) if x.impression == x.last_reference else np.nan, axis=1)
lastdur_df = X[["session_id", "elapsed_time_between_is_last"]].copy()
lastdur_df = lastdur_df.dropna(axis=0, how='any')
X.drop("elapsed_time_between_is_last", axis=1, inplace=True)
X = pd.merge(X, lastdur_df, on="session_id", how="left")
del lastref_df
del lastdur_df
# join is_last_last
lastref_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
lastref_df["last_last_session_id"] = lastref_df["session_id"].shift(2)
lastref_df["last_last_reference"] = lastref_df["reference"].shift(2)
lastref_df = lastref_df[lastref_df.is_y == 1]
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_last_session_id]
lastref_df = lastref_df[["session_id", "last_last_reference"]]
lastref_df = lastref_df[~lastref_df.duplicated()]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_last_reference"]] = X[["last_last_reference"]].fillna("-1")
X["is_last_last"] = X[["impression", "last_last_reference"]].apply(
lambda x: 1 if x.impression == x.last_last_reference else 0, axis=1)
del lastref_df
        # mean elapsed time until the next action, per item (note: this uses "future" information)
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
isnext_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
isnext_df["next_session_id"] = isnext_df["session_id"].shift(-1)
isnext_df["next_timestamp"] = isnext_df["timestamp"].shift(-1)
isnext_df = isnext_df[isnext_df.session_id == isnext_df.next_session_id]
isnext_df["elapsed_next"] = isnext_df["next_timestamp"] - isnext_df["timestamp"]
isnext_df = isnext_df[isnext_df.action_type.isin(action_types)]
isnext_df = isnext_df[isnext_df.is_y == 0]
isnext_gp_df = isnext_df[["reference", "elapsed_next"]].groupby("reference").agg(
{"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_gp_df
isnext_gp_df = isnext_df[isnext_df.action_type == "clickout item"][["reference", "elapsed_next"]].groupby(
"reference").agg({"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time_byco"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_df
del isnext_gp_df
# clickouted item during session
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
X = pd.merge(X, couted_df, on=["session_id", "impression"], how="left")
X["clickouted"] = X["clickouted"].fillna(0)
X["clickouted"] = X["clickouted"].astype(int)
# diff between clickouted price mean
co_price_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "prices", "impressions", "is_y"]].copy()
co_price_df = co_price_df[co_price_df.is_y == 0] # to prevent leakage
def get_price(reference, impressions, prices):
imps = str(impressions).split("|")
prs = str(prices).split("|")
if reference in imps:
return prs[imps.index(reference)]
else:
return 0
co_price_df["price"] = co_price_df.apply(lambda x: get_price(x.reference, x.impressions, x.prices), axis=1)
co_price_df["price"] = co_price_df["price"].astype(float)
co_price_df = co_price_df.groupby("session_id").agg({'price': np.mean}).reset_index()
co_price_df.columns = ["session_id", "couted_price_mean"]
X = pd.merge(X, co_price_df, on="session_id", how="left")
X["couted_price_mean"] = X["couted_price_mean"].fillna(-1)
X["clickouted_price_diff"] = X["price"].astype(float) / X["couted_price_mean"]
X.loc[X.clickouted_price_diff < 0, "clickouted_price_diff"] = 0
del co_price_df
        # flag the two items displayed above and the five items displayed below the clickouted / last item
u_cols = []
def set_undert_the_clickouted_and_islast(X, target_col, nu=5):
u_col = target_col + "_u"
X[u_col] = X["session_id"]
X.loc[X[target_col] != 1, u_col] = ""
for u in [_ for _ in range(-2, nu + 1, 1) if _ != 0]:
new_col = u_col + str(u).replace("-", "p")
X[new_col] = X[u_col].shift(u)
X[new_col] = X[new_col].fillna("")
X.loc[X[new_col] == X["session_id"], new_col] = "1"
X.loc[X[new_col] != "1", new_col] = 0
X.loc[X[new_col] == "1", new_col] = 1
u_cols.append(new_col)
X.drop(u_col, axis=1, inplace=True)
set_undert_the_clickouted_and_islast(X, "clickouted", 5)
set_undert_the_clickouted_and_islast(X, "is_last", 5)
        # number of clickouted / is_last items among the surrounding displayed positions
u_coted_cols = [col for col in u_cols if "clickouted" in col]
u_islast_col = [col for col in u_cols if "is_last" in col]
X["clickouted_sum"] = X[u_coted_cols].sum(axis=1)
X["is_last_sum"] = X[u_islast_col].sum(axis=1)
        # step_elapsed_mean: mean time between steps, a proxy for how fast the user moves through the session.
selapsed_df = all_df[["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference"]].copy()
selapsed_df["pre_timestamp"] = selapsed_df["timestamp"].shift(1)
selapsed_df["pre_timestamp_dt"] = selapsed_df["timestamp_dt"].shift(1)
selapsed_df["pre_session_id"] = selapsed_df["session_id"].shift(1)
selapsed_df = selapsed_df[selapsed_df.session_id == selapsed_df.pre_session_id]
selapsed_df["elapsed"] = selapsed_df["timestamp"] - selapsed_df["pre_timestamp"]
selapsed_df = selapsed_df[["session_id", "elapsed"]]
selapsed_df = selapsed_df[selapsed_df.elapsed.notna()]
selapsed_df = selapsed_df[selapsed_df.elapsed > 0]
selapsed_df = selapsed_df.groupby("session_id").agg({"elapsed": np.mean}).reset_index()
selapsed_df.columns = ["session_id", "step_elapsed_mean"]
X = pd.merge(X, selapsed_df, on="session_id", how="left")
del selapsed_df
        # elapsed time of the last step across all action types (likely overlaps with is_last_elapsed_time)
lduration_all_df = all_df[["session_id", "action_type", "timestamp", "is_y"]].copy()
lduration_all_df["pre_timestamp"] = lduration_all_df["timestamp"].shift(1)
lduration_all_df["pre_session_id"] = lduration_all_df["session_id"].shift(1)
lduration_all_df = lduration_all_df[lduration_all_df.session_id == lduration_all_df.pre_session_id]
lduration_all_df["elapsed_time"] = lduration_all_df["timestamp"] - lduration_all_df["pre_timestamp"]
lduration_all_df = lduration_all_df[lduration_all_df.is_y == 1]
lduration_all_df = lduration_all_df[["session_id", "elapsed_time"]]
X = pd.merge(X, lduration_all_df, on="session_id", how="left")
del lduration_all_df
# first action_type
firsta_df = all_df[["session_id", "_session_id", "action_type", "is_y"]].copy()
firsta_df = firsta_df[firsta_df.is_y == 0] # to prevent leakage
firsta_df = firsta_df.groupby("_session_id").first().reset_index()
firsta_df = firsta_df.groupby("session_id").last().reset_index()
firsta_df.loc[firsta_df["action_type"] == "search for destination", "action_type"] = "fa_sfd"
firsta_df.loc[firsta_df["action_type"] == "interaction item image", "action_type"] = "fa_iii"
firsta_df.loc[firsta_df["action_type"] == "clickout item", "action_type"] = "fa_coi"
firsta_df.loc[firsta_df["action_type"] == "search for item", "action_type"] = "fa_sfi"
firsta_df.loc[firsta_df["action_type"] == "search for poi", "action_type"] = "fa_sfp"
firsta_df.loc[firsta_df["action_type"] == "change of sort order", "action_type"] = "fa_coso"
firsta_df.loc[firsta_df["action_type"] == "filter selection", "action_type"] = "fa_fis"
firsta_df.loc[firsta_df["action_type"] == "interaction item info", "action_type"] = "fa_iiinfo"
firsta_df.loc[firsta_df["action_type"] == "interaction item rating", "action_type"] = "fa_iirat"
firsta_df.loc[firsta_df["action_type"] == "interaction item deals", "action_type"] = "fa_iidea"
firsta_df = firsta_df[["session_id", "action_type"]]
firsta_df.columns = ["session_id", "at"]
onehot_firsta = pd.get_dummies(firsta_df, columns=['at'])
firsta_cols = list(onehot_firsta.columns)
firsta_cols.remove("session_id")
X = pd.merge(X, onehot_firsta, on="session_id", how="left")
for firsta_col in firsta_cols:
X[firsta_col] = X[firsta_col].fillna(0)
del firsta_df
del onehot_firsta
# price norm by item rating prop
X["r6"] = 0
X["r7"] = 0
X["r8"] = 0
X["r9"] = 0
X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
X.loc[X["pGood Rating"] == 1, "r7"] = 7
X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
X["rating"] = X["rating"].fillna(-1)
pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
float)
del pns_df
# price norm by star
X["star"] = -1
X.loc[X["p1 Star"] == 1, "star"] = 1
X.loc[X["p2 Star"] == 1, "star"] = 2
X.loc[X["p3 Star"] == 1, "star"] = 3
X.loc[X["p4 Star"] == 1, "star"] = 4
X.loc[X["p5 Star"] == 1, "star"] = 5
pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
float)
del pns_df
return X
class ByItem(object):
@classmethod
def set(cls, X, dataset):
print("... ... ByItem")
all_df = dataset["all_df"]
# imps score
impscore_df = dataset["impscore_df"]
item_props = dataset["item_props"]
X = pd.merge(X, impscore_df, on="impression", how="left")
X["impsocre"] = X["impsocre"].fillna(0)
# # append some important props and other props with over 0.2 coverage
# sum_item_props_df = dataset["sum_item_props_df"]
# prop_cols = ["pGood Rating"
# , "pVery Good Rating"
# , "pExcellent Rating"
# , "pSatisfactory Rating"
# , "p1 Star"
# , "p2 Star"
# , "p3 Star"
# , "p4 Star"
# , "p5 Star"
# , "pBusiness Centre"
# , "pBusiness Hotel"
# , "pConference Rooms"]
# c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
# prop_cols = prop_cols + c02over_prop_cols
# prop_cols = list(set(prop_cols))
# X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
# X[prop_cols] = X[prop_cols].fillna(0)
# append item svd n_components=10
item_props_svd = dataset["item_props_svd"]
prop_svd_cols = list(item_props_svd.columns)
prop_svd_cols.remove("item_id")
X = pd.merge(X, item_props_svd, left_on="impression", right_on="item_id", how="left")
X[prop_svd_cols] = X[prop_svd_cols].fillna(0)
# # price norm by item rating prop
# X["r6"] = 0
# X["r7"] = 0
# X["r8"] = 0
# X["r9"] = 0
# X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
# X.loc[X["pGood Rating"] == 1, "r7"] = 7
# X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
# X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
# X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
# lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
# X["rating"] = X["rating"].fillna(-1)
# pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
# pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
# X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
# float)
# del pns_df
#
# # price norm by star
# X["star"] = -1
# X.loc[X["p1 Star"] == 1, "star"] = 1
# X.loc[X["p2 Star"] == 1, "star"] = 2
# X.loc[X["p3 Star"] == 1, "star"] = 3
# X.loc[X["p4 Star"] == 1, "star"] = 4
# X.loc[X["p5 Star"] == 1, "star"] = 5
# pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
# pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
# X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
# float)
# del pns_df
# item ctr
ctrbyitem_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyitem_df = ctrbyitem_df[ctrbyitem_df.is_y == 0]
ref_df = ctrbyitem_df[["reference"]].groupby(["reference"]).size().reset_index()
ref_df.columns = ["impression", "rcnt"]
ref_df["ctrbyitem"] = ref_df["rcnt"].astype(float) / ref_df.shape[0]
ref_df = ref_df[["impression", "ctrbyitem"]]
X = pd.merge(X, ref_df, on="impression", how="left")
X["ctrbyitem"] = X["ctrbyitem"].fillna(0)
del ctrbyitem_df
del ref_df
# item ctr by city
cr_tmp_df = all_df[all_df.action_type == "clickout item"].copy()
cr_tmp_df = cr_tmp_df[cr_tmp_df.is_y == 0] # to prevent leakage
city_df = cr_tmp_df[["city"]].groupby(["city"]).size().reset_index()
city_df.columns = ["city", "ccnt"]
cityref_df = cr_tmp_df[["city", "reference"]].groupby(["city", "reference"]).size().reset_index()
cityref_df.columns = ["city", "impression", "rcnt"]
cityref_df = pd.merge(cityref_df, city_df, on="city", how="left")
cityref_df["ctrbycity"] = cityref_df["rcnt"].astype(float) / cityref_df["ccnt"].astype(float)
cityref_df = cityref_df[["city", "impression", "ctrbycity"]]
X = pd.merge(X, cityref_df, on=["city", "impression"], how="left")
X["ctrbycity"] = X["ctrbycity"].fillna(0)
del cr_tmp_df
del city_df
del cityref_df
# item ctr by city rank
ctrbycity_rank_df = X[["session_id", "ctrbycity"]].copy()
ctrbycity_rank_df = ctrbycity_rank_df[["session_id", "ctrbycity"]].groupby("session_id").rank(ascending=False)
ctrbycity_rank_df.columns = ["ctrbycity_rank"]
X = pd.concat([X, ctrbycity_rank_df], axis=1)
del ctrbycity_rank_df
# bayes likelihood by item
bayes_likelihood = dataset["bayes_likelihood"]
X["rlr"] = X["impression"].astype(str) + X["last_reference"].astype(str)
def set_bayes_li(rlr):
if rlr in bayes_likelihood:
return bayes_likelihood[rlr]
return 0.0
X["bayes_li"] = X[["rlr"]].apply(lambda x: set_bayes_li(x.rlr), axis=1)
# clickouted item 2 item during session
v2v_counter = dataset["v2v_counter"]
def extract_sv2v_counter(iids):
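            # Merge the item-to-item co-click counters of every item the session has
            # clicked out; on duplicate keys the count from the first item is kept.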
v = {}
for iid in iids:
if iid in v2v_counter:
for s in v2v_counter[iid]:
if not s in v:
v[s] = v2v_counter[iid][s]
return v
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
sv2v_df = couted_df.groupby("session_id").apply(
lambda x: extract_sv2v_counter(list(x.impression))).reset_index()
sv2v_df.columns = ["session_id", "sv2v"]
X = pd.merge(X, sv2v_df, on="session_id", how="left")
X["sv2v"] = X["sv2v"].fillna("{}")
X["sv2v_score"] = X[["impression", "sv2v"]].apply(
lambda x: x.sv2v[x.impression] if x.impression in x.sv2v else np.nan, axis=1)
X.drop("sv2v", axis=1, inplace=True)
sv2vs_stats = X.groupby("session_id").agg({"sv2v_score": [np.mean, np.std]}).reset_index()
sv2vs_stats.columns = ["session_id", "sv2v_score_mean", "sv2v_score_std"]
X = pd.merge(X, sv2vs_stats, on="session_id", how="left")
X["sv2v_score_norm"] = X["sv2v_score"] - X["sv2v_score_mean"] / X["sv2v_score_std"]
del couted_df
del sv2v_df
del sv2vs_stats
        # flags for action_types already performed on each item during the session
couted_df = all_df[["action_type", "session_id", "reference"]].copy()
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
ated_cols = ["iired"
, "iifed"
, "iiied"
, "iided"
, "sfied"]
for i, action_type in enumerate(action_types):
at_df = couted_df[couted_df.action_type == action_type].copy()
at_df = at_df[["session_id", "reference"]]
at_df.columns = ["session_id", "impression"]
at_df = at_df[~at_df.duplicated()]
at_df[ated_cols[i]] = 1
X = pd.merge(X, at_df, on=["session_id", "impression"], how="left")
X[ated_cols[i]] = X[ated_cols[i]].fillna(0)
X[ated_cols[i]] = X[ated_cols[i]].astype(int)
del at_df
del couted_df
# dropout rate by each item during each session
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(["interaction item image", "clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df["action_type"] == "interaction item image", "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
def is_dropout(iii, cko):
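            # 0: interacted and clicked out; 1: interacted but never clicked out
            # (dropout); -1: no interaction (filtered out later)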
if iii != 0 and cko != 0:
return 0
elif iii != 0 and cko == 0:
return 1
else:
return -1
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
        dropout_df = dropout_df[dropout_df.dropout != -1]
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# dropout rate by each item during all sessions
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(action_types + ["clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df.action_type.isin(action_types), "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
        dropout_df = dropout_df[dropout_df.dropout != -1]
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "all_dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# action_type rate by each item
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atstats_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
atstats_df = atstats_df[atstats_df.action_type.isin(action_types)]
atstats_df = atstats_df[atstats_df.is_y == 0] # to prevent leakage
atstats_df = atstats_df[["reference", "action_type"]].groupby(["reference", "action_type"]).size().reset_index()
atstats_df.columns = ["reference", "action_type", "at_cnt"]
atstats_refcnt_df = atstats_df[["reference", "at_cnt"]].groupby("reference").sum().reset_index()
atstats_refcnt_df.columns = ["reference", "rf_cnt"]
atstats_df = pd.merge(atstats_df, atstats_refcnt_df, on="reference", how="left")
atstats_df["at_rate"] = atstats_df["at_cnt"].astype(float) / atstats_df["rf_cnt"]
atstats_df = atstats_df.pivot(index='reference', columns='action_type', values='at_rate').reset_index()
at_rate_cols = ["co_at_rate", "iid_at_rate", "iii_at_rate", "iif_at_rate", "iir_at_rate", "sfi_at_rate"]
atstats_df.columns = ["impression"] + at_rate_cols
atstats_df = atstats_df.fillna(0)
X = pd.merge(X, atstats_df, on="impression", how="left")
for at_rate_col in at_rate_cols:
X[at_rate_col] = X[at_rate_col].fillna(0)
del atstats_df
# action_type rate in-session rank by each item
at_rate_cols = ["co_at_rate"
, "iid_at_rate"
, "iii_at_rate"
, "iif_at_rate"
, "iir_at_rate"
, "sfi_at_rate"]
at_rank_cols = []
for at_rate_col in at_rate_cols:
at_rank_col = at_rate_col + "_rank"
at_rank_cols.append(at_rank_col)
at_rank_df = X[["session_id", at_rate_col]].copy()
at_rank_df = at_rank_df[["session_id", at_rate_col]].groupby("session_id").rank(ascending=False)
at_rank_df.columns = [at_rank_col]
X = pd.concat([X, at_rank_df], axis=1)
del at_rank_df
# reference_elapsed_mean and by action_type
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
relapsed_df = all_df[
["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference", "is_y"]].copy()
relapsed_df["pre_timestamp"] = relapsed_df["timestamp"].shift(1)
relapsed_df["pre_timestamp_dt"] = relapsed_df["timestamp_dt"].shift(1)
relapsed_df["pre_session_id"] = relapsed_df["session_id"].shift(1)
relapsed_df = relapsed_df[relapsed_df.session_id == relapsed_df.pre_session_id]
relapsed_df["elapsed"] = relapsed_df["timestamp"] - relapsed_df["pre_timestamp"]
relapsed_df = relapsed_df[relapsed_df.action_type.isin(action_types)]
relapsed_df = relapsed_df[relapsed_df.is_y == 0] # to prevent leakage
relapsed_df = relapsed_df[relapsed_df.elapsed.notna()]
relapsed_df = relapsed_df[relapsed_df.elapsed > 0]
r_relapsed_df = relapsed_df[["reference", "elapsed"]].groupby("reference").agg(
{"elapsed": np.mean}).reset_index()
r_relapsed_rate_cols = ["ref_elapsed_mean"]
r_relapsed_df.columns = ["impression"] + r_relapsed_rate_cols
a_relapsed_df = relapsed_df[["reference", "action_type", "elapsed"]].groupby(["reference", "action_type"]).agg(
{"elapsed": np.mean}).reset_index()
a_relapsed_df.columns = ["reference", "action_type", "at_elapsed_mean"]
a_relapsed_df = a_relapsed_df.pivot(index='reference', columns='action_type',
values='at_elapsed_mean').reset_index()
a_relapsed_rate_cols = ["co_ref_elapsed_mean", "iid_ref_elapsed_mean", "iii_ref_elapsed_mean",
"iif_ref_elapsed_mean", "iir_ref_elapsed_mean", "sfi_ref_elapsed_mean"]
a_relapsed_df.columns = ["impression"] + a_relapsed_rate_cols
X = pd.merge(X, r_relapsed_df, on="impression", how="left")
X = pd.merge(X, a_relapsed_df, on="impression", how="left")
del relapsed_df
del r_relapsed_df
del a_relapsed_df
# tsh "time split by hour" item ctr
tsh_df = all_df[all_df.action_type == "clickout item"][
["session_id", "action_type", "reference", "timestamp_dt", "is_y"]].copy()
tsh_df["tsh24"] = -1
X["tsh24"] = -1
ts_min = tsh_df["timestamp_dt"].min()
ts_max = tsh_df["timestamp_dt"].max()
def set_tscol(hours):
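            # Assign consecutive bin ids (1, 2, ...) of `hours`-wide windows, starting
            # from the earliest clickout timestamp, to both tsh_df and X.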
tscol = "tsh" + str(hours)
ts_start = ts_min
ts_end = ts_start + datetime.timedelta(hours=hours)
ts_bin = 1
while True:
tsh_df.loc[(tsh_df.timestamp_dt >= ts_start) & (tsh_df.timestamp_dt < ts_end), tscol] = ts_bin
X.loc[(X.timestamp_dt >= ts_start) & (X.timestamp_dt < ts_end), tscol] = ts_bin
ts_start = ts_end
ts_end = ts_start + datetime.timedelta(hours=hours)
if ts_start > ts_max:
break
ts_bin += 1
set_tscol(24)
tsh_df = tsh_df[tsh_df.is_y == 0]
tsh24_df = tsh_df[["tsh24"]].groupby(["tsh24"]).size().reset_index()
tsh24_df.columns = ["tsh24", "allcnt"]
tsh24ref_df = tsh_df[["tsh24", "reference"]].groupby(["tsh24", "reference"]).size().reset_index()
tsh24ref_df.columns = ["tsh24", "impression", "rcnt"]
tsh24ref_df = pd.merge(tsh24ref_df, tsh24_df, on="tsh24", how="left")
tsh24ref_df["ctrbytsh24"] = tsh24ref_df["rcnt"].astype(float) / tsh24ref_df["allcnt"].astype(float)
tsh24ref_df = tsh24ref_df[["tsh24", "impression", "ctrbytsh24"]]
X = pd.merge(X, tsh24ref_df, on=["tsh24", "impression"], how="left")
X["ctrbytsh24"] = X["ctrbytsh24"].fillna(0)
del tsh_df
del tsh24_df
del tsh24ref_df
# item ctr by some props
ctrbyprops_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyprops_df.columns = ["session_id", "item_id", "is_y"]
star_cols = ["p1 Star", "p2 Star", "p3 Star", "p4 Star", "p5 Star"]
rating_cols = ["pSatisfactory Rating", "pGood Rating", "pVery Good Rating", "pExcellent Rating"]
ctrbyprops_df = pd.merge(ctrbyprops_df, item_props[["item_id"] + star_cols + rating_cols], on="item_id",
how="left")
ctrbyprops_df["star"] = -1
ctrbyprops_df.loc[ctrbyprops_df["p1 Star"] == 1, "star"] = 1
ctrbyprops_df.loc[ctrbyprops_df["p2 Star"] == 1, "star"] = 2
ctrbyprops_df.loc[ctrbyprops_df["p3 Star"] == 1, "star"] = 3
ctrbyprops_df.loc[ctrbyprops_df["p4 Star"] == 1, "star"] = 4
ctrbyprops_df.loc[ctrbyprops_df["p5 Star"] == 1, "star"] = 5
ctrbyprops_df["r6"] = 0
ctrbyprops_df["r7"] = 0
ctrbyprops_df["r8"] = 0
ctrbyprops_df["r9"] = 0
ctrbyprops_df.loc[ctrbyprops_df["pSatisfactory Rating"] == 1, "r6"] = 6
ctrbyprops_df.loc[ctrbyprops_df["pGood Rating"] == 1, "r7"] = 7
ctrbyprops_df.loc[ctrbyprops_df["pVery Good Rating"] == 1, "r8"] = 8
ctrbyprops_df.loc[ctrbyprops_df["pExcellent Rating"] == 1, "r9"] = 9
ctrbyprops_df["rating"] = ctrbyprops_df[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
ctrbyprops_df["rating"] = ctrbyprops_df["rating"].fillna(-1)
ctrbyprops_df["star_rating"] = "sr_" + ctrbyprops_df["star"].astype(str) + "_" + ctrbyprops_df["rating"].astype(
str)
ctrbyprops_df = ctrbyprops_df[["session_id", "star_rating", "item_id", "is_y"]]
ctrbyprops_df = ctrbyprops_df[ctrbyprops_df.is_y == 0] # to prevent leakage
ctrbyprops_df = ctrbyprops_df[["item_id", "star_rating"]]
ctrbyprops_df.columns = ["impression", "star_rating"]
prop_df = ctrbyprops_df[["star_rating"]].groupby(["star_rating"]).size().reset_index()
prop_df.columns = ["star_rating", "allcnt"]
propref_df = ctrbyprops_df[["star_rating", "impression"]].groupby(
["star_rating", "impression"]).size().reset_index()
propref_df.columns = ["star_rating", "impression", "rcnt"]
propref_df = pd.merge(propref_df, prop_df, on="star_rating", how="left")
propref_df["ctrbyprops"] = propref_df["rcnt"].astype(float) / propref_df["allcnt"].astype(float)
propref_df = propref_df[["star_rating", "impression", "ctrbyprops"]]
X["star_rating"] = "sr_" + X["star"].astype(str) + "_" + X["rating"].astype(str)
X = pd.merge(X, propref_df, on=["star_rating", "impression"], how="left")
X["ctrbyprops"] = X["ctrbyprops"].fillna(0)
del ctrbyprops_df
del prop_df
del propref_df
        # "no search" items: sessions whose first action is already a clickout
action_types = ["clickout item"]
is_nosi_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
is_nosi_df = is_nosi_df.groupby("session_id").first().reset_index()
is_nosi_df = is_nosi_df[(is_nosi_df.action_type.isin(action_types)) & (is_nosi_df.is_y == 0)]
is_nosi_df = is_nosi_df[["reference"]].groupby("reference").size().reset_index()
is_nosi_df.columns = ["impression", "nosearch_cnt"]
X = pd.merge(X, is_nosi_df, on="impression", how="left")
X["nosearch_cnt"] = X["nosearch_cnt"].fillna(0)
del is_nosi_df
return X
class BySession(object):
@classmethod
def set(cls, X, dataset):
print("... ... BySession as Motivation")
all_df = dataset["all_df"]
# item ratio of appearance by each session
def get_precnt_ratio(x):
pre_references = str(x.pre_references).split("|")
len_pre_ref = len(pre_references)
if len_pre_ref != 0:
                return float(pre_references.count(x.impression)) / len_pre_ref
return 0
preref_df = all_df[all_df.action_type != "clickout item"].groupby("session_id").apply(
lambda x: "|".join([r for r in list(x.reference) if str.isnumeric(r)])).reset_index()
preref_df.columns = ["session_id", "pre_references"]
X = pd.merge(X, preref_df, on="session_id", how="left")
X[["pre_references"]] = X[["pre_references"]].fillna("")
X["precnt_ratio"] = X[["impression", "pre_references"]].apply(lambda x: get_precnt_ratio(x), axis=1)
del preref_df
# action_type ratio of appearance by each session
atype_long_names = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
        # order must match the alphabetically sorted columns produced by pd.get_dummies
        atype_short_names = ["co_ratio"                          # clickout item
                             , "iid_ratio"                       # interaction item deals
                             , "iii_ratio"                       # interaction item image
                             , "iif_ratio"                       # interaction item info
                             , "interaction_item_rating_ratio"   # interaction item rating
                             , "sfi_ratio"]                      # search for item
preref_df2 = all_df[all_df.action_type.isin(atype_long_names)][
["session_id", "reference", "action_type", "is_y"]].copy()
preref_df2 = preref_df2[preref_df2.is_y == 0] # to prevent leakage
preref_df2 = preref_df2[["session_id", "reference", "action_type"]]
preref_df3 = preref_df2[["session_id"]].groupby("session_id").size().reset_index()
preref_df3.columns = ["session_id", "cnt"]
preref_df2 = pd.get_dummies(preref_df2, columns=['action_type'])
preref_df2 = preref_df2.groupby(["session_id", "reference"]).sum().reset_index()
preref_df2.columns = ["session_id", "impression"] + atype_short_names
preref_df2 = pd.merge(preref_df2, preref_df3, on="session_id", how="left")
preref_df2[atype_short_names] = preref_df2[atype_short_names].astype(float)
for atype_short_name in atype_short_names:
preref_df2[atype_short_name] = preref_df2[atype_short_name] / preref_df2["cnt"]
X = pd.merge(X, preref_df2, on=["session_id", "impression"], how="left")
del preref_df2
del preref_df3
# # clickouted item 2 item during session
# v2v_counter = dataset["v2v_counter"]
# def extract_sv2v_counter(iids):
# v = {}
# for iid in iids:
# if iid in v2v_counter:
# for s in v2v_counter[iid]:
# if not s in v:
# v[s] = v2v_counter[iid][s]
# return v
#
# couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
# couted_df = couted_df[couted_df.action_type == "clickout item"]
# couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
# couted_df = couted_df[["session_id", "reference"]]
# couted_df.columns = ["session_id", "impression"]
# couted_df = couted_df[~couted_df.duplicated()]
# couted_df["clickouted"] = 1
# sv2v_df = couted_df.groupby("session_id").apply(
# lambda x: extract_sv2v_counter(list(x.impression))).reset_index()
# sv2v_df.columns = ["session_id", "sv2v"]
# X = pd.merge(X, sv2v_df, on="session_id", how="left")
# X["sv2v"] = X["sv2v"].fillna("{}")
# X["sv2v_score"] = X[["impression", "sv2v"]].apply(
# lambda x: x.sv2v[x.impression] if x.impression in x.sv2v else np.nan, axis=1)
# X.drop("sv2v", axis=1, inplace=True)
# sv2vs_stats = X.groupby("session_id").agg({"sv2v_score": [np.mean, np.std]}).reset_index()
# sv2vs_stats.columns = ["session_id", "sv2v_score_mean", "sv2v_score_std"]
# X = pd.merge(X, sv2vs_stats, on="session_id", how="left")
# X["sv2v_score_norm"] = X["sv2v_score"] - X["sv2v_score_mean"] / X["sv2v_score_std"]
# del couted_df
# del sv2v_df
# del sv2vs_stats
# is zero interactions
zeroit_df = all_df[["session_id"]].groupby("session_id").size().reset_index()
zeroit_df.columns = ["session_id", "it_count"]
zeroit_df["is_zeroit"] = zeroit_df[["it_count"]].apply(lambda x: 1 if x.it_count == 1 else 0, axis=1)
X = | pd.merge(X, zeroit_df, on="session_id", how="left") | pandas.merge |
import pandas as pd
import pytorch_lightning as pl
from argparse import ArgumentParser
from pathlib import Path
from pytorch_lightning import Callback, seed_everything
from pytorch_lightning.callbacks import (
EarlyStopping,
LearningRateMonitor,
ModelCheckpoint,
)
from pytorch_lightning.loggers import CSVLogger
from src.pl_data.datamodule import DataModule
from src.pl_data.rel_dataset import RELDataset
from src.pl_data.wnut_dataset import WNUTDataset
from src.pl_module.ger_model import GERModel
from src.pl_module.rbert_model import RBERT
from typing import Union
DATA_DIR = Path("data")
parser = ArgumentParser()
parser.add_argument("--fast_dev_run", type=bool, default=False)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--seed", nargs="+", type=int, default=[42])
parser.add_argument("--model", type=str)
parser.add_argument("--save_to_hub", type=str)
args, unknown = parser.parse_known_args()
REL_DATA = (
DATA_DIR / "rel_data" / "relations.csv"
if not args.fast_dev_run
else "tests/toy_data/train_rel.csv"
)
REL_DATA_TEST = (
DATA_DIR / "rel_data" / "relations_test.csv"
if not args.fast_dev_run
else "tests/toy_data/train_rel.csv"
)
def build_callbacks() -> list[Callback]:
callbacks: list[Callback] = [
LearningRateMonitor(
logging_interval="step",
log_momentum=False,
),
EarlyStopping(
monitor="val_loss",
mode="min",
verbose=True,
min_delta=0.0,
patience=3,
),
ModelCheckpoint(
filename="checkpoint",
monitor="val_f1",
mode="max",
save_top_k=1,
verbose=True,
),
]
return callbacks
def run(
dataset,
pl_model: pl.LightningModule,
name: str,
path: Union[Path, str],
test_path: Union[Path, str],
seed: int,
args=args,
) -> None:
seed_everything(seed, workers=True)
datamodule: pl.LightningDataModule = DataModule(
dataset=dataset,
path=path,
test_path=test_path,
num_workers=8,
batch_size=args.batch_size,
seed=seed,
)
model: pl.LightningModule = pl_model()
callbacks: list[Callback] = build_callbacks()
csv_logger = CSVLogger(
save_dir="csv_logs",
name="seed_" + str(seed),
version=name,
)
if args.fast_dev_run:
trainer_kwargs = {"gpus": None, "auto_select_gpus": False}
else:
trainer_kwargs = {"gpus": -1, "auto_select_gpus": True, "precision": 16}
trainer: pl.Trainer = pl.Trainer.from_argparse_args(
args,
**trainer_kwargs,
deterministic=True, # ensure reproducible results
default_root_dir="ckpts",
logger=[csv_logger],
log_every_n_steps=10,
callbacks=callbacks,
max_epochs=35,
)
trainer.tune(model=model, datamodule=datamodule)
trainer.fit(model=model, datamodule=datamodule)
if not args.fast_dev_run:
test = trainer.test(model=model, ckpt_path="best", datamodule=datamodule)
| pd.DataFrame(test) | pandas.DataFrame |
from tkinter import *
import pandas as pd
import numpy as np
from pathlib import Path
from tensorflow import keras
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
root = Tk()
root.title('MLB Predictor')
root.geometry("500x400")
def selectedNL():
homeTeam = clickedHome.get()
visitingTeam = clickedVisiting.get()
print(homeTeam)
print(visitingTeam)
predictNL(homeTeam, visitingTeam)
#def selectedAL():
#homeTeam = clickedHomeAL.get()
#visitingTeam = clickedVisitingAL.get()
#print(homeTeam)
#print(visitingTeam)
#predictAL(homeTeam, visitingTeam)
def predictNL(homeId, visitingId):
blueprintColumns = ['Visiting: Pythagorean expectation ratio', 'Home: Pythagorean expectation versus ratio',
'League Diffrence', 'Visiting: Odd ratio', 'Home: Team - Win rate',
'Home: Pitcher - Homeruns per game', 'Visiting: Team - Pythagorean expectation',
'Home: Pitcher - Saves per game', 'Home: Pitcher - Shutouts per game',
'Visiting: Pitcher - Saves per game', 'Home: Pythagorean expectation ratio',
'Home: Win ratio', 'Visiting: Team - Win rate']
targets_columns = ['Home: Win', 'Visiting: Win']
pathModel = '../Learning/Deep Training/Models/13D1956836164396.h5'
path = Path(__file__).parent.absolute()
print(path)
data_folder = path / 'FrontendData'
target_data = data_folder / "None_Targets_Frontend.csv"
predictor_data = data_folder / "None_Predictors_Frontend.csv"
df_targets = pd.read_csv(target_data)
df_predictors = | pd.read_csv(predictor_data) | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from petersburg import Graph
plt.style.use('ggplot')
__author__ = 'willmcginnis'
def simulate(c_switch):
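    # Sweep the probability weight of switching away from the third-party option and,
    # for each weight, simulate the decision graph to compare the expected cost of the
    # in-house branch (node 2) against the third-party branch (node 4).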
data = []
in_house = 1
third_party = in_house * 0.80
switching = in_house * c_switch
weights = np.linspace(0.1, 1, 100)
for weight in weights:
g = Graph()
g.from_dict({
1: {'payoff': 0, 'after': []},
2: {'payoff': 0, 'after': [{'node_id': 1, 'cost': in_house}]},
3: {'payoff': 0, 'after': [{'node_id': 2, 'cost': in_house}]},
4: {'payoff': 0, 'after': [{'node_id': 1, 'cost': third_party}]},
5: {'payoff': 0, 'after': [{'node_id': 4, 'cost': third_party}]},
6: {'payoff': 0, 'after': [{'node_id': 4, 'cost': switching, 'weight': weight}]},
7: {'payoff': 0, 'after': [{'node_id': 6, 'cost': in_house}, {'node_id': 5, 'cost': 2 * switching, 'weight': weight}]},
8: {'payoff': 0, 'after': [{'node_id': 3, 'cost': in_house}]},
9: {'payoff': 0, 'after': [{'node_id': 5, 'cost': third_party}]},
10: {'payoff': 0, 'after': [{'node_id': 7, 'cost': in_house}, {'node_id': 5, 'cost': 3 * switching, 'weight': weight}]},
11: {'payoff': 0, 'after': [{'node_id': 8, 'cost': 0}]},
12: {'payoff': 0, 'after': [{'node_id': 9, 'cost': 0}]},
13: {'payoff': 0, 'after': [{'node_id': 10, 'cost': 0}]},
})
options = g.get_options(iters=1000)
data.append([weight / (1.0 + weight), options[2] - options[4]])
return data
def plot(c_switch):
data = simulate(c_switch)
df = | pd.DataFrame(data, columns=['weight', 'in_house - third_party']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pprint
import config.settings
import config.strategy
from core.utility import chunk_trades, sharpe, drawdown
from multiprocessing_on_dill import Pool #, Process, Manager
from contextlib import closing
class accountCurve():
"""
Account curve object for Portfolio and Instrument.
Calculates the positions we want to be in, based on the volatility target.
"""
def __init__(self, portfolio, capital=500000, positions=None, panama_prices=None, nofx=False, portfolio_weights = 1, **kw):
self.portfolio = portfolio
self.nofx = nofx
self.weights = portfolio_weights
self.multiproc = kw.get('multiproc', True)
# If working on one instrument, put it in a list
if not isinstance(portfolio, list):
self.portfolio = [self.portfolio]
if isinstance(positions, pd.Series):
positions = positions.rename(self.portfolio[0].name)
self.capital = capital
self.panama = panama_prices
if positions is None:
self.positions = self.instrument_positions()
self.positions = self.positions.multiply(self.weights)
else:
self.positions = pd.DataFrame(positions)
# Reduce all our positions so that they fit inside our target volatility when combined.
self.positions = self.positions.multiply(self.vol_norm(),axis=0)
# If we run out of data (for example, if the data feed is stopped), hold position for 5 trading days and then close.
# chunk_trades() is a function that is designed to reduce the amount of trading (and hence cost)
self.positions = chunk_trades(self.positions).ffill(limit=5).fillna(0)
def __repr__(self):
"""
Returns a formatted list of statistics about the account curve.
"""
return pprint.pformat(self.stats_list())
def inst_calc(self):
"""Calculate all the things we need on all the instruments and cache it."""
try:
return self.memo_inst_calc
except:
if len(self.portfolio)>1 and self.multiproc:
with closing(Pool()) as pool:
self.memo_inst_calc = dict(pool.map(lambda x: (x.name, x.calculate()), self.portfolio))
else:
self.memo_inst_calc = dict(map(lambda x: (x.name, x.calculate()), self.portfolio))
return self.memo_inst_calc
def instrument_positions(self):
"""Position returned by the instrument objects, not the final position in the portfolio"""
try:
return self.memo_instrument_positions
except:
self.memo_instrument_positions = pd.DataFrame({k: v['position'] for k, v in self.inst_calc().items()})
return self.memo_instrument_positions
def rates(self):
"""
Returns a Series or DataFrame of exchange rates.
"""
if self.nofx==True:
return 1
try:
return self.memo_rates
except:
self.memo_rates = pd.DataFrame({k: v['rate'] for k, v in self.inst_calc().items()})
return self.memo_rates
def stats_list(self):
stats_list = ["sharpe",
"gross_sharpe",
"annual_vol",
"sortino",
"cap",
"avg_drawdown",
"worst_drawdown",
"time_in_drawdown",
"calmar",
"avg_return_to_drawdown"]
return {k: getattr(self, k)() for k in stats_list}
def returns(self):
"""
Returns a Series/Frame of net returns after commissions, spreads and estimated slippage.
"""
return self.position_returns() + self.transaction_returns() + self.commissions() + self.spreads()
def position_returns(self):
"""The returns from holding the portfolio we had yesterday"""
# We shift back 2, as self.positions is the frontier - tomorrow's ideal position.
return (self.positions.shift(2).multiply((self.panama_prices()).diff(), axis=0).fillna(0) * self.point_values()) * self.rates()
def transaction_returns(self):
"""Estimated returns from transactions including slippage. Uses the average settlement price of the last two days"""
# self.positions.diff().shift(1) = today's trades
slippage_multiplier = .5
return (self.positions.diff().shift(1).multiply((self.panama_prices()).diff()*slippage_multiplier, axis=0).fillna(0) * self.point_values()) * self.rates()
def commissions(self):
commissions = pd.Series({v.name: v.commission for v in self.portfolio})
return (self.positions.diff().shift(1).multiply(commissions)).fillna(0).abs()*-1
def spreads(self):
spreads = | pd.Series({v.name: v.spread for v in self.portfolio}) | pandas.Series |
import numpy as np
import pandas as pd
import pydicom
import os
import matplotlib.pyplot as plt
import collections
from tqdm import tqdm_notebook as tqdm
from datetime import datetime
from math import ceil, floor
import cv2
import tensorflow as tf
#import keras
#import sys
#from keras_applications.resnet import ResNet50
import tensorflow as tf
def test_gpu():
print(tf.__version__)
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
def _get_first_of_dicom_field_as_int(x):
if type(x) == pydicom.multival.MultiValue:
return int(x[0])
else:
return int(x)
def _get_windowing(data):
dicom_fields = [data.WindowCenter, data.WindowWidth, data.RescaleSlope, data.RescaleIntercept]
return [_get_first_of_dicom_field_as_int(x) for x in dicom_fields]
def _window_image(img, window_center, window_width, slope, intercept):
img = (img * slope + intercept)
img_min = window_center - window_width//2
img_max = window_center + window_width//2
img[img<img_min] = img_min
img[img>img_max] = img_max
return img
def _normalize(img):
if img.max() == img.min():
return np.zeros(img.shape)
return 2 * (img - img.min())/(img.max() - img.min()) - 1
def _read_dicom(path, desired_size=(224, 224)):
"""Will be used in DataGenerator"""
#print('reading ', path)
dcm = pydicom.dcmread(path)
window_params = _get_windowing(dcm) # (center, width, slope, intercept)
try:
# dcm.pixel_array might be corrupt (one case so far)
img = _window_image(dcm.pixel_array, *window_params)
except:
img = np.zeros(desired_size)
img = _normalize(img)
if desired_size != (512, 512):
# resize image
img = cv2.resize(img, desired_size, interpolation=cv2.INTER_LINEAR)
return img[:,:,np.newaxis]
def _read(path, desired_size=(224, 224)):
try:
img = cv2.imread(path)
if desired_size != (512, 512):
img = cv2.resize(img, desired_size, interpolation=cv2.INTER_LINEAR)
except:
print(path)
raise
return img[:, :, :1]
def calculating_class_weights(y_true):
from sklearn.utils.class_weight import compute_class_weight
number_dim = np.shape(y_true)[1]
weights = np.empty([number_dim, 2])
for i in range(number_dim):
x = compute_class_weight('balanced', [0.,1.], y_true[:, i])
weights[i] = x
return weights
# def get_weighted_loss(weights=np.array([
# [ 0.5, 0.5 ],
# [ 0.5, 20.],
# [ 0.5, 10.],
# [ 0.5, 15.],
# [ 0.5, 10.],
# [ 0.5, 5.]])
# ):
# def weighted_loss(y_true, y_pred):
# w = (weights[:,0]**(1-y_true))*(weights[:,1]**(y_true))
# bce = tf.keras.backend.binary_crossentropy(y_true, y_pred)
# return tf.keras.backend.mean(w*bce, axis=-1)
# return weighted_loss
class RsnaDataGenerator(tf.keras.utils.Sequence):
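    # Keras Sequence that yields batches of single-channel JPEG images (and, when
    # return_labels is True, their 6 target labels) for rows of a DataFrame indexed
    # by image ID.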
def __init__(self, df, batch_size, img_size,
return_labels=True,
img_dir='../data/stage_1_test_images_jpg/', *args, **kwargs):
self.df = df
self.batch_size = batch_size
self.img_size = img_size
self.img_dir = img_dir
self.return_labels = return_labels
self.on_epoch_end()
def __len__(self):
return int(ceil(len(self.df) / self.batch_size))
def __getitem__(self, index):
indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
return self._data_generation(self.df.iloc[indices])
def on_epoch_end(self):
self.indices = np.arange(len(self.df))
def _data_generation(self, batch_df):
X = np.empty((self.batch_size, *self.img_size, 1))
for i, ID in enumerate(batch_df.index.values):
X[i,] = _read(self.img_dir+ID+".jpg", self.img_size)
if not self.return_labels:
return X
Y = np.empty((self.batch_size, 6), dtype=np.float32)
for i, label in enumerate(batch_df['Label'].values):
Y[i,] = label
return X, Y
class BalancedTrainDataGenerator(RsnaDataGenerator):
def __init__(self, df, batch_size, img_size, return_labels=True,
img_dir='../data/stage_1_train_images_jpg/', *args, **kwargs):
super().__init__(df, batch_size, img_size, return_labels, img_dir, args, kwargs)
print('building balanced train generator: ', len(df), img_dir)
def __getitem__(self, index):
#indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
#sample half with 'any'==1
pos = self.df[self.df['Label']['any']==1].sample(n=self.batch_size//2)
neg = self.df[self.df['Label']['any']==0].sample(n=self.batch_size//2)
return self._data_generation(pd.concat([pos, neg]).sample(frac=1))
class TrainDataGenerator(tf.keras.utils.Sequence):
def __init__(self, list_IDs, labels, batch_size=1, img_size=(512, 512),
img_dir='../data/stage_1_train_images_jpg/', *args, **kwargs):
print('building train generator: ', len(list_IDs), img_dir)
self.list_IDs = list_IDs
self.labels = labels
self.batch_size = batch_size
self.img_size = img_size
self.img_dir = img_dir
self.on_epoch_end()
def __len__(self):
return int(ceil(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indices]
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
def on_epoch_end(self):
self.indices = np.arange(len(self.list_IDs))
np.random.shuffle(self.indices)
def __data_generation(self, list_IDs_temp):
X = np.empty((self.batch_size, *self.img_size, 1))
Y = np.empty((self.batch_size, 6), dtype=np.float32)
for i, ID in enumerate(list_IDs_temp):
X[i,] = _read(self.img_dir+ID+".jpg", self.img_size)
Y[i,] = self.labels.loc[ID].values
return X, Y
class TestDataGenerator(tf.keras.utils.Sequence):
def __init__(self, list_IDs, labels, batch_size=1, img_size=(512, 512),
img_dir='../data/stage_1_test_images_jpg/', *args, **kwargs):
self.list_IDs = list_IDs
self.labels = labels
self.batch_size = batch_size
self.img_size = img_size
self.img_dir = img_dir
self.on_epoch_end()
def __len__(self):
return int(ceil(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indices]
X = self.__data_generation(list_IDs_temp)
return X
def on_epoch_end(self):
self.indices = np.arange(len(self.list_IDs))
def __data_generation(self, list_IDs_temp):
X = np.empty((self.batch_size, *self.img_size, 1))
for i, ID in enumerate(list_IDs_temp):
X[i,] = _read(self.img_dir+ID+".jpg", self.img_size)
return X
def _initial_layer(input_dims):
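    # Expand the single-channel (grayscale) input to 3 channels with a 1x1 convolution,
    # batch norm and ReLU so that an ImageNet-pretrained RGB backbone can be attached.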
inputs = tf.keras.layers.Input(input_dims)
x = tf.keras.layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), name="initial_conv2d")(inputs)
x = tf.keras.layers.BatchNormalization(axis=3, epsilon=1.001e-5, name='initial_bn')(x)
x = tf.keras.layers.Activation('relu', name='initial_relu')(x)
return tf.keras.models.Model(inputs, x)
class Model1:
def __init__(self, engine, input_dims, batch_size=5, learning_rate=1e-3,
decay_rate=1.0, decay_steps=1, weights="imagenet", verbose=1):
self.engine = engine
self.input_dims = input_dims
self.batch_size = batch_size
self.learning_rate = learning_rate
self.decay_rate = decay_rate
self.decay_steps = decay_steps
self.weights = weights
self.verbose = verbose
self._build()
def _build(self):
initial_layer = _initial_layer((*self.input_dims, 1))
engine = self.engine(
include_top=False,
weights=self.weights, input_shape=(*self.input_dims, 3),
backend = tf.keras.backend, layers = tf.keras.layers,
models = tf.keras.models, utils = tf.keras.utils)
x = engine(initial_layer.output)
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
out = tf.keras.layers.Dense(6, activation="sigmoid", name='dense_output')(x)
loss = 'binary_crossentropy' # get_weighted_loss()
self.model = tf.keras.models.Model(inputs=initial_layer.input, outputs=out)
self.model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(0.0))
def fit(self, df, train_idx, img_dir, global_epoch):
        train_gen = BalancedTrainDataGenerator(
            df.iloc[train_idx],
            self.batch_size,
            self.input_dims,
            img_dir=img_dir)
self.model.fit_generator(
train_gen,
            #class_weight={0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
verbose=self.verbose,
use_multiprocessing=False,
workers=4,
callbacks=[
tf.keras.callbacks.ModelCheckpoint('../data/weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
tf.keras.callbacks.LearningRateScheduler(
lambda epoch: self.learning_rate * pow(self.decay_rate, floor(global_epoch / self.decay_steps))
)
]
)
def predict(self, df, test_idx, img_dir):
test_gen = TestDataGenerator(
df.iloc[test_idx].index,
None,
self.batch_size,
self.input_dims,
img_dir)
predictions = self.model.predict_generator(
test_gen,
verbose=1,
use_multiprocessing=False,
workers=4)
return predictions[:df.iloc[test_idx].shape[0]]
def save(self, path):
self.model.save_weights(path)
def load(self, path):
self.model.load_weights(path)
def read_testset(filename="../input/stage_1_sample_submission.csv"):
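    # Split the submission ID into image ID (first 12 characters) and diagnosis, then
    # unstack so each row is one image with one Label column per diagnosis.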
df = pd.read_csv(filename)
df["Image"] = df["ID"].str.slice(stop=12)
df["Diagnosis"] = df["ID"].str.slice(start=13)
df = df.loc[:, ["Label", "Diagnosis", "Image"]]
df = df.set_index(['Image', 'Diagnosis']).unstack(level=-1)
return df
def read_trainset(filename="../input/stage_1_train.csv", sample=None):
df = | pd.read_csv(filename, nrows=sample) | pandas.read_csv |
import pandas as pd
import math
from pathos.multiprocessing import ProcessingPool as Pool
import json
import numpy as np
from tqdm import tqdm, tqdm_pandas, trange
def _apply_df(args):
df, func, num, kwargs = args
return num, df.apply(func, **kwargs)
def apply_by_multiprocessing(df, func, **kwargs):
"""
    Apply ``func`` to a DataFrame in parallel by splitting it into chunks across worker processes.
    :param df: Input DataFrame
    :param func: Function applied to each chunk via ``DataFrame.apply``
    :param kwargs: Additional arguments for ``df.apply()`` such as ``axis``; must also include ``workers``, the number of processes
:return: Output DataFrame
"""
workers = kwargs.pop('workers')
pool = Pool(processes=workers)
result = pool.map(_apply_df, [(d, func, i, kwargs) for i, d in enumerate(np.array_split(df, workers))])
pool.close()
result = sorted(result, key=lambda x: x[0])
return | pd.concat([i[1] for i in result]) | pandas.concat |
from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
import psycopg2
import math
import pandas as pd
from openpyxl import Workbook
import csv
import random
def psql_pdc(query):
    # production PostgreSQL credentials
connP_P = {
'host' : '10.150.1.74',
'port' : '5432',
'user':'postgres',
'password':'<PASSWORD>',
'database' : 'postgres'}
    # connect to production PostgreSQL
    conexionP_P = psycopg2.connect(**connP_P)
    #print('\nConnection to the production PostgreSQL server established!')
    cursorP_P = conexionP_P.cursor()
    # execute the query on PostgreSQL
cursorP_P.execute(query)
anwr = cursorP_P.fetchall()
cursorP_P.close()
conexionP_P.close()
return anwr
def to_horiz(anwr_P,name,_id):
    # pivot from long (vertical) to wide (horizontal): one row per _id with columns name1..nameN
anwr_P1 = anwr_P.pivot(index=0,columns=1)
anwr_P1[_id] = anwr_P1.index
col1 = []
i=0
for i in range(anwr_P1.shape[1]-1):
col1.append(name+str(i+1))
col1.append(_id)
anwr_P1.columns = col1
return anwr_P1
def csv_o(fn,name):
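    # Stream the DataFrame as a pipe-delimited CSV attachment in the HTTP response.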
response = HttpResponse(content_type = "text/csv")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
# for j in range(fn.shape[1]):
# try:
# fn.iloc[:,j] = fn.iloc[:,j].str.decode(encoding='utf-8-sig')
# fn.iloc[:,j] = fn.iloc[:,j].str.encode(encoding='utf_16_le')
# except:
# pass
fn2 = [tuple(x) for x in fn.values]
writer = csv.writer(response,delimiter ='|')
writer.writerow(fn.columns)
writer.writerows(fn2)
return response
def excel(fn,name):
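    # Copy the DataFrame cell by cell into an openpyxl workbook and return it as an
    # .xlsx attachment in the HTTP response.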
wb = Workbook()
ws = wb.active
k = 0
a = pd.DataFrame(fn.columns)
for k in range(a.shape[0]):
ws.cell(row = 1, column = k+1).value = a.iloc[k,0]
i=0
j=0
for i in range(fn.shape[0]):
for j in range(0,fn.shape[1]):
try:
ws.cell(row = i+2, column = j+1).value = fn.iloc[i,j]
except:
pass
response = HttpResponse(content_type = "application/ms-excel")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
wb.save(response)
return response
def excel_CV_COL(request):
today = datetime.now()
tablename = "CV_Col"+today.strftime("%Y%m%d%H") + ".xlsx"
with open("./hello/Plantillas/Colp/QueryTel_COL.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Colp/QueryCor_COL.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Colp/QueryDir_COL.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Colp/QueryCV_COL.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Colp/QueryCiu_COL.txt","r") as f6:
queryP_Ciu = f6.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
df = df.rename(columns={0:'rownumber',
1:'obligacion_id',
2:'deudor_id',
3:'unico',
4:'estado',
5:'tipo_cliente',
6:'nombre',
7:'producto',
8:'initial_bucket',
9:'ciudad',
10:'sucursal',
11:'tipo_prod',
12:'dias_mora_inicial',
13:'dias_mora_actual',
14:'rango_mora_inicial',
15:'rango_mora_final',
16:'rango',
17:'suma_pareto',
18:'rango_pareto',
19:'fcast',
20:'fdesem',
21:'vrdesem',
22:'saldo_total_inicial',
23:'saldo_total_actual',
24:'saldo_capital_inicial',
25:'saldo_capital_actual',
26:'saldo_vencido_inicial',
27:'saldo_vencido_actual',
28:'pagomin',
29:'fultpago',
30:'vrultpago',
31:'agencia',
32:'tasainter',
33:'feultref',
34:'ultcond',
35:'fasigna',
36:'eqasicampana',
37:'diferencia_pago',
38:'pago_preliminar',
39:'pago_cliente',
40:'min',
41:'tarifa',
42:'honorarios',
43:'perfil_mes_4',
44:'perfil_mes_3',
45:'perfil_mes_2',
46:'perfil_mes_1',
47:'fecha_primer_gestion',
48:'fecha_ultima_gestion',
49:'perfil_mes_actual',
50:'contactabilidad',
51:'ultimo_alo',
52:'descod1',
53:'descod2',
54:'asesor',
55:'fecha_gestion',
56:'telefono_mejor_gestion',
57:'mejorgestionhoy',
58:'asesor_indicador_hoy',
59:'repeticion',
60:'llamadas',
61:'sms',
62:'correos',
63:'gescall',
64:'visitas',
65:'whatsapp',
66:'no_contacto',
67:'total_gestiones',
68:'telefono_positivo',
69:'marcaciones_telefono_positivo',
70:'ultima_marcacion_telefono_positivo',
71:'fec_creacion_ult_compromiso',
72:'fec_pactada_ult_compromiso',
73:'valor_acordado_ult_compromiso',
74:'asesor_ult_compromiso',
75:'cantidad_acuerdos_mes',
76:'estado_acuerdo',})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return excel(fn,tablename)
def csv_CV_Claro(request):
today = datetime.now()
tablename = "CV_Claro" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Claro/QueryTel_Claro.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Claro/QueryCor_Claro.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Claro/QueryCV_Claro.txt","r") as f4:
queryP_cons = f4.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
yanwr = psql_pdc(queryP_cons)
#dataframes
anwr_P = pd.DataFrame(anwr)
anwr_C = | pd.DataFrame(anwrC) | pandas.DataFrame |
import pandas as pd
import numpy as np
import ml_metrics as metrics
from sklearn.calibration import CalibratedClassifierCV
from sklearn import svm
from sklearn.multiclass import OneVsRestClassifier
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
print("read training data")
path = '../Data/'
train = pd.read_csv(path+'train.csv')
label = train['target']
trainID = train['id']
del train['id']
del train['target']
np.random.seed(131)
svc = svm.SVC(kernel='rbf',C=10,probability=True,verbose=True)
svc.fit(train.values, label)
#calibrated_svc = CalibratedClassifierCV(OneVsRestClassifier(svc,n_jobs=-1), method='isotonic', cv=5)
#calibrated_svc.fit(train.values, label)
print("read test data")
test = pd.read_csv(path+'test.csv')
ID = test['id']
del test['id']
clf_probs = svc.predict_proba(test.values)
#clf_probs = calibrated_svc.predict_proba(test.values)
sample = pd.read_csv(path+'sampleSubmission.csv')
print("writing submission data")
submission = pd.DataFrame(clf_probs, index=ID, columns=sample.columns[1:])
submission.to_csv(path+'svm.csv',index_label='id')
sample = pd.read_csv(path+'sampleSubmission.csv')
# retrain
submission = | pd.DataFrame(index=trainID, columns=sample.columns[1:]) | pandas.DataFrame |
"""
Sleep features.
This file calculates a set of features from the PSG sleep data.
These include:
- Spectral power (with and without adjustement for 1/f)
- Spindles and slow-waves detection
- Slow-waves / spindles phase-amplitude coupling
- Entropy and fractal dimension
Author: Dr <NAME> <<EMAIL>>, UC Berkeley.
Date: March 2021
DANGER: This function has not been extensively debugged and validated.
Use at your own risk.
"""
import mne
import yasa
import logging
import numpy as np
import pandas as pd
import antropy as ant
import scipy.signal as sp_sig
import scipy.stats as sp_stats
logger = logging.getLogger('yasa')
__all__ = ['compute_features_stage']
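
# Illustrative usage (not part of the original module); the EDF path and the 30-s
# hypnogram variable below are hypothetical:
#
# >>> raw = mne.io.read_raw_edf("subject01_psg.edf", preload=True)
# >>> hypno = yasa.hypno_upsample_to_data(hypno_30s, sf_hypno=1/30, data=raw)
# >>> features = compute_features_stage(raw, hypno, max_freq=35)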
def compute_features_stage(raw, hypno, max_freq=35, spindles_params=dict(),
sw_params=dict(), do_1f=True):
"""Calculate a set of features for each sleep stage from PSG data.
Features are calculated for N2, N3, NREM (= N2 + N3) and REM sleep.
Parameters
----------
raw : :py:class:`mne.io.BaseRaw`
An MNE Raw instance.
hypno : array_like
Sleep stage (hypnogram). The hypnogram must have the exact same
number of samples as ``data``. To upsample your hypnogram,
please refer to :py:func:`yasa.hypno_upsample_to_data`.
.. note::
The default hypnogram format in YASA is a 1D integer
vector where:
- -2 = Unscored
- -1 = Artefact / Movement
- 0 = Wake
- 1 = N1 sleep
- 2 = N2 sleep
- 3 = N3 sleep
- 4 = REM sleep
max_freq : int
Maximum frequency. This will be used to bandpass-filter the data and
to calculate 1 Hz bins bandpower.
    spindles_params : dict
        Optional keyword arguments that are passed to the
        :py:func:`yasa.spindles_detect` function. We strongly recommend
        adapting the thresholds to your population (e.g. more liberal for
        older adults).
    sw_params : dict
        Optional keyword arguments that are passed to the
        :py:func:`yasa.sw_detect` function. We strongly recommend
        adapting the thresholds to your population (e.g. more liberal for
        older adults).
Returns
-------
feature : pd.DataFrame
A long-format dataframe with stage and channel as index and
all the calculated metrics as columns.
"""
# #########################################################################
# 1) PREPROCESSING
# #########################################################################
# Safety checks
assert isinstance(max_freq, int), "`max_freq` must be int."
assert isinstance(raw, mne.io.BaseRaw), "`raw` must be a MNE Raw object."
assert isinstance(spindles_params, dict)
assert isinstance(sw_params, dict)
# Define 1 Hz bins frequency bands for bandpower
# Similar to [(0.5, 1, "0.5-1"), (1, 2, "1-2"), ..., (34, 35, "34-35")]
bands = []
freqs = [0.5] + list(range(1, max_freq + 1))
for i, b in enumerate(freqs[:-1]):
bands.append(tuple((b, freqs[i + 1], "%s-%s" % (b, freqs[i + 1]))))
# Append traditional bands
bands_classic = [
(0.5, 1, 'slowdelta'), (1, 4, 'fastdelta'), (0.5, 4, 'delta'),
(4, 8, 'theta'), (8, 12, 'alpha'), (12, 16, 'sigma'), (16, 30, 'beta'),
(30, max_freq, 'gamma')]
bands = bands_classic + bands
# Find min and maximum frequencies. These will be used for bandpass-filter
# and 1/f adjustement of bandpower. l_freq = 0.5 / h_freq = 35 Hz.
all_freqs_sorted = np.sort(np.unique(
[b[0] for b in bands] + [b[1] for b in bands]))
l_freq = all_freqs_sorted[0]
h_freq = all_freqs_sorted[-1]
    # Mapping dictionary from integer to string for sleep stages (2 --> N2)
stage_mapping = {
-2: 'Unscored',
-1: 'Artefact',
0: 'Wake',
1: 'N1',
2: 'N2',
3: 'N3',
4: 'REM',
6: 'NREM',
7: 'WN' # Whole night = N2 + N3 + REM
}
# Hypnogram check + calculate NREM hypnogram
hypno = np.asarray(hypno, dtype=int)
assert hypno.ndim == 1, 'Hypno must be one dimensional.'
unique_hypno = np.unique(hypno)
logger.info('Number of unique values in hypno = %i', unique_hypno.size)
# IMPORTANT: NREM is defined as N2 + N3, excluding N1 sleep.
hypno_NREM = pd.Series(hypno).replace({2: 6, 3: 6}).to_numpy()
minutes_of_NREM = (hypno_NREM == 6).sum() / (60 * raw.info['sfreq'])
# WN = Whole night = N2 + N3 + REM (excluding N1)
hypno_WN = pd.Series(hypno).replace({2: 7, 3: 7, 4: 7}).to_numpy()
# minutes_of_WN = (hypno_WN == 7).sum() / (60 * raw.info['sfreq'])
# Keep only EEG channels and copy to avoid in-place modification
raw_eeg = raw.copy().pick_types(eeg=True)
# Remove flat channels
bool_flat = raw_eeg.get_data().std(axis=1) == 0
chan_flat = np.array(raw_eeg.ch_names)[bool_flat].tolist()
if len(chan_flat):
logger.warning("Removing flat channel(s): %s" % chan_flat)
raw_eeg.drop_channels(chan_flat)
# Remove suffix from channels: C4-M1 --> C4
chan_nosuffix = [c.split('-')[0] for c in raw_eeg.ch_names]
raw_eeg.rename_channels(dict(zip(raw_eeg.ch_names, chan_nosuffix)))
# Rename P7/T5 --> P7
chan_noslash = [c.split('/')[0] for c in raw_eeg.ch_names]
raw_eeg.rename_channels(dict(zip(raw_eeg.ch_names, chan_noslash)))
chan = raw_eeg.ch_names
# Resample to 100 Hz and bandpass-filter
raw_eeg.resample(100, verbose=False)
raw_eeg.filter(l_freq, h_freq, verbose=False)
# Extract data and sf
data = raw_eeg.get_data() * 1e6 # Scale from Volts (MNE default) to uV
sf = raw_eeg.info['sfreq']
assert data.ndim == 2, 'data must be 2D (chan, times).'
assert hypno.size == data.shape[1], 'Hypno must have same size as data.'
# #########################################################################
# 2) SPECTRAL POWER
# #########################################################################
print(" ..calculating spectral powers")
# 2.1) 1Hz bins, N2 / N3 / REM
# win_sec = 4 sec = 0.25 Hz freq resolution
df_bp = yasa.bandpower(raw_eeg, hypno=hypno, bands=bands, win_sec=4, include=(2, 3, 4))
# Same for NREM / WN
df_bp_NREM = yasa.bandpower(raw_eeg, hypno=hypno_NREM, bands=bands, include=6)
df_bp_WN = yasa.bandpower(raw_eeg, hypno=hypno_WN, bands=bands, include=7)
df_bp = pd.concat([df_bp, df_bp_NREM, df_bp_WN], axis=0)
df_bp.drop(columns=['TotalAbsPow', 'FreqRes', 'Relative'], inplace=True)
df_bp = df_bp.add_prefix('bp_').reset_index()
# Replace 2 --> N2
df_bp['Stage'] = df_bp['Stage'].map(stage_mapping)
# Assert that there are no negative values (see below issue on 1/f)
assert not (df_bp._get_numeric_data() < 0).any().any()
df_bp.columns = df_bp.columns.str.lower()
# 2.2) Same but after adjusting for 1/F (VERY SLOW!)
# This is based on the IRASA method described in Wen & Liu 2016.
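    # In short, IRASA splits the PSD into an aperiodic (1/f, fractal) part and an
    # oscillatory (periodic) part; the bandpower below is computed on the oscillatory
    # part only, and the absolute slope of the aperiodic fit is kept as a feature.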
if do_1f:
df_bp_1f = []
for stage in [2, 3, 4, 6, 7]:
if stage == 6:
# Use hypno_NREM
data_stage = data[:, hypno_NREM == stage]
elif stage == 7:
# Use hypno_WN
data_stage = data[:, hypno_WN == stage]
else:
data_stage = data[:, hypno == stage]
# Skip if stage is not present in data
if data_stage.shape[-1] == 0:
continue
# Calculate aperiodic / oscillatory PSD + slope
freqs, _, psd_osc, fit_params = yasa.irasa(
data_stage, sf, ch_names=chan, band=(l_freq, h_freq),
win_sec=4)
# Make sure that we don't have any negative values in PSD
# See https://github.com/raphaelvallat/yasa/issues/29
psd_osc = psd_osc - psd_osc.min(axis=-1, keepdims=True)
# Calculate bandpower
bp = yasa.bandpower_from_psd(psd_osc, freqs, ch_names=chan,
bands=bands)
# Add 1/f slope to dataframe and sleep stage
bp['1f_slope'] = np.abs(fit_params['Slope'].to_numpy())
bp.insert(loc=0, column="Stage", value=stage_mapping[stage])
df_bp_1f.append(bp)
# Convert to a dataframe
df_bp_1f = pd.concat(df_bp_1f)
# Remove the TotalAbsPower column, incorrect because of negative values
df_bp_1f.drop(columns=['TotalAbsPow', 'FreqRes', 'Relative'],
inplace=True)
df_bp_1f.columns = [c if c in ['Stage', 'Chan', '1f_slope']
else 'bp_adj_' + c for c in df_bp_1f.columns]
assert not (df_bp_1f._get_numeric_data() < 0).any().any()
df_bp_1f.columns = df_bp_1f.columns.str.lower()
# Merge with the main bandpower dataframe
df_bp = df_bp.merge(df_bp_1f, how="outer")
# #########################################################################
# 3) SPINDLES DETECTION
# #########################################################################
print(" ..detecting sleep spindles")
spindles_params.update(include=(2, 3))
# Detect spindles in N2 and N3
# Thresholds have to be tuned with visual scoring of a subset of data
# https://raphaelvallat.com/yasa/build/html/generated/yasa.spindles_detect.html
sp = yasa.spindles_detect(raw_eeg, hypno=hypno, **spindles_params)
df_sp = sp.summary(grp_chan=True, grp_stage=True).reset_index()
df_sp['Stage'] = df_sp['Stage'].map(stage_mapping)
# Aggregate using the mean (adding NREM = N2 + N3)
df_sp = sp.summary(grp_chan=True, grp_stage=True)
df_sp_NREM = sp.summary(grp_chan=True).reset_index()
df_sp_NREM['Stage'] = 6
df_sp_NREM.set_index(['Stage', 'Channel'], inplace=True)
density_NREM = df_sp_NREM['Count'] / minutes_of_NREM
df_sp_NREM.insert(loc=1, column='Density', value=density_NREM.to_numpy())
df_sp = pd.concat([df_sp, df_sp_NREM], axis=0)
df_sp.columns = ['sp_' + c if c in ['Count', 'Density'] else
'sp_mean_' + c for c in df_sp.columns]
# Prepare to export
df_sp.reset_index(inplace=True)
df_sp['Stage'] = df_sp['Stage'].map(stage_mapping)
df_sp.columns = df_sp.columns.str.lower()
df_sp.rename(columns={'channel': 'chan'}, inplace=True)
# #########################################################################
# 4) SLOW-WAVES DETECTION & SW-Sigma COUPLING
# #########################################################################
print(" ..detecting slow-waves")
# Make sure we calculate coupling
sw_params.update(coupling=True)
# Detect slow-waves
# Option 1: Using absolute thresholds
# IMPORTANT: THRESHOLDS MUST BE ADJUSTED ACCORDING TO AGE!
sw = yasa.sw_detect(raw_eeg, hypno=hypno, **sw_params)
# Aggregate using the mean per channel x stage
df_sw = sw.summary(grp_chan=True, grp_stage=True)
# Add NREM
df_sw_NREM = sw.summary(grp_chan=True).reset_index()
df_sw_NREM['Stage'] = 6
df_sw_NREM.set_index(['Stage', 'Channel'], inplace=True)
density_NREM = df_sw_NREM['Count'] / minutes_of_NREM
df_sw_NREM.insert(loc=1, column='Density', value=density_NREM.to_numpy())
    df_sw = pd.concat([df_sw, df_sw_NREM])
from sympy import *
import pandas as pd
def seccion_dorada(xl, xu, tolerance, function):
x = Symbol('x')
f = parse_expr(function)
iteration = 0
    data = pd.DataFrame(columns=['iteration','xl','xu','x1','x2','f(x1)','f(x2)','error'])
# -*- coding: utf-8 -*-
import pandas as pd
# def QA_data_make_qfq(bfq_data, xdxr_data):
#     'Apply price adjustment (fuquan) using xdxr data from the database'
# info = xdxr_data.query('category==1')
# bfq_data = bfq_data.assign(if_trade=1)
#
# if len(info) > 0:
# data = pd.concat([bfq_data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['category']]], axis=1)
# data['if_trade'].fillna(value=0, inplace=True)
# data = data.fillna(method='ffill')
# data = pd.concat([data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['fenhong', 'peigu', 'peigujia',
# 'songzhuangu']]], axis=1)
# else:
# data = pd.concat([bfq_data, info.loc[:, ['category', 'fenhong', 'peigu', 'peigujia',
# 'songzhuangu']]], axis=1)
# data = data.fillna(0)
# data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu']
# * data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
# data['adj'] = (data['preclose'].shift(-1) /
# data['close']).fillna(1)[::-1].cumprod()
# data['open'] = data['open'] * data['adj']
# data['high'] = data['high'] * data['adj']
# data['low'] = data['low'] * data['adj']
# data['close'] = data['close'] * data['adj']
# data['preclose'] = data['preclose'] * data['adj']
# data['volume'] = data['volume'] / \
# data['adj'] if 'volume' in data.columns else data['vol']/data['adj']
# try:
# data['high_limit'] = data['high_limit'] * data['adj']
# data['low_limit'] = data['high_limit'] * data['adj']
# except:
# pass
# return data.query('if_trade==1 and open != 0').drop(['fenhong', 'peigu', 'peigujia', 'songzhuangu',
# 'if_trade', 'category'], axis=1)
#
#
# def QA_data_make_hfq(bfq_data, xdxr_data):
#     'Apply price adjustment (fuquan) using xdxr data from the database'
# info = xdxr_data.query('category==1')
# bfq_data = bfq_data.assign(if_trade=1)
#
# if len(info) > 0:
# data = pd.concat([bfq_data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['category']]], axis=1)
#
# data['if_trade'].fillna(value=0, inplace=True)
# data = data.fillna(method='ffill')
#
# data = pd.concat([data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['fenhong', 'peigu', 'peigujia',
# 'songzhuangu']]], axis=1)
# else:
# data = pd.concat([bfq_data, info.loc[:, ['category', 'fenhong', 'peigu', 'peigujia',
# 'songzhuangu']]], axis=1)
# data = data.fillna(0)
# data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu']
# * data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
# data['adj'] = (data['close'] / data['preclose'].shift(-1)
# ).cumprod().shift(1).fillna(1)
# data['open'] = data['open'] * data['adj']
# data['high'] = data['high'] * data['adj']
# data['low'] = data['low'] * data['adj']
# data['close'] = data['close'] * data['adj']
# data['preclose'] = data['preclose'] * data['adj']
# data['volume'] = data['volume'] / \
# data['adj'] if 'volume' in data.columns else data['vol']/data['adj']
# try:
# data['high_limit'] = data['high_limit'] * data['adj']
# data['low_limit'] = data['high_limit'] * data['adj']
# except:
# pass
# return data.query('if_trade==1 and open != 0').drop(['fenhong', 'peigu', 'peigujia', 'songzhuangu'], axis=1)
def _QA_data_stock_to_fq(bfq_data, xdxr_data, fqtype):
    'Apply price adjustment (fuquan) using xdxr data from the database'
info = xdxr_data.query('category==1')
bfq_data = bfq_data.assign(if_trade=1)
if len(info) > 0:
data = pd.concat([
bfq_data, info.loc[bfq_data.index[0]:bfq_data.index[-1],
['category']]
],
axis=1)
data['if_trade'].fillna(value=0, inplace=True)
data = data.fillna(method='ffill')
data = pd.concat([
data, info.loc[bfq_data.index[0]:bfq_data.index[-1],
['fenhong', 'peigu', 'peigujia', 'songzhuangu']]
],
axis=1)
else:
data = pd.concat([
bfq_data, info.
loc[:, ['category', 'fenhong', 'peigu', 'peigujia', 'songzhuangu']]
],
axis=1)
data = data.fillna(0)
data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] +
data['peigu'] * data['peigujia']) / (
10 + data['peigu'] + data['songzhuangu'])
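    # The previous close is adjusted for corporate actions, all expressed per 10
    # shares: (prev_close * 10 - cash dividend + rights shares * rights price)
    # divided by (10 + rights shares + bonus/converted shares).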
if fqtype in ['01', 'qfq']:
data['adj'] = (data['preclose'].shift(-1) /
data['close']).fillna(1)[::-1].cumprod()
else:
data['adj'] = (data['close'] /
data['preclose'].shift(-1)).cumprod().shift(1).fillna(1)
for col in ['open', 'high', 'low', 'close', 'preclose']:
data[col] = data[col] * data['adj']
data['volume'] = data['volume'] / \
data['adj'] if 'volume' in data.columns else data['vol']/data['adj']
try:
data['high_limit'] = data['high_limit'] * data['adj']
        data['low_limit'] = data['low_limit'] * data['adj']
except:
pass
return data.query('if_trade==1 and open != 0').drop(
['fenhong', 'peigu', 'peigujia', 'songzhuangu', 'if_trade', 'category'],
axis=1,
errors='ignore')
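# Hedged usage sketch (assumes `bfq_data` is an unadjusted OHLC DataFrame indexed by
# date and `xdxr_data` the matching xdxr records from the database):
#     qfq = _QA_data_stock_to_fq(bfq_data, xdxr_data, 'qfq')  # forward-adjusted prices
#     hfq = _QA_data_stock_to_fq(bfq_data, xdxr_data, 'hfq')  # backward-adjusted prices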
def QA_data_stock_to_fq(__data, type_='01'):
def __QA_fetch_stock_xdxr(code,
format_='pd',
collections=DATABASE.stock_xdxr):
        'Fetch stock xdxr (ex-dividend/ex-rights) info from the database'
try:
data = pd.DataFrame([
item for item in collections.find({'code': code})
]).drop(['_id'], axis=1)
            data['date'] = pd.to_datetime(data['date'])
"""This module aims at recovering OpenStreetMap data through Overpass API
To accomplish a task, the following command must be run on the terminal:
```
python -m luigi --local-scheduler --module urbansprawl.tasks <Task> <params>
```
with `Task` one of the class defined below, and `params` the corresponding
parameters.
This computation is done locally (because of the `--local-scheduler` option). It
can be done on a server, by first launching an instance of the luigi daemon:
```
luigid
```
and then by running the previous command without the `--local-scheduler`
option. The task dependency graph and some miscellaneous information about the
tasks are visible at `localhost:8082` URL address.
"""
from configparser import ConfigParser
from datetime import date, datetime as dt
import json
import os
import requests
import zipfile
import geopandas as gpd
import luigi
from luigi.format import MixedUnicodeBytes
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import numpy as np
import osmnx
from osgeo import gdal, ogr, osr
import pandas as pd
import sh
from shapely.geometry import Point, Polygon
from urbansprawl.osm.overpass import (
create_buildings_gdf,
create_building_parts_gdf,
create_pois_gdf,
create_landuse_gdf,
retrieve_route_graph,
)
from urbansprawl.osm.utils import (
sanity_check_height_tags,
associate_structures,
)
from urbansprawl.osm.classification import (
classify_tag,
classify_activity_category,
compute_landuse_inference,
)
from urbansprawl.osm.surface import compute_landuses_m2
from urbansprawl.sprawl.core import get_indices_grid_from_bbox
from urbansprawl.sprawl.landusemix import compute_grid_landusemix
from urbansprawl.sprawl.accessibility import compute_grid_accessibility
from urbansprawl.sprawl.dispersion import compute_grid_dispersion
from urbansprawl.population.data_extract import get_extract_population_data
from urbansprawl.population.urban_features import (
compute_full_urban_features,
get_training_testing_data,
get_Y_X_features_population_data,
prepare_testing_data
)
from urbansprawl.population.downscaling import (
train_population_downscaling_model,
build_downscaling_cnn,
)
from urbansprawl import geometries
config = ConfigParser()
if os.path.isfile("config.ini"):
config.read("config.ini")
else:
raise FileNotFoundError("There is no 'config.ini' in the project folder!")
# Columns of interest corresponding to OSM keys
OSM_TAG_COLUMNS = [
"amenity",
"landuse",
"leisure",
"shop",
"man_made",
"building",
"building:use",
"building:part",
]
COLUMNS_OF_INTEREST = OSM_TAG_COLUMNS + ["osm_id", "geometry", "height_tags"]
COLUMNS_OF_INTEREST_POIS = OSM_TAG_COLUMNS + ["osm_id", "geometry"]
COLUMNS_OF_INTEREST_LANDUSES = ["osm_id", "geometry", "landuse"]
HEIGHT_TAGS = [
"min_height",
"height",
"min_level",
"levels",
"building:min_height",
"building:height",
"building:min_level",
"building:levels",
"building:levels:underground",
]
BUILDING_PARTS_TO_FILTER = ["no", "roof"]
MINIMUM_M2_BUILDING_AREA = 9.0
def define_filename(description, city, date, datapath, extension):
"""Build a distinctive filename regarding a given `description`, `city`,
    `date` (ISO-formatted), `datapath` and an `extension` for the file extension
Parameters
----------
description : str
Describe the file content in one word
city : str
City of interest, used for the queries to Overpass API
date : str
Date of the Overpass query, in ISO format
datapath : str
Path of the file on the file system
extension : str
File extension, *i.e.* GeoJSON
Returns
-------
str
Full path name on the file system
"""
os.makedirs(datapath, exist_ok=True)
filename = "{}-{}.{}".format(description, date, extension)
return os.path.join(datapath, city, filename)
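# Hedged example: define_filename("raw-buildings", "valence-drome", "2017-01-01",
# "./data", "geojson") returns "./data/valence-drome/raw-buildings-2017-01-01.geojson".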
def set_list_as_str(l):
    """Small utility function to transform a list into a string
Parameters
----------
l : list
Input list
Returns
-------
str
Stringified version of the input list, with items separated with a comma
"""
if type(l) == list:
return ",".join(str(e) for e in l)
def clean_list_in_geodataframe_column(gdf, column):
    """Stringify items of `column` within `gdf`, in order to allow its
serialization
Parameters
----------
gdf : GeoDataFrame
Input data structure
column : str
Column to modify
Returns
-------
GeoDataFrame
Modified input structure, with a fixed `column` (contains stringified items)
"""
if column in gdf.columns:
gdf[column] = gdf[column].apply(lambda x: set_list_as_str(x))
return gdf
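# Hedged example: if gdf["height_tags"] holds lists such as ["12", "15"],
# clean_list_in_geodataframe_column(gdf, "height_tags") rewrites them as "12,15",
# which keeps the GeoDataFrame serializable to GeoJSON.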
class GetBoundingBox(luigi.Task):
"""Extract the bounding box around a given `city`
Example:
```
python -m luigi --local-scheduler --module urbansprawl.tasks GetBoundingBox
--city valence-drome
```
Attributes
----------
city : str
City of interest
datapath : str
Indicates the folder where the task result has to be serialized
(default: `./data`)
"""
city = luigi.Parameter()
datapath = luigi.Parameter("./data")
def output(self):
"""Indicates the task result destination onto the file system
"""
path = os.path.join(self.datapath, self.city)
os.makedirs(path, exist_ok=True)
return luigi.LocalTarget(os.path.join(path, "bounding_box.geojson"))
def run(self):
"""Main operations of the Luigi task
"""
city_gdf = osmnx.gdf_from_place(self.city, which_result=1)
city_gdf.to_file(self.output().path, driver="GeoJSON")
class GetData(luigi.Task):
"""Give a raw version of OpenStreetMap items through an Overpass API query
(buildlings, building parts, POIs or land uses)
Example:
```
python -m luigi --local-scheduler --module urbansprawl.tasks GetData
--city valence-drome --date-query 2017-01-01T1200 --table buildings
```
Attributes
----------
city : str
City of interest
datapath : str
Indicates the folder where the task result has to be serialized
geoformat : str
Output file extension (by default: `GeoJSON`)
date_query : str
Date to which the OpenStreetMap data must be recovered (format:
AAAA-MM-DDThhmm)
table : str
Type of data to retrieve (either `buildings`, `building-parts`, `pois`
or `land-uses`)
"""
city = luigi.Parameter()
datapath = luigi.Parameter("./data")
geoformat = luigi.Parameter("geojson")
date_query = luigi.DateMinuteParameter(default=date.today())
table = luigi.Parameter("buildings")
def requires(self):
"""Gives the task(s) that are needed to accomplish the current one. It
refers implicitely to the project dependency graph.
"""
return GetBoundingBox(self.city, self.datapath)
def output(self):
output_path = define_filename(
"raw-" + self.table,
self.city,
dt.date(self.date_query).isoformat(),
self.datapath,
self.geoformat,
)
return luigi.LocalTarget(output_path)
def run(self):
city_gdf = gpd.read_file(self.input().path)
north, south, east, west = city_gdf.loc[
0, ["bbox_north", "bbox_south", "bbox_east", "bbox_west"]
]
date = "[date:'" + str(self.date_query) + "']"
if self.table == "buildings":
gdf = create_buildings_gdf(
date=date, north=north, south=south, east=east, west=west
)
gdf.drop(["nodes"], axis=1, inplace=True)
elif self.table == "building-parts":
gdf = create_building_parts_gdf(
date=date, north=north, south=south, east=east, west=west
)
if "building" in gdf.columns:
gdf = gdf[
(~gdf["building:part"].isin(BUILDING_PARTS_TO_FILTER))
& (~gdf["building:part"].isnull())
& (gdf["building"].isnull())
]
else:
gdf = gdf[
(~gdf["building:part"].isin(BUILDING_PARTS_TO_FILTER))
& (~gdf["building:part"].isnull())
]
if "nodes" in gdf.columns:
gdf.drop(["nodes"], axis=1, inplace=True)
gdf["osm_id"] = gdf.index
gdf.reset_index(drop=True, inplace=True)
elif self.table == "pois":
gdf = create_pois_gdf(
date=date, north=north, south=south, east=east, west=west
)
columns_to_drop = [
col
for col in list(gdf.columns)
if col not in COLUMNS_OF_INTEREST_POIS
]
gdf.drop(columns_to_drop, axis=1, inplace=True)
gdf["osm_id"] = gdf.index
gdf.reset_index(drop=True, inplace=True)
elif self.table == "land-uses":
gdf = create_landuse_gdf(
date=date, north=north, south=south, east=east, west=west
)
gdf = gdf[["landuse", "geometry"]]
gdf["osm_id"] = gdf.index
columns_to_drop = [
col
for col in list(gdf.columns)
if col not in COLUMNS_OF_INTEREST_LANDUSES
]
gdf.drop(columns_to_drop, axis=1, inplace=True)
gdf.reset_index(drop=True, inplace=True)
else:
raise ValueError(
(
"Please provide a valid table name (either "
"'buildings', 'building-parts', 'pois' "
"or 'land-uses')."
)
)
gdf.to_file(self.output().path, driver="GeoJSON")
class SanityCheck(luigi.Task):
"""Check buildings and building parts GeoDataFrames, especially their
height tags
Example:
```
python -m luigi --local-scheduler --module urbansprawl.tasks SanityCheck
--city valence-drome --table buildings
```
Attributes
----------
city : str
City of interest
datapath : str
Indicates the folder where the task result has to be serialized
geoformat : str
Output file extension (by default: `GeoJSON`)
date_query : str
Date to which the OpenStreetMap data must be recovered (format:
AAAA-MM-DDThhmm)
table : str
Structure to check, either `buildings` or `building-parts`
"""
city = luigi.Parameter()
datapath = luigi.Parameter("./data")
geoformat = luigi.Parameter("geojson")
date_query = luigi.DateMinuteParameter(default=date.today())
table = luigi.Parameter(default="buildings")
def requires(self):
if self.table in ["buildings", "building-parts"]:
return GetData(
self.city,
self.datapath,
self.geoformat,
self.date_query,
self.table,
)
else:
raise ValueError(
(
"Please provide a valid table name (either "
"'buildings' or 'building-parts')."
)
)
def output(self):
output_path = define_filename(
"checked-" + self.table,
self.city,
dt.date(self.date_query).isoformat(),
self.datapath,
self.geoformat,
)
return luigi.LocalTarget(output_path)
def run(self):
gdf = gpd.read_file(self.input().path)
sanity_check_height_tags(gdf)
def remove_nan_dict(x):
"""Remove entries with nan values
"""
        return {k: v for k, v in x.items() if pd.notnull(v)}
import pandas as pd
import numpy as np
import random
import os
import json
import csv
from sklearn.model_selection import StratifiedKFold, KFold
def analysis(data):
class Node:
def __init__(self, retest, fp, rmi):
self.retest = retest
self.fp = fp
self.rmi = rmi
if not np.isnan(self.rmi):
self.rmi = int(self.rmi)
else:
self.rmi = np.NaN
def __str__(self):
ret = 'retest: {}, fp: {}, rmi: {}'.format(self.retest, self.fp, self.rmi)
return ret
def __eq__(self, other):
return (self.retest, self.fp, self.rmi) == (other.retest, other.fp, other.rmi)
def __hash__(self):
return hash(self.retest) ^ hash(self.fp) ^ hash(self.rmi)
        def __cmp__(self, other):
            return (self.retest, self.fp, self.rmi) == (other.retest, other.fp, other.rmi)
dict_ = {}
for ix, row in data.iterrows():
node = Node(row['Keck_Pria_AS_Retest'], row['Keck_Pria_FP_data'], row['Keck_RMI_cdd'])
if node not in dict_.keys():
dict_[node] = 1
else:
dict_[node] += 1
for k in dict_.keys():
print(k, '\t---', dict_[k])
return
def greedy_multi_splitting(data, k, directory, file_list):
class Node:
def __init__(self, retest, fp, rmi):
self.retest = retest
self.fp = fp
self.rmi = rmi
if not np.isnan(self.rmi):
self.rmi = int(self.rmi)
else:
self.rmi = np.NaN
def __str__(self):
ret = 'retest: {}, fp: {}, rmi: {}'.format(self.retest, self.fp, self.rmi)
return ret
def __eq__(self, other):
return (self.retest, self.fp, self.rmi) == (other.retest, other.fp, other.rmi)
def __hash__(self):
return hash(self.retest) ^ hash(self.fp) ^ hash(self.rmi)
        def __cmp__(self, other):
            return (self.retest, self.fp, self.rmi) == (other.retest, other.fp, other.rmi)
dict_ = {}
for ix, row in data.iterrows():
node = Node(row['Keck_Pria_AS_Retest'], row['Keck_Pria_FP_data'], row['Keck_RMI_cdd'])
if node not in dict_.keys():
dict_[node] = []
dict_[node].append(ix)
list_ = []
for key in dict_.keys():
one_group_list = np.array(dict_[key])
current = []
if len(one_group_list) < k:
n = len(one_group_list)
for i in range(n):
current.append(np.array(one_group_list[i]))
for i in range(n, k):
current.append(np.array([]))
else:
kf = KFold(len(one_group_list), k, shuffle=True)
for _, test_index in kf:
current.append(one_group_list[test_index])
random.shuffle(current)
list_.append(current)
if not os.path.exists(directory):
os.makedirs(directory)
print(len(list_))
for split in range(k):
index_block = np.hstack((list_[0][split],
list_[1][split],
list_[2][split],
list_[3][split],
list_[4][split],
list_[5][split],
list_[6][split],
list_[7][split],
list_[8][split]))
index_block = index_block.astype(np.int)
df_block = data.iloc[index_block]
print(df_block.shape)
file_path = directory + file_list[split]
df_block.to_csv(file_path, index=None)
return
def split_data(input_file, output_file_list, k):
data_pd = pd.read_csv(input_file)
y_data = data_pd['true_label']
y_data = y_data.astype(np.float64)
if y_data.ndim == 1:
n = y_data.shape[0]
y_data = y_data.reshape(n, 1)
cnt = 0
split = StratifiedKFold(y_data[:, -1], n_folds=k, shuffle=True, random_state=0)
for train_index, test_index in split:
# For testing
# Can list all existing active ones
# data_batch[data_batch['true_label']>0]['molecule ID(RegID)']
data_batch = data_pd.iloc[test_index]
data_batch.to_csv(output_file_list[cnt], index_label=None, compression='gzip')
cnt += 1
return
def read_merged_data(input_file_list, usecols=None):
    whole_pd = pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import calendar
import datetime
import cairo
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from itertools import product
#==================================================================================================================
def add_months(sourcedate, months):
    """Add (or subtract, if negative) 'months' months to a given date 'sourcedate'.
    The format of 'sourcedate' is datetime.date(year, month, day)."""
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day, calendar.monthrange(year,month)[1])
return datetime.date(year, month, day)
#==================================================================================================================
def datetime_to_integer(dt_time):
    """Convert a date 'dt_time' into an integer (YYYYMMDD).
    The format of 'dt_time' is datetime.date(year, month, day)"""
integer = 10000*dt_time.year + 100*dt_time.month + dt_time.day
return integer
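# Hedged examples: add_months(datetime.date(2021, 1, 31), 1) -> datetime.date(2021, 2, 28),
# and datetime_to_integer(datetime.date(2021, 2, 28)) -> 20210228.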
#====================================================================================================================
def preprocesamiento(rfm):
df = rfm[rfm.ANTIGUEDAD >= 6]
#df = df.dropna(how='any',axis=0)
return df
#=====================================================================================================================
def distribucion_aperturas(df):
a4_dims = (15, 8)
df['APERTURA'] = df['APERTURA'].apply(lambda x: x[0:4])
df = df[['APERTURA', 'DNI']].groupby(['APERTURA']).count()
a4_dims = (15, 8)
fig, ax = plt.subplots(figsize=a4_dims)
ax.set_title('Distribución de aperturas de cuenta por año')
m = sns.barplot(ax = ax, y=df['DNI'], x=df.index)
m.set_xticklabels(rotation=45, labels=df.index)
ax.set(xlabel='Año de apertura', ylabel='Cantidad')
plt.show()
#====================================================================================================================
def histogramas(R_serie, F_serie, M_serie):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 8.27))
plt.subplots_adjust(wspace = 0.4)
fig.suptitle('Distribución de clientes según Recencia, Frecuencia y Monto')
ax1.hist(R_serie, bins = 50, range = [0,100] , facecolor = 'green', alpha = 0.75,
edgecolor = 'black', linewidth = 0.5 )
ax1.set(xlabel='Recencia (días)', ylabel = 'Cantidad')
ax1.tick_params(axis='both', labelrotation = 45)
ax2.hist(F_serie, bins = 50, range = [0,90] , facecolor = 'blue', alpha = 0.75,
edgecolor = 'black', linewidth = 0.5 )
ax2.set(xlabel='Frecuencia')
ax2.tick_params(axis='both', labelrotation = 45)
ax3.hist(M_serie, bins = 50, range = [0,150000] , facecolor = 'red', alpha = 0.75,
edgecolor = 'black', linewidth = 0.5 )
ax3.set(xlabel='Monto (Pesos)')
ax3.tick_params(axis='both', labelrotation = 45)
plt.show()
#==========================================================================================================================
def RScore(x,p,d):
    """Compute the Recency score. x is each record of the rfm['RECENCIA'] series and d[p] is the quantile['RECENCIA'] series."""
if x <= d[p][0.20]:
return 5
elif x <= d[p][0.4]:
return 4
elif x <= d[p][0.6]:
return 3
elif x <= d[p][0.8]:
return 2
else:
return 1
def FMScore(x,p,d):
    """Compute the score for frequency and for monetary value."""
if x <= d[p][0.20]:
return 1
elif x <= d[p][0.4]:
return 2
elif x <= d[p][0.6]:
return 3
elif x <= d[p][0.8]:
return 4
else:
return 5
def quintil_rent(x,p,d):
    """Compute the quintile bucket for profitability (rentabilidad)."""
if x <= d[p][0.20]:
return 'Q1'
elif x <= d[p][0.4]:
return 'Q2'
elif x <= d[p][0.6]:
return 'Q3'
elif x <= d[p][0.8]:
return 'Q4'
else:
return 'Q5'
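# Hedged example of how these helpers are applied (mirrors rfm_scoring below):
#     quantile = rfm[['RECENCIA', 'MONTO', 'FRECUENCIA']].quantile(q=[0.2, 0.4, 0.6, 0.8])
#     rfm['R_Quintil'] = rfm['RECENCIA'].apply(RScore, args=('RECENCIA', quantile))
# A recency below the 20th percentile scores 5 (best), while frequency and monetary
# scores grow with the value.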
#=========================================================================================================================
def rfm_scoring(rfm, fecha_in, fecha_out):
    """Split the recency, frequency and monetary variables into quintiles. It also builds an RFMScore
    string and a Total_score whose value is the sum of the individual scores. Finally, a .csv ready
    for analysis is saved. As arguments the function needs 'rfm', the dataframe produced by the SQL
    query, and 'fecha_in' and 'fecha_out', string versions of the 'fi_int' and 'ff_int' dates used
    to label the name of the saved dataframe."""
quantile = rfm[['RECENCIA', 'MONTO', 'FRECUENCIA']].quantile(q=[0.2,0.4,0.6,0.8])
rfm['R_Quintil'] = rfm['RECENCIA'].apply(RScore,args=('RECENCIA',quantile))
rfm['F_Quintil'] = rfm['FRECUENCIA'].apply(FMScore, args=('FRECUENCIA',quantile))
rfm['M_Quintil'] = rfm['MONTO'].apply(FMScore, args=('MONTO',quantile))
rfm['RFMScore'] = rfm.R_Quintil.map(str) \
+ rfm.F_Quintil.map(str) \
+ rfm.M_Quintil.map(str)
rfm['Total_score'] = rfm['R_Quintil'] + rfm['F_Quintil'] + rfm['M_Quintil']
rfm.to_csv(f'rfm-final-{fecha_in}--{fecha_out}.csv', index = False)
return rfm
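# Hedged example: rfm_scoring(rfm, '20210101', '20210630') adds the quintile and score
# columns and writes 'rfm-final-20210101--20210630.csv' in the working directory.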
#=======================================================================================================
def rentabilidad(fecha_in_1, fecha_out_1):
rfm = pd.read_csv(f'rfm-segmentos-{fecha_in_1}--{fecha_out_1}.csv')
q6 = float(rfm[['RENTABILIDAD']].mean() + 3 * rfm[['RENTABILIDAD']].std())
q0 = float(rfm[['RENTABILIDAD']].mean() - 3 * rfm[['RENTABILIDAD']].std())
    # The following are dataframes containing q0 and q6 (worst and best clients by
    # profitability, respectively); we discard them for the analysis below.
#df_q6 = rfm[rfm['RENTABILIDAD'] > q6 ]
#df_q0 = rfm[rfm['RENTABILIDAD'] < q0 ]
    df_quintil = rfm[(rfm['RENTABILIDAD'] > q0) & (rfm['RENTABILIDAD'] < q6)]
quintiles = df_quintil[['RENTABILIDAD']].quantile(q=[0.2,0.4,0.6,0.8])
rfm['RENT_QUINTIL'] = rfm['RENTABILIDAD'].apply(quintil_rent, args=('RENTABILIDAD',quintiles))
df_quintil_segment = rfm[['RENT_QUINTIL', 'Segment']]
summary = pd.pivot_table(data=df_quintil_segment,
index='RENT_QUINTIL',columns='Segment',
aggfunc='size').apply(lambda x: (x/sum(x))*100,axis=0)
summary.to_csv(f'rentabilidad--{fecha_in_1}-{fecha_out_1}.csv')
fig, ((ax0, ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8, ax9)) = plt.subplots(2, 5, figsize=(15, 9))
plt.subplots_adjust(wspace = 0.5)
fig.suptitle('Distribución de rentabilidad para cada segmento RFM', fontsize=25)
sns.barplot(ax=ax0, y=summary['Campeones'], x=summary.index)
ax0.set(ylabel = 'Campeones (%)')
sns.barplot(ax=ax1, y=summary['Leales'], x=summary.index)
ax1.set(ylabel = 'Leales (%)')
sns.barplot(ax=ax2, y=summary['Potencialmente Leales'], x=summary.index)
ax2.set(ylabel = 'Potencialmente Leales (%)')
sns.barplot(ax=ax3, y=summary['Prometedores'], x=summary.index)
ax3.set(ylabel = 'Prometedores (%)')
sns.barplot(ax=ax4, y=summary['Reciente operativo'], x=summary.index)
ax4.set(ylabel = 'Reciente operativo (%)')
sns.barplot(ax=ax5, y=summary['No se pueden perder'], x=summary.index)
ax5.set(ylabel = 'No se pueden perder (%)')
sns.barplot(ax=ax6, y=summary['Necesitan Atencion'], x=summary.index)
ax6.set(ylabel = 'Necesitan Atencion (%)')
sns.barplot(ax=ax7, y=summary['En Riesgo'], x=summary.index)
ax7.set(ylabel = 'En Riesgo (%)')
sns.barplot(ax=ax8, y=summary['Cercanos a Hibernar'], x=summary.index)
ax8.set(ylabel = 'Cercanos a Hibernar (%)')
sns.barplot(ax=ax9, y=summary['Hibernando'], x=summary.index)
ax9.set(ylabel = 'Hibernando (%)')
return summary
#=======================================================================================================
def rentabilidad_acum(fecha_in_1, fecha_out_1, c):
rfm = pd.read_csv(f'rfm-segmentos-{fecha_in_1}--{fecha_out_1}.csv')
q6 = float(rfm[['RENTABILIDAD']].mean() + 3 * rfm[['RENTABILIDAD']].std())
q0 = float(rfm[['RENTABILIDAD']].mean() - 3 * rfm[['RENTABILIDAD']].std())
    df_percentil = rfm[(rfm['RENTABILIDAD'] > q0) & (rfm['RENTABILIDAD'] < q6)]
percentil = df_percentil[['RENTABILIDAD']].quantile(q=[round(c/100, 2) for c in range(1, 100 + 1)])
lista = []
for i in range(1, 100 + 1):
if i == 1:
lista.append(df_percentil[df_percentil['RENTABILIDAD']<=percentil['RENTABILIDAD'][round(i/100, 2)]]['RENTABILIDAD'].sum())
elif i != 101:
acum_1 = df_percentil[df_percentil['RENTABILIDAD']<=percentil['RENTABILIDAD'][round((i/100)-0.01, 2)]]['RENTABILIDAD'].sum()
acum_2 = df_percentil[df_percentil['RENTABILIDAD']<=percentil['RENTABILIDAD'][round(i/100, 2)]]['RENTABILIDAD'].sum()
lista.append(round(acum_2-acum_1, 2))
else:
pass
dic = {'Percentil': [round(x/100,2) for x in range(1, 100 + 1)], 'Rent_Acum': lista}
    df_acum = pd.DataFrame(dic)
# -*- coding:utf-8 -*-
import re
import logging
import pandas as pd
from contrib.utils.DataCleanCheckTool import DataCleanCheckTool
class CorpusFromEllisQTB(object):
"""
CorpusFromEllis, question_text with blank
    The whole program is made up of a large number of functions.
    The main one is final_process; the others are orchestrated from within final_process.
    Once final_process is done, its output is a list containing all the texts.
    Based on the index, this list is combined with the last function, make_final_list,
    which adds the label information to the data coming out of final_process,
    finally producing a complete corpus with label information.
@classmethod
def read_data_from_csv(cls):
        # First, the tables to read in: the blanks re-extracted from the JSON, tmjcxx, and exercise_package
data_tmjcxx = pd.read_csv("tmjcxx.csv")
data_packageid = pd.read_csv("exercise_package.csv")
        data_stat = pd.read_csv("exercise_status.csv")
from typing import Optional, Union, Tuple, List
from warnings import warn
import os
import time
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import mechanicalsoup
import requests
import json
from chemicalc.utils import decode_base64_dict, find_nearest_idx
from chemicalc.file_mgmt import etc_file_dir, download_bluemuse_files
wmko_options = {
"instrument": ["lris", "deimos", "hires", "esi"],
"mag type": ["Vega", "AB"],
"filter": [
"sdss_r.dat",
"sdss_g.dat",
"sdss_i.dat",
"sdss_u.dat",
"sdss_z.dat",
"Buser_B.dat",
"Buser_V.dat",
"Cousins_R.dat",
"Cousins_I.dat",
],
"template": [
"O5V_pickles_1.fits",
"B5V_pickles_6.fits",
"A0V_pickles_9.fits",
"A5V_pickles_12.fits",
"F5V_pickles_16.fits",
"G5V_pickles_27.fits",
"K0V_pickles_32.fits",
"K5V_pickles_36.fits",
"M5V_pickles_44.fits",
],
"grating (DEIMOS)": ["600Z", "900Z", "1200G", "1200B"],
"grating (LRIS)": ["600/7500", "600/10000", "1200/9000", "400/8500", "831/8200"],
"grism (LRIS)": ["B300", "B600"],
"binning (DEIMOS)": ["1x1"],
"binning (LRIS)": ["1x1", "2x1", "2x2", "3x1"],
"binning (ESI)": ["1x1", "2x2", "2x1", "3x1"],
"binning (HIRES)": ["1x1", "2x1", "2x2", "3x1"],
"slitwidth (DEIMOS)": ["0.75", "1.0", "1.5"],
"slitwidth (LRIS)": ["0.7", "1.0", "1.5"],
"slitwidth (ESI)": ["0.75", "0.3", "0.5", "1.0"],
"slitwidth (HIRES)": ["C5", "E4", "B2", "B5", "E5", "D3"],
"slitwidth arcsec (HIRES)": [1.15, 0.40, 0.57, 0.86, 0.80, 1.72],
"dichroic (LRIS)": ["D560"],
"central wavelength (DEIMOS)": ["5000", "6000", "7000", "8000"],
}
mmt_options = {
"inst_mode": [
"BINOSPEC_1000",
"BINOSPEC_270",
"BINOSPEC_600",
"HECTOSPEC_270",
"HECTOSPEC_600",
],
"template": [
"O5V",
"A0V",
"A5V",
"B0V",
"F0V",
"F5V",
"G0V",
"G2V",
"K0V",
"K5V",
"M5V",
"Moon",
],
"filter": ["r_filt", "g_filt", "i_filt"],
"aptype": ["Round", "Square", "Rectangular"],
}
mse_options = {
"spec_mode": ["LR", "MR", "HR"],
"airmass": ["1.0", "1.2", "1.5"],
"filter": ["u", "g", "r", "i", "z", "Y", "J"],
"src_type": ["extended", "point"],
"template": [
"o5v",
"o9v",
"b1v",
"b2ic",
"b3v",
"b8v",
"b9iii",
"b9v",
"a0iii",
"a0v",
"a2v",
"f0v",
"g0i",
"g2v",
"g5iii",
"k2v",
"k7v",
"m2v",
"flat",
"WD",
"LBG_EW_le_0",
"LBG_EW_0_20",
"LBG_EW_ge_20",
"qso1",
"qso2",
"elliptical",
"spiral_Sc",
"HII",
"PN",
],
}
vlt_options = {
"instruments": ["UVES", "FLAMES-UVES", "FLAMES-GIRAFFE", "X-SHOOTER", "MUSE"],
"src_target_mag_band (MUSE)": [
"B",
"V",
"R",
"I",
"sloan_g_prime",
"sloan_r_prime",
"sloan_i_prime",
"sloan_z_prime",
],
"src_target_mag_band (GIRAFFE)": ["U", "B", "V", "R", "I",],
"src_target_mag_band (UVES)": ["U", "B", "V", "R", "I",],
"src_target_mag_band (X-SHOOTER)": ["U", "B", "V", "R", "I", "J", "H", "K",],
"src_target_mag_system": ["Vega", "AB"],
"src_target_type": ["template_spectrum"],
"src_target_spec_type": [
"Pickles_O5V",
"Pickles_O9V",
"Kurucz_B1V",
"Pickles_B2IV",
"Kurucz_B3V",
"Kurucz_B8V",
"Pickles_B9III",
"Pickles_B9V",
"Pickles_A0III",
"Pickles_A0V",
"Kurucz_A1V",
"Kurucz_F0V",
"Pickles_G0V",
"Kurucz_G2V",
"Pickles_K2V",
"Pickles_K7V",
"Pickles_M2V",
"Planetary Nebula",
"HII Region (ORION)",
"Kinney_ell",
"Kinney_s0",
"Kinney_sa",
"Kinney_sb",
"Kinney_starb1",
"Kinney_starb2",
"Kinney_starb3",
"Kinney_starb4",
"Kinney_starb5",
"Kinney_starb6",
"Galev_E",
"qso-interp",
],
"sky_seeing": ["0.5", "0.6", "0.7", "0.8", "1.0", "1.3", "3.0"],
"uves_det_cd_name": [
"Blue_346",
"Blue_437",
"Red__520",
"Red__580",
"Red__600",
"Red__860",
"Dicroic1_Blue_346",
"Dicroic2_Blue_346",
"Dicroic1_Red__580",
"Dicroic1_Blue_390",
"Dicroic2_Blue_390",
"Dicroic1_Red__564",
"Dicroic2_Blue_437",
"Dicroic2_red__760",
"Dicroic2_Red__860",
],
"uves_slit_width": [
"0.3",
"0.4",
"0.5",
"0.6",
"0.7",
"0.8",
"0.9",
"1.0",
"1.1",
"1.2",
"1.5",
"1.8",
"2.1",
"2.4",
"2.7",
"3.0",
"5.0",
"10.0",
],
"uves_ccd_binning": ["1x1", "1x1v", "2x2", "2x1", "3x2"],
"giraffe_sky_sampling_mode": ["MEDUSA", "IFU052", "ARGUS052", "ARGUS030",],
"giraffe_slicer": [
"LR01",
"LR02",
"LR03",
"LR04",
"LR05",
"LR06",
"LR07",
"LR08",
"HR01",
"HR02",
"HR03",
"HR04",
"HR05A",
"HR05B",
"HR06",
"HR07A",
"HR07B",
"HR08",
"HR09A",
"HR09B",
"HR10",
"HR11",
"HR12",
"HR13",
"HR14A",
"HR14B",
"HR15",
"HR15n",
"HR16",
"HR17A",
"HR17B",
"HR17B",
"HR18",
"HR19A",
"HR19B",
"HR20A",
"HR20B",
"HR21",
"HR22A",
"HR22B",
],
"giraffe_ccd_mode": ["standard", "fast", "slow"],
"xshooter_uvb_slitwidth": ["0.5", "0.8", "1.0", "1.3", "1.6", "5.0"],
"xshooter_vis_slitwidth": ["0.4", "0.7", "0.9", "1.2", "1.5", "5.0"],
"xshooter_nir_slitwidth": ["0.4", "0.6", "0.9", "1.2", "1.5", "5.0"],
"xshooter_uvb_ccd_binning": [
"high1x1slow",
"high1x2slow",
"high2x2slow",
"low1x1fast",
"low1x2fast",
"low2x2fast",
],
"xshooter_vis_ccd_binning": [
"high1x1slow",
"high1x2slow",
"high2x2slow",
"low1x1fast",
"low1x2fast",
"low2x2fast",
],
"muse_mode": [
"WFM_NONAO_N", # Wide Field Mode without AO, nominal wavelength range
"WFM_NONAO_E", # Wide Field Mode without AO, extended wavelength range
"WFM_AO_N", # Wide Field Mode with AO, nominal wavelength range
"WFM_AO_E", # Wide Field Mode with AO, extended wavelength range
"NFM_AO_N",
], # Narrow Field Mode with AO, nominal wavelength range
"muse_spatial_binning": ["1", "2", "3", "4", "5", "10", "30", "60", "100"],
"muse_spectra_binning": [
"1",
"2",
"3",
"4",
"5",
"10",
"20",
"30",
"40",
"50",
"100",
"200",
"400",
"800",
"1600",
"3200",
],
}
lco_options = {
"template": ["flat", "O5V", "B0V", "A0V", "F0V", "G0V", "K0V", "M0V"],
"tempfilter": ["u", "g", "r", "i", "z"],
"telescope": ["MAGELLAN1", "MAGELLAN2"],
"MAGELLAN1_instrument": ["IMACS", "MAGE"],
"MAGELLAN2_instrument": ["LDSS3", "MIKE"],
"IMACS_mode": [
"F2_150_11",
"F2_200_15",
"F2_300_17",
"F2_300_26",
"F4_150-3_3.4",
"F4_300-4_6.0",
"F4_600-8_9.3",
"F4_600-13_14.0",
"F4_1200-17_19.0",
"F4_1200-27_27.0",
"F4_1200-27_33.5",
],
"MAGE_mode": ["ECHELLETTE"],
"MIKE_mode": ["BLUE", "RED"],
"LDSS3_mode": ["VPHALL", "VPHBLUE", "VPHRED"],
"binspat": ["1", "2", "3", "4", "5", "6", "7", "8"],
"binspec": ["1", "2", "3", "4", "5", "6", "7", "8"],
"nmoon": [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
],
}
class Sig2NoiseQuery:
"""
Base class for ETC queries
"""
def __init__(self):
pass
def query_s2n(self) -> None:
pass
class Sig2NoiseWMKO(Sig2NoiseQuery):
"""
Superclass for WMKO ETC Queries
:param str instrument: Keck instrument. Must be "DEIMOS", "LRIS", "HIRES", or "ESI"
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
instrument: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseQuery.__init__(self)
if instrument not in wmko_options["instrument"]:
raise KeyError(f"{instrument} not one of {wmko_options['instrument']}")
if magtype not in wmko_options["mag type"]:
raise KeyError(f"{magtype} not one of {wmko_options['mag type']}")
if band not in wmko_options["filter"]:
raise KeyError(f"{band} not one of {wmko_options['filter']}")
if template not in wmko_options["template"]:
raise KeyError(f"{template} not one of {wmko_options['template']}")
self.instrument = instrument
self.mag = mag
self.magtype = magtype
self.filter = band
self.template = template
self.exptime = exptime
self.airmass = airmass
self.seeing = seeing
self.redshift = redshift
def query_s2n(self) -> None:
"""
No generic S/N query, see specific instrument subclasses
:return:
"""
raise NotImplementedError(
"No generic S/N query, see specific instrument children classes"
)
class Sig2NoiseDEIMOS(Sig2NoiseWMKO):
"""
Keck/DEIMOS S/N Query (http://etc.ucolick.org/web_s2n/deimos)
:param str grating: DEIMOS grating. Must be one of "600Z", "900Z", "1200G", or "1200B".
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str cwave: Central wavelength of grating. Must be one of "5000", "6000", "7000", or "8000"
:param str slitwidth: Width of slit in arcseconds. Must be "0.75", "1.0", or "1.5"
:param str binning: spatial x spectral binning. "1x1" is the only option.
    :param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
grating: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
cwave: str = "7000",
slitwidth: str = "0.75",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"deimos",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if grating not in wmko_options["grating (DEIMOS)"]:
raise KeyError(f"{grating} not one of {wmko_options['grating (DEIMOS)']}")
if binning not in wmko_options["binning (DEIMOS)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (DEIMOS)']}")
if slitwidth not in wmko_options["slitwidth (DEIMOS)"]:
raise KeyError(
f"{slitwidth} not one of {wmko_options['slitwidth (DEIMOS)']}"
)
if cwave not in wmko_options["central wavelength (DEIMOS)"]:
raise KeyError(
f"{cwave} not one of {wmko_options['central wavelength (DEIMOS)']}"
)
self.grating = grating
self.binning = binning
self.slitwidth = slitwidth
self.cwave = cwave
def query_s2n(self):
"""
Query the DEIMOS ETC (http://etc.ucolick.org/web_s2n/deimos)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/deimos"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["grating"] = self.grating
form["cwave"] = self.cwave
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
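# Hedged usage sketch (illustrative values, not recommended settings):
#     q = Sig2NoiseDEIMOS("1200G", exptime=1800, mag=19.5, template="K0V_pickles_32.fits")
#     snr = q.query_s2n()  # snr[0]: wavelength grid, snr[1]: S/N as returned by the ETC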
class Sig2NoiseLRIS(Sig2NoiseWMKO):
"""
Keck/LRIS S/N Query (http://etc.ucolick.org/web_s2n/lris)
:param str grating: LRIS red arm grating.
Must be one of "600/7500", "600/10000", "1200/9000", "400/8500", or "831/8200".
:param str grism: LRIS blue arm grism. Must be one of "B300" or "B600".
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str dichroic: LRIS dichroic separating the red and blue arms. "D560" is the only option currently.
:param str slitwidth: Width of slit in arcseconds. Must be one of "0.7", "1.0", or "1.5"
:param str binning: spatial x spectral binning. Must be one of "1x1", "2x1", "2x2", or "3x1"
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
grating: str,
grism: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
dichroic: str = "D560",
slitwidth: str = "0.7",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"lris",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if grating not in wmko_options["grating (LRIS)"]:
raise KeyError(f"{grating} not one of {wmko_options['grating (LRIS)']}")
if grism not in wmko_options["grism (LRIS)"]:
raise KeyError(f"{grism} not one of {wmko_options['grism (LRIS)']}")
if binning not in wmko_options["binning (LRIS)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (LRIS)']}")
if slitwidth not in wmko_options["slitwidth (LRIS)"]:
raise KeyError(f"{slitwidth} not one of {wmko_options['slitwidth (LRIS)']}")
if dichroic not in wmko_options["dichroic (LRIS)"]:
raise KeyError(f"{dichroic} not one of {wmko_options['dichroic (LRIS)']}")
self.grating = grating
self.grism = grism
self.binning = binning
self.slitwidth = slitwidth
self.dichroic = dichroic
def query_s2n(self):
"""
Query the LRIS ETC (http://etc.ucolick.org/web_s2n/lris)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/lris"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["grating"] = self.grating
form["grism"] = self.grism
form["dichroic"] = self.dichroic
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
class Sig2NoiseESI(Sig2NoiseWMKO):
"""
Keck/ESI S/N Query (http://etc.ucolick.org/web_s2n/esi)
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str slitwidth: Width of slit in arcseconds. Must be one of "0.75", "0.3", "0.5", or "1.0"
:param str binning: spatial x spectral binning. Must be one of "1x1", "2x1", "2x2", or "3x1"
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
slitwidth: str = "0.75",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"lris",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if binning not in wmko_options["binning (ESI)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (ESI)']}")
if slitwidth not in wmko_options["slitwidth (ESI)"]:
raise KeyError(f"{slitwidth} not one of {wmko_options['slitwidth (ESI)']}")
self.binning = binning
self.slitwidth = slitwidth
def query_s2n(self):
"""
Query the ESI ETC (http://etc.ucolick.org/web_s2n/esi)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/esi"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
class Sig2NoiseHIRES(Sig2NoiseWMKO):
"""
Keck/HIRES S/N Query (http://etc.ucolick.org/web_s2n/hires)
:param str slitwidth: HIRES Decker. Must be "C5" (1.15"), "E4" (0.40"), "B2" (0.57"),
"B5" (0.86"), "E5" (0.80"), or "D3" (1.72")
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str template: Spectral template. For valid options see s2n.wmko_options['template'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str band: Magnitude band. For valid options see s2n.wmko_options['filter'].
:param str binning: spatial x spectral binning. Must be one of "1x1", "2x1", "2x2", or "3x1".
:param float airmass: Airmass of observation
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float redshift: Redshift of the target
"""
def __init__(
self,
slitwidth: str,
exptime: float,
mag: float,
template: str,
magtype: str = "Vega",
band: str = "Cousins_I.dat",
binning: str = "1x1",
airmass: float = 1.1,
seeing: float = 0.75,
redshift: float = 0,
):
Sig2NoiseWMKO.__init__(
self,
"hires",
exptime,
mag,
template,
magtype,
band,
airmass,
seeing,
redshift,
)
if binning not in wmko_options["binning (HIRES)"]:
raise KeyError(f"{binning} not one of {wmko_options['binning (HIRES)']}")
if slitwidth not in wmko_options["slitwidth (HIRES)"]:
raise KeyError(
f"{slitwidth} not one of {wmko_options['slitwidth (HIRES)']}"
)
self.binning = binning
self.slitwidth = slitwidth
def query_s2n(self):
"""
Query the HIRES ETC (http://etc.ucolick.org/web_s2n/hires)
:return:
"""
url = "http://etc.ucolick.org/web_s2n/hires"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form["slitwidth"] = self.slitwidth
form["binning"] = self.binning
form["exptime"] = str(self.exptime)
form["mag"] = str(self.mag)
form["ffilter"] = self.filter
if self.magtype.lower() == "vega":
form["mtype"] = "1"
elif self.magtype.lower() == "ab":
form["mtype"] = "2"
form["seeing"] = str(self.seeing)
form["template"] = self.template
form["airmass"] = str(self.airmass)
form["redshift"] = str(self.redshift)
data = browser.submit_selected().json()
snr = np.array(data["s2n"]).T
return snr
class Sig2NoiseHectoBinoSpec(Sig2NoiseQuery):
"""
MMT/Hectospec and MMT/Binospec S/N Query (http://hopper.si.edu/etc-cgi/TEST/sao-etc)
:param str inst_mode: Instrument and mode.
One of: "BINOSPEC_1000", "BINOSPEC_270", "BINOSPEC_600", "HECTOSPEC_270", or "HECTOSPEC_600"
:param float exptime: Exposure time in seconds
:param float mag: AB Magnitude of source
:param str band: Magnitude band. One of "r_filt", "g_filt", or "i_filt"
:param str template: Spectral template. For valid options see s2n.mmt_options['template'].
:param float seeing: Seeing (FWHM) of observation in arcseconds
:param float airmass: Airmass of observation
:param float moonage: Moon Phase (days since new moon)
:param str aptype: Aperture shape. Must be one of "Round", "Square", or "Rectangular".
:param float apwidth: Width of aperture in arcseconds
"""
def __init__(
self,
inst_mode: str,
exptime: float,
mag: float,
band: str = "g_filt",
template: str = "K0V",
seeing: float = 0.75,
airmass: float = 1.1,
moonage: float = 0.0,
aptype: str = "Round",
apwidth: float = 1.0,
):
Sig2NoiseQuery.__init__(self)
if inst_mode not in mmt_options["inst_mode"]:
raise KeyError(f"{inst_mode} not one of {mmt_options['inst_mode']}")
self.inst_mode = inst_mode
self.exptime = exptime
self.mag = mag
if band not in mmt_options["filter"]:
raise KeyError(f"{band} not one of {mmt_options['filter']}")
self.band = band
if template not in mmt_options["template"]:
raise KeyError(f"{template} not one of {mmt_options['template']}")
self.template = template
self.seeing = seeing
self.airmass = airmass
self.moonage = moonage
if aptype not in mmt_options["aptype"]:
raise KeyError(f"{aptype} not one of {mmt_options['aptype']}")
self.aptype = aptype
self.apwidth = apwidth
def query_s2n(self):
"""
Query the Hectospec/Binospec ETC (http://hopper.si.edu/etc-cgi/TEST/sao-etc)
:return:
"""
url = "http://hopper.si.edu/etc-cgi/TEST/sao-etc"
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="instmode", value="")
form.new_control(type="select", name="objspec_", value="")
form.new_control(type="select", name="objspec__", value="")
form["instmode"] = self.inst_mode
form["exptime"] = self.exptime
form["ABmag"] = self.mag
form["bandfilter"] = self.band
form["objspec_"] = "Stars"
form["objspec__"] = self.template
form["objspec"] = f"Stars/{self.template}.tab"
form["srcext"] = 0.0
form["seeing"] = self.seeing
form["airmass"] = self.airmass
form["moonage"] = self.moonage
form["aptype"] = self.aptype
form["apwidth"] = self.apwidth
data = browser.submit_selected()
snr_text = data.text.split("---")[-1]
snr = pd.DataFrame([row.split("\t") for row in snr_text.split("\n")[1:-1]])
snr.index = snr.pop(0)
snr.drop([1, 2, 3, 4], axis=1, inplace=True)
snr = np.vstack([snr.index.values, snr[5].values]).astype(float)
snr[0] *= 1e4
return snr
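# Hedged usage sketch (illustrative values only):
#     q = Sig2NoiseHectoBinoSpec("BINOSPEC_600", exptime=3600, mag=20.0,
#                                band="g_filt", template="K0V")
#     snr = q.query_s2n()  # snr[0]: wavelength (scaled by 1e4 above), snr[1]: S/N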
class Sig2NoiseVLT(Sig2NoiseQuery):
"""
Superclass for VLT ETC Queries
:param str instrument: VLT instrument. Must be "UVES", "FLAMES-UVES", "FLAMES-GIRAFFE", "X-SHOOTER", or "MUSE"
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (<instrument>)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
"""
# TODO: Implement MARCS stellar template selection
def __init__(
self,
instrument: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
**kwargs,
):
Sig2NoiseQuery.__init__(self)
if instrument not in vlt_options["instruments"]:
raise KeyError(f"{instrument} not one of {vlt_options['instruments']}")
if not exptime > 0:
raise ValueError("Exposure Time must be positive")
if magtype not in vlt_options["src_target_mag_system"]:
raise KeyError(
f"{magtype} not one of {vlt_options['src_target_mag_system']}"
)
if template_type not in vlt_options["src_target_type"]:
raise KeyError(
f"{template_type} not one of {vlt_options['src_target_type']}"
)
if template not in vlt_options["src_target_spec_type"]:
raise KeyError(
f"{template} not one of {vlt_options['src_target_spec_type']}"
)
if not redshift >= 0:
raise ValueError("Redshift must be positive")
if not airmass >= 1.0:
raise ValueError("Airmass must be > 1.0")
if moon_phase < 0.0 or moon_phase > 1.0:
raise ValueError("moon_phase must be between 0.0 (new) and 1.0 (full)")
if seeing not in vlt_options["sky_seeing"]:
raise KeyError(f"{seeing} not one of {vlt_options['sky_seeing']}")
self.instrument = instrument
self.exptime = exptime
self.mag = mag
self.band = band
self.magtype = magtype
self.template_type = template_type
self.template = template
self.redshift = redshift
self.airmass = airmass
self.moon_phase = moon_phase
self.seeing = seeing
self.kwargs = kwargs
def query_s2n(self) -> None:
"""
No generic S/N query, see specific instrument subclasses
:return:
"""
raise NotImplementedError(
"No generic S/N query, see specific instrument children classes"
)
class Sig2NoiseUVES(Sig2NoiseVLT):
"""
VLT/UVES S/N Query (http://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES++INS.MODE=spectro)
:param str detector: UVES detector setup. For valid options see s2n.vlt_options['uves_det_cd_name'].
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (UVES)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param str slitwidth: Width of slit in arcseconds. For valid options see s2n.vlt_options['uves_slit_width'].
:param str binning: spatial x spectral binning. For valid options see s2n.vlt_options['uves_ccd_binning'].
:param bool mid_order_only: If True, returns only peak S/N in each order.
Otherwise the S/N at both ends of each order are also included.
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
"""
def __init__(
self,
detector: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
slitwidth: str = "1.0",
binning: str = "1x1",
mid_order_only: bool = False,
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"UVES",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "http://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES++INS.MODE=spectro"
if self.band not in vlt_options["src_target_mag_band (UVES)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (UVES)']}"
)
if detector not in vlt_options["uves_det_cd_name"]:
raise KeyError(f"{detector} not one of {vlt_options['uves_det_cd_name']}")
if slitwidth not in vlt_options["uves_slit_width"]:
raise KeyError(f"{slitwidth} not one of {vlt_options['uves_slit_width']}")
if binning not in vlt_options["uves_ccd_binning"]:
raise KeyError(f"{binning} not one of {vlt_options['uves_ccd_binning']}")
self.detector = detector
self.slitwidth = slitwidth
self.binning = binning
self.mid_order_only = mid_order_only
self.data = None
def query_s2n(self):
"""
Query the UVES ETC (http://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES++INS.MODE=spectro)
:return:
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "UVES"
form["INS.MODE"] = "spectro"
form["INS.PRE_SLIT.FILTER.NAME"] = "ADC"
form["INS.IMAGE_SLICERS.NAME"] = "None"
form["INS.BELOW_SLIT.FILTER.NAME"] = "NONE"
form["INS.DET.SPECTRAL_FORMAT.NAME"] = "STANDARD"
form["INS.DET.CD.NAME"] = self.detector
form["INS.SLIT.FROM_USER.WIDTH.VAL"] = self.slitwidth
form["INS.DET.CCD.BINNING.VAL"] = self.binning
form["INS.DET.EXP.TIME.VAL"] = self.exptime
form["INS.GEN.TABLE.SF.SWITCH.VAL"] = "yes"
form["INS.GEN.TABLE.RES.SWITCH.VAL"] = "yes"
form["INS.GEN.GRAPH.S2N.SWITCH.VAL"] = "yes"
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
if self.mid_order_only:
snr = self.parse_etc_mid()
else:
snr = self.parse_etc()
return snr
def parse_etc(self):
mit_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[1].split("</table>")[0]
)[0]
mit_tab1.columns = mit_tab1.loc[0]
mit_tab1.drop(0, axis=0, inplace=True)
mit_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[2].split("</table>")[0]
)[0]
mit_tab2.columns = mit_tab2.loc[1]
mit_tab2.drop([0, 1], axis=0, inplace=True)
eev_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[3].split("</table>")[0]
)[0]
eev_tab1.columns = eev_tab1.loc[0]
eev_tab1.drop(0, axis=0, inplace=True)
eev_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[4].split("</table>")[0]
)[0]
eev_tab2.columns = eev_tab2.loc[1]
eev_tab2.drop([0, 1], axis=0, inplace=True)
mit_wave_mid = mit_tab1["wav of central column (nm)"]
mit_wave_min = mit_tab1["FSR l Min (nm)"]
mit_wave_max = mit_tab1["FSR l Max (nm)"]
mit_snr_min = mit_tab2["S/N*"].iloc[:, 0]
mit_snr_mid = mit_tab2["S/N*"].iloc[:, 1]
mit_snr_max = mit_tab2["S/N*"].iloc[:, 2]
eev_wave_mid = eev_tab1["wav of central column (nm)"]
eev_wave_min = eev_tab1["FSR l Min (nm)"]
eev_wave_max = eev_tab1["FSR l Max (nm)"]
eev_snr_min = eev_tab2["S/N*"].iloc[:, 0]
eev_snr_mid = eev_tab2["S/N*"].iloc[:, 1]
eev_snr_max = eev_tab2["S/N*"].iloc[:, 2]
mit_wave = pd.concat([mit_wave_min, mit_wave_mid, mit_wave_max])
mit_snr = pd.concat([mit_snr_min, mit_snr_mid, mit_snr_max])
mit_snr.index = mit_wave
mit_snr.sort_index(inplace=True)
mit_snr = mit_snr.groupby(mit_snr.index).max()
eev_wave = pd.concat([eev_wave_min, eev_wave_mid, eev_wave_max])
eev_snr = pd.concat([eev_snr_min, eev_snr_mid, eev_snr_max])
eev_snr.index = eev_wave
eev_snr.sort_index(inplace=True)
eev_snr = eev_snr.groupby(eev_snr.index).max()
uves_snr = pd.concat([eev_snr, mit_snr])
uves_snr = np.vstack([uves_snr.index.values, uves_snr.iloc[:].values]).astype(
float
)
uves_snr[0] *= 10
return uves_snr
def parse_etc_mid(self):
snr_url1 = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[1].split('" TITLE')[0]
)
snr_url2 = (
"https://www.eso.org"
+ self.data.text.split('ASCII DATA INFO: URL="')[2].split('" TITLE')[0]
)
snr_txt1 = requests.post(snr_url1).text
snr_txt2 = requests.post(snr_url2).text
snr1 = pd.DataFrame([row.split("\t") for row in snr_txt1.split("\n")[:-1]])
snr2 = pd.DataFrame([row.split("\t") for row in snr_txt2.split("\n")[:-1]])
uves_snr = pd.concat([snr1, snr2])
uves_snr.index = uves_snr.pop(0)
uves_snr.sort_index(inplace=True)
uves_snr = np.vstack([uves_snr.index.values, uves_snr[1].values]).astype(float)
uves_snr[0] *= 10
return uves_snr
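# Illustrative usage sketch for Sig2NoiseUVES. The detector value is a
# placeholder and must be a valid entry of vlt_options['uves_det_cd_name'];
# exptime and mag are example values, band/slitwidth/binning match the class
# defaults, and mid_order_only=True requests only the per-order peak S/N.
# query_s2n() performs a live ETC request and returns a 2 x N float array of
# [wavelength (Angstrom), S/N].
def _example_uves_query(detector):  # pragma: no cover - illustrative only
    query = Sig2NoiseUVES(
        detector,
        exptime=1200,
        mag=12.0,
        band="V",
        slitwidth="1.0",
        binning="1x1",
        mid_order_only=True,
    )
    return query.query_s2n()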
class Sig2NoiseFLAMESUVES(Sig2NoiseVLT):
"""
VLT/FLAMES-UVES S/N Query (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES+INS.MODE=FLAMES)
:param str detector: UVES detector setup. For valid options see s2n.vlt_options['uves_det_cd_name'].
:param float exptime: Exposure time in seconds
:param float mag: Magnitude of source
:param str band: Magnitude band. For valid options see s2n.vlt_options['src_target_mag_band (UVES)'].
:param str magtype: Magnitude System. Either "Vega" or "AB"
:param str template_type: Type of SED template. For now, only "template_spectrum" is supported.
:param str template: Spectral template. For valid options see s2n.vlt_options['src_target_spec_type'].
:param float redshift: Redshift of the target
:param float airmass: Airmass of observation
:param float moon_phase: Moon Phase between 0.0 (new) and 1.0 (full)
:param str seeing: Seeing (FWHM) of observation in arcseconds.
For valid options see s2n.vlt_options['sky_seeing'].
:param bool mid_order_only: If True, returns only peak S/N in each order.
Otherwise the S/N at both ends of each order are also included.
:param \**kwargs: Other entries in the ETC web form to set.
To see what options are available, an inspection of the ETC website is necessary.
"""
def __init__(
self,
detector: str,
exptime: float,
mag: float,
band: str = "V",
magtype: str = "Vega",
template_type: str = "template_spectrum",
template: str = "Pickles_K2V",
redshift: float = 0,
airmass: float = 1.1,
moon_phase: float = 0.0,
seeing: str = "0.8",
mid_order_only: bool = False,
**kwargs,
):
Sig2NoiseVLT.__init__(
self,
"FLAMES-UVES",
exptime,
mag,
band,
magtype,
template_type,
template,
redshift,
airmass,
moon_phase,
seeing,
**kwargs,
)
self.url = "https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES+INS.MODE=FLAMES"
if self.band not in vlt_options["src_target_mag_band (UVES)"]:
raise KeyError(
f"{src_target_mag_band} not one of {vlt_options['src_target_mag_band (UVES)']}"
)
if detector not in vlt_options["uves_det_cd_name"]:
raise KeyError(f"{detector} not one of {vlt_options['uves_det_cd_name']}")
self.detector = detector
self.mid_order_only = mid_order_only
self.data = None
def query_s2n(self):
"""
Query the FLAMES-UVES ETC (https://www.eso.org/observing/etc/bin/gen/form?INS.NAME=UVES+INS.MODE=FLAMES)
:return:
"""
url = self.url
browser = mechanicalsoup.StatefulBrowser()
browser.open(url)
form = browser.select_form()
form.new_control(type="select", name="SRC.TARGET.MAG.BAND", value="")
form.new_control(type="select", name="SKY.SEEING.ZENITH.V", value="")
form["POSTFILE.FLAG"] = 0
# Source Parameters
form["SRC.TARGET.MAG"] = self.mag
form["SRC.TARGET.MAG.BAND"] = self.band
form["SRC.TARGET.MAG.SYSTEM"] = self.magtype
form["SRC.TARGET.TYPE"] = self.template_type
form["SRC.TARGET.SPEC.TYPE"] = self.template
form["SRC.TARGET.REDSHIFT"] = self.redshift
form["SRC.TARGET.GEOM"] = "seeing_ltd"
# Sky Parameters
form["SKY.AIRMASS"] = self.airmass
form["SKY.MOON.FLI"] = self.moon_phase
form["USR.SEEING.OR.IQ"] = "seeing_given"
form["SKY.SEEING.ZENITH.V"] = self.seeing
# Default Sky Background
form["almanac_time_option"] = "almanac_time_option_ut_time"
form["SKYMODEL.TARGET.ALT"] = 65.38
form["SKYMODEL.MOON.SUN.SEP"] = 0
# Instrument Specifics
form["INS.NAME"] = "UVES"
form["INS.MODE"] = "FLAMES"
form["INS.DET.CD.NAME"] = self.detector
form["INS.DET.EXP.TIME.VAL"] = self.exptime
form["INS.GEN.TABLE.SF.SWITCH.VAL"] = "yes"
form["INS.GEN.TABLE.RES.SWITCH.VAL"] = "yes"
form["INS.GEN.GRAPH.S2N.SWITCH.VAL"] = "yes"
for key in self.kwargs:
form[key] = self.kwargs[key]
self.data = browser.submit_selected()
if self.mid_order_only:
snr = self.parse_etc_mid()
else:
snr = self.parse_etc()
return snr
def parse_etc(self):
mit_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[1].split("</table>")[0]
)[0]
mit_tab1.columns = mit_tab1.loc[0]
mit_tab1.drop(0, axis=0, inplace=True)
mit_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[2].split("</table>")[0]
)[0]
mit_tab2.columns = mit_tab2.loc[1]
mit_tab2.drop([0, 1], axis=0, inplace=True)
eev_tab1 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[3].split("</table>")[0]
)[0]
eev_tab1.columns = eev_tab1.loc[0]
eev_tab1.drop(0, axis=0, inplace=True)
eev_tab2 = pd.read_html(
'<table class="echelleTable'
+ self.data.text.split('<table class="echelleTable')[4].split("</table>")[0]
)[0]
eev_tab2.columns = eev_tab2.loc[1]
eev_tab2.drop([0, 1], axis=0, inplace=True)
mit_wave_mid = mit_tab1["wav of central column (nm)"]
mit_wave_min = mit_tab1["FSR l Min (nm)"]
mit_wave_max = mit_tab1["FSR l Max (nm)"]
mit_snr_min = mit_tab2["S/N*"].iloc[:, 0]
mit_snr_mid = mit_tab2["S/N*"].iloc[:, 1]
mit_snr_max = mit_tab2["S/N*"].iloc[:, 2]
eev_wave_mid = eev_tab1["wav of central column (nm)"]
eev_wave_min = eev_tab1["FSR l Min (nm)"]
eev_wave_max = eev_tab1["FSR l Max (nm)"]
eev_snr_min = eev_tab2["S/N*"].iloc[:, 0]
eev_snr_mid = eev_tab2["S/N*"].iloc[:, 1]
eev_snr_max = eev_tab2["S/N*"].iloc[:, 2]
mit_wave = pd.concat([mit_wave_min, mit_wave_mid, mit_wave_max])
mit_snr = pd.concat([mit_snr_min, mit_snr_mid, mit_snr_max])
mit_snr.index = mit_wave
mit_snr.sort_index(inplace=True)
mit_snr = mit_snr.groupby(mit_snr.index).max()
eev_wave = pd.concat([eev_wave_min, eev_wave_mid, eev_wave_max])
eev_snr = | pd.concat([eev_snr_min, eev_snr_mid, eev_snr_max]) | pandas.concat |
from bs4 import BeautifulSoup, element as bs4_element
import numpy as np
import pandas as pd
import re
import requests
from typing import Optional
from .readers import parse_oakland_excel
from ..caada_typing import stringlike
from ..caada_errors import HTMLParsingError, HTMLRequestError
from ..caada_logging import logger
##############
# PORT OF LA #
##############
def _convert_la_numbers(val):
# If there happens to be a ',' two characters from the end, it should probably be a decimal point.
val = re.sub(r',\d\d$', '.', val.strip())
# Then just remove the remaining commas plus any percent signs
val = re.sub(r'[,%]', '', val)
try:
return float(val)
except ValueError:
# This will handle empty cells (e.g. that haven't been filled yet) and misformatted cells (e.g. one number was
# "2,406.662.05" - two decimal points)
return np.nan
def get_all_la_port_container_data(index: str = 'datetime') -> pd.DataFrame:
"""Get Port of LA container data for all years from 1995 to present.
Parameters
----------
index
How to index the dataframe. See the `index` parameter in :func:`get_la_port_container_data` for details.
Returns
-------
pandas.DataFrame
A dataframe containing data for all years. Will be container moves broken down by import vs. export and
empty vs. full.
"""
this_year = pd.Timestamp.now().year
dfs = []
for yr in range(1995, this_year+1):
dfs.append( get_la_port_container_data(yr, index=index) )
return pd.concat(dfs, axis=0)
def get_la_port_container_data(year: int, index: str = 'datetime') -> pd.DataFrame:
"""Get Port of LA container data for a given year, return as a dataframe.
Parameters
----------
year
The year to get data for. The Port of LA keeps monthly data for 1995 and on; years before 1995 will likely
fail.
index
How to index the returned dataframe. `"datetime"` (the default) will create a datetime index; it will also
remove the year total rows. `"table"` will keep the table's original index (as strings) and will retain the
year summary rows.
Returns
-------
pd.DataFrame
A dataframe containing the data for the requested year. Will be container moves broken down by import vs. export
and empty vs. full.
"""
if index == 'datetime':
parse_year = year
elif index == 'table':
parse_year = None
else:
raise ValueError('"{}" is not one of the allowed values for index'.format(index))
r = requests.get('https://www.portoflosangeles.org/business/statistics/container-statistics/historical-teu-statistics-{:04d}'.format(year))
if r.status_code == 200:
return _parse_la_port_html(r.content, parse_year)
elif r.status_code == 404:
# Page not found, usually because you asked for a year that isn't online
raise HTMLRequestError('Failed to retrieve the Port of LA page for {}. Their server may be down, or the '
'year you requested may be out of range. Years before 1995 are not available.'.format(year))
else:
raise HTMLRequestError('Failed to retrieve the Port of LA page for {}. HTML response code was {}'.format(year, r.status_code))
def _parse_la_port_html(html: stringlike, year: Optional[int] = None) -> pd.DataFrame:
"""Parse LA port container data from HTML into a dataframe.
Parameters
----------
html
The raw HTML from the Port of LA "historical-teu-statistics" page.
year
Which year the page is for. If given, the returned dataframe will have a datetime index, and the year summary
rows are removed. If not given, the dataframe uses the original table row labels (as strings) and retains the
year summary rows.
Returns
-------
pd.DataFrame
The dataframe with the container data.
"""
soup = BeautifulSoup(html, 'html.parser')
# Should be exactly one table on the page - find it
table = soup('table')
if len(table) != 1:
raise HTMLParsingError('Expected exactly one table, got {}'.format(len(table)))
else:
table = table[0]
# Get the rows of the table - the first will give us the header, the rest will give
# us the data. Read it into a dict that can be easily converted to a dataframe
tr_tags = table('tr')
header = [_stdize_la_table_header(tag.text) for tag in tr_tags[0]('td')]
index = []
df_dict = {k: [] for k in header[1:]}
for row in tr_tags[1:]:
row_data = [tag.text.strip() if i == 0 else _convert_la_numbers(tag.text) for i, tag in enumerate(row('td'))]
index.append(row_data[0])
for i, k in enumerate(header[1:], start=1):
df_dict[k].append(row_data[i])
df = pd.DataFrame(df_dict, index=index)
# Lastly, convert the index to a datetime index if we were given the year. We'll check that the dataframe's first
# 12 indices are the months
if year is not None:
start_date = '{:04d}-01'.format(year)
end_date = '{:04d}-12'.format(year)
date_index = pd.date_range(start_date, end_date, freq='MS')
if index[:12] != date_index.strftime('%B').tolist():
raise HTMLParsingError('First twelve rows of the table did not have month names as index')
df = df.iloc[:12, :]
df.index = date_index
_check_la_sums(df, year)
return df
def _stdize_la_table_header(column_header: str):
parts = column_header.strip().split()
if len(parts) == 0:
return ''
elif len(parts) == 2:
if re.search(r'load', parts[0], re.IGNORECASE):
parts[0] = 'Full'
elif re.search(r'empty', parts[0], re.IGNORECASE):
parts[0] = 'Empty'
elif re.search(r'total', parts[0], re.IGNORECASE):
parts[0] = 'Total'
else:
raise HTMLParsingError('Unknown LA container table header: {}'.format(parts[0]))
if re.search(r'import', parts[1], re.IGNORECASE):
parts[1] = 'Imports'
elif re.search(r'export', parts[1], re.IGNORECASE):
parts[1] = 'Exports'
elif re.search(r'teu', parts[1], re.IGNORECASE):
parts[1] = 'TEUs'
else:
            raise HTMLParsingError('Unknown LA container table header: {}'.format(parts[1]))
return ' '.join(parts)
elif len(parts) == 3 and re.search(r'change', column_header, re.IGNORECASE):
return 'Prior Year Change (%)'
else:
raise HTMLParsingError('Unexpected LA container table header: {}'.format(column_header))
def _check_la_sums(la_df: pd.DataFrame, year):
def check(total_col, col1, col2):
return (la_df[total_col] - (la_df[col1] + la_df[col2])).abs().max() <= 1
# The 1995 LA container table erroneously put "Total TEUs" in the header but then meant "Total Exports". So check
# their sums for parity, and fix that year
if year == 1995:
logger.warning('1995 LA container table has known issue of mislabeled Total Exports and Total TEUs - fixing')
_fix_la_1995(la_df)
if not check('Total Imports', 'Full Imports', 'Empty Imports'):
logger.warning('%d LA container table - total imports do not match sum of full and empty imports', year)
if not check('Total Exports', 'Full Exports', 'Empty Exports'):
logger.warning('%d LA container table - total exports do not match sum of full and empty exports', year)
if not check('Total TEUs', 'Total Imports', 'Total Exports'):
logger.warning('%d LA container table - total TEUs do not match sum of imports and exports', year)
def _fix_la_1995(la_df: pd.DataFrame):
la_df.rename(columns={'Total TEUs': 'Total Exports'}, inplace=True)
la_df['Total TEUs'] = la_df['Total Exports'] + la_df['Total Imports']
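# Illustrative usage sketch (performs live HTTP requests against
# portoflosangeles.org; the year is an example value):
def _example_la_port_usage():  # pragma: no cover - illustrative only
    df_2019 = get_la_port_container_data(2019, index='datetime')
    df_all = get_all_la_port_container_data()
    return df_2019, df_all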
###################
# PORT OF OAKLAND #
###################
def get_oakland_container_data(url: str = 'https://www.oaklandseaport.com/performance/facts-figures/') -> pd.DataFrame:
"""Download the full record of Oakland container data
Parameters
----------
url
The URL to retrieve from. Usually does not need to change.
Returns
-------
pandas.DataFrame
A dataframe containing the historical data (extracted from their Excel sheet) and this years data (extracted
directly from the web page).
Notes
-----
This will actually fetch the data from the Port of Oakland webpage. It is best to fetch this data once and reuse
the returned dataframe, rather than requesting it repeatedly.
"""
r = requests.get(url)
if r.status_code != 200:
raise HTMLRequestError('Failed to retrieve Oakland container web page (URL = {})'.format(url))
soup = BeautifulSoup(r.content, features='html.parser')
# First try to find the link to the Excel sheet and download it
xlsx_url = None
for el in soup('a'):
if 'href' in el.attrs and 'xlsx' in el.attrs['href']:
if xlsx_url is None:
xlsx_url = el.attrs['href']
else:
raise HTMLParsingError('Multiple links to Excel files found on Oakland container page')
if xlsx_url is None:
raise HTMLParsingError('No links to Excel files found on Oakland container page')
# The link in the page usually doesn't include the HTTP/HTTPS, so prepend it if needed
if not xlsx_url.startswith('http'):
schema = url.split('//')[0]
xlsx_url = '{}{}'.format(schema, xlsx_url)
r_wb = requests.get(xlsx_url)
if r_wb.status_code != 200:
raise HTMLRequestError('Failed to retrieve Oakland container xlsx file (URL = {})'.format(xlsx_url))
# Parse the Excel file contents first, then append the most recent data from the web page
df = parse_oakland_excel(r_wb.content, is_contents=True)
df_recent = _parse_oakland_page(r.content)
df = pd.concat([df, df_recent], axis=0)
df['Total Imports'] = df['Full Imports'] + df['Empty Imports']
df['Total Exports'] = df['Full Exports'] + df['Empty Exports']
df['Total TEUs'] = df['Total Exports'] + df['Total Imports']
return df
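# Illustrative note: the Oakland frame uses the same column naming as the Port
# of LA frames ('Full'/'Empty'/'Total' Imports/Exports and 'Total TEUs'), so the
# two sources can be compared directly, e.g.:
#     oak = get_oakland_container_data()
#     la = get_all_la_port_container_data()
#     teus = pd.concat({'Oakland': oak['Total TEUs'], 'LA': la['Total TEUs']}, axis=1)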
def _parse_oakland_page(content: bytes):
"""Parse the Oakland facts & figures page to extract a dataframe of container moves"""
soup = BeautifulSoup(content, features='html.parser')
# Try to find the year in the page headings. Usually the first <h2> element
# is something like: <h2 style="text-align: center;">2020 Container Activity (TEUs)</h2>
year = None
for heading in soup.find_all('h2'):
m = re.search(r'\d{4}', heading.text)
if m:
year = int(m.group())
break
if year is None:
raise HTMLParsingError('Could not identify year in Oakland port data page')
charts = soup.find_all('div', attrs={'class': 'chart-wrapper'})
chart_data = dict()
# The last chart is a summary of past years' total TEUs so we skip it
for c in charts[:-1]:
category, months, teus = _parse_one_oakland_chart(c)
dtind = pd.DatetimeIndex([pd.Timestamp(year, m, 1) for m in months])
chart_data[category] = pd.Series(teus, index=dtind)
    # Totals (imports, exports, TEUs) are added later by get_oakland_container_data
    # once this frame has been concatenated with the historical Excel data.
return pd.DataFrame(chart_data)
# col_order = df.columns.tolist()
# df['Total Imports'] = df['Full Imports'] + df['Empty Imports']
# df['Total Exports'] = df['Full Exports'] + df['Empty Exports']
# df['Total TEUs'] = df['Total Imports'] + df['Total Exports']
# col_order.append('Total Imports')
# col_order.append('Total Exports')
# col_order.append('Total TEUs')
# return df[col_order]
def _parse_one_oakland_chart(chart: bs4_element):
"""Parse one of the charts on the Oakland page"""
title_el = chart.find('div', attrs={'class': 'chart-vertical-title'})
title = title_el.text
data_els = [el for el in chart.find_all('li') if 'title' in el.attrs]
months = []
teus = []
for el in data_els:
month = | pd.to_datetime(el.attrs['title'], format='%b') | pandas.to_datetime |
# coding: utf-8
# ***Visualization (Exploratory Data Analysis) - Phase 1***
# * ***Major questions to answer (A/B testing):***
# 1. Does the installment amount affect loan status?
# 2. Does the installment grade affect loan status?
# 3. Which grade has the highest default rate?
# 4. Does annual income/home-ownership affect the default rate?
# 5. Which state has the highest default rate?
# * ***Text Analysis - Phase 2***
# 6. Are people with certain employee titles taking up more loans than others?
# 7. Does a specific purpose affect loan status?
# * ***Model Building - Phase 3***
# 8. Trying various models and comparing them
# ***Visualization (Exploratory Data Analysis) - Phase 1***
# In[50]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Importing the libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
# Reading the dataset
data = | pd.read_csv("../input/loan.csv") | pandas.read_csv |
import pandas as pd
import sys
import pytz
import gc
import time
input_dir = '../input'
work_dir = '../work'
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32'
}
#nrows=10000
nrows=None
train_df = | pd.read_csv(input_dir+"/train.csv", dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'], nrows=nrows) | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
    This fixture does not include the methods 'time', 'index', 'nearest',
    or 'values' as parameterizations.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
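# Note: tests below consume these fixture pairs as
#     ser.interpolate(method=method, **kwargs)
# so the order-requiring methods ("spline", "polynomial") and the remaining
# methods can be parameterized uniformly.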
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest", downcast="infer")
tm.assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="zero", downcast="infer")
tm.assert_series_equal(result, expected)
# quadratic
# GH #15662.
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="quadratic", downcast="infer")
tm.assert_series_equal(result, expected)
# cubic
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
result = s.interpolate(method="cubic")
tm.assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
s.interpolate(limit=limit, method=method, **kwargs)
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])
msg = f"method must be one of.* Got '{invalid_method}' instead"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)
# When an invalid method and invalid limit (such as -1) are
# provided, the error message reflects the invalid method.
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method, limit=-1)
def test_interp_invalid_method_and_value(self):
# GH#36624
ser = Series([1, 3, np.nan, 12, np.nan, 25])
msg = "Cannot pass both fill_value and method"
with pytest.raises(ValueError, match=msg):
ser.interpolate(fill_value=3, method="pad")
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
tm.assert_series_equal(result, expected)
def test_interp_unlimited(self):
# these test are for issue #16282 default Limit=None is unlimited
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
result = s.interpolate(method="linear", limit_direction="backward")
tm.assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
r"Invalid limit_direction: expecting one of \['forward', "
r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
# raises an error even if no limit is specified.
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_direction="abc")
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit_area="inside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="inside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
result = s.interpolate(method="linear", limit_area="outside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="backward"
)
tm.assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_area="abc")
@pytest.mark.parametrize(
"method, limit_direction, expected",
[
("pad", "backward", "forward"),
("ffill", "backward", "forward"),
("backfill", "forward", "backward"),
("bfill", "forward", "backward"),
("pad", "both", "forward"),
("ffill", "both", "forward"),
("backfill", "both", "backward"),
("bfill", "both", "backward"),
],
)
def test_interp_limit_direction_raises(self, method, limit_direction, expected):
# https://github.com/pandas-dev/pandas/pull/34746
s = Series([1, 2, 3])
msg = f"`limit_direction` must be '{expected}' for method `{method}`"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method, limit_direction=limit_direction)
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series(
[1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]
)
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These test are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These test are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5.0, 7.0, 7.0, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 5.0, 5.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 5.0, 5.0, 7.0, 7.0, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_all_good(self):
s = Series([1, 2, 3])
result = s.interpolate(method="polynomial", order=1)
tm.assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
)
def test_interp_multiIndex(self, check_scipy):
idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
tm.assert_series_equal(result, expected)
msg = "Only `method=linear` interpolation is supported on MultiIndexes"
if check_scipy:
with pytest.raises(ValueError, match=msg):
s.interpolate(method="polynomial", order=1)
@td.skip_if_no_scipy
def test_interp_nonmono_raise(self):
s = Series([1, np.nan, 3], index=[0, 2, 1])
msg = "krogh interpolation requires that the index be monotonic"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="krogh")
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["nearest", "pad"])
def test_interp_datetime64(self, method, tz_naive_fixture):
df = Series(
[1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
)
result = df.interpolate(method=method)
expected = Series(
[1.0, 1.0, 3.0],
index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture),
)
tm.assert_series_equal(result, expected)
def test_interp_pad_datetime64tz_values(self):
# GH#27628 missing.interpolate_2d should handle datetimetz values
dti = pd.date_range("2015-04-05", periods=3, tz="US/Central")
ser = Series(dti)
ser[1] = pd.NaT
result = ser.interpolate(method="pad")
expected = Series(dti)
expected[1] = expected[0]
tm.assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = Series([1.0, 2.0, 3.0])
result = s.interpolate(limit=1)
expected = s
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["polynomial", "spline"])
def test_no_order(self, method):
# see GH-10633, GH-24014
s = Series([0, 1, np.nan, 3])
msg = "You must specify the order of the spline or polynomial"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method)
@td.skip_if_no_scipy
@pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
def test_interpolate_spline_invalid_order(self, order):
s = Series([0, 1, np.nan, 3])
msg = "order needs to be specified and greater than 0"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="spline", order=order)
@td.skip_if_no_scipy
def test_spline(self):
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method="spline", order=1)
expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_spline_extrapolate(self):
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method="spline", order=1, ext=3)
expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
tm.assert_series_equal(result3, expected3)
result1 = s.interpolate(method="spline", order=1, ext=0)
expected1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_smooth(self):
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (
s.interpolate(method="spline", order=3, s=0)[5]
!= s.interpolate(method="spline", order=3)[5]
)
@td.skip_if_no_scipy
def test_spline_interpolation(self):
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method="spline", order=1)
expected1 = s.interpolate(method="spline", order=1)
tm.assert_series_equal(result1, expected1)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method="time")
expected = Series([1.0, 2.0, 3.0], index=pd.to_timedelta([1, 2, 3]))
tm.assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method="time")
expected = Series([1.0, 1.666667, 3.0], index=pd.to_timedelta([1, 2, 4]))
tm.assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# GH#1646
rng = date_range("1/1/2000", "1/20/2000", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[::2] = np.nan
result = ts.interpolate(method="values")
exp = ts.interpolate()
tm.assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range("1/1/2012", periods=4, freq="12D")
ts = Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method="time")
index = pd.date_range("1/1/2012", periods=4, freq="12H")
ts = Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method="time")
tm.assert_numpy_array_equal(result.values, exp.values)
@pytest.mark.parametrize(
"ind",
[
["a", "b", "c", "d"],
pd.period_range(start="2019-01-01", periods=4),
pd.interval_range(start=0, end=4),
],
)
def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
# gh 21662
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
method, kwargs = interp_methods_ind
if method == "pchip":
pytest.importorskip("scipy")
if method == "linear":
result = df[0].interpolate(**kwargs)
expected = | Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind) | pandas.Series |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from argcheck import (expect_types,
optional,
preprocess)
from xutils import py_assert
from alphaware.const import INDEX_FACTOR
from alphaware.enums import (FreqType,
OutputDataFormat)
from .input_validation import ensure_pd_df
@expect_types(data=(pd.Series, pd.DataFrame))
def convert_df_format(data, target_format=OutputDataFormat.MULTI_INDEX_DF, col_name='factor',
multi_index=INDEX_FACTOR):
if target_format == OutputDataFormat.MULTI_INDEX_DF:
tmp = data.stack()
data_ = pd.DataFrame(tmp)
data_.index.names = multi_index.full_index
data_.columns = [col_name]
else:
tmp = data.unstack()
index = tmp.index
columns = tmp.columns.get_level_values(multi_index.sec_index).tolist()
data_ = pd.DataFrame(tmp.values, index=index, columns=columns)
return data_
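# Illustrative sketch (hypothetical tickers/dates): stacking a wide
# date-by-security frame into the two-level multi-index layout; any other
# OutputDataFormat value takes the unstack branch back to the wide layout.
def _example_convert_df_format():  # pragma: no cover - illustrative only
    wide = pd.DataFrame({'001.XSHE': [1.0, 2.0], '002.XSHE': [3.0, 4.0]},
                        index=pd.date_range('2017-01-01', periods=2))
    return convert_df_format(wide, target_format=OutputDataFormat.MULTI_INDEX_DF,
                             col_name='factor')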
@expect_types(df=(pd.Series, pd.DataFrame))
def top(df, column=None, n=5):
if isinstance(df, pd.Series):
ret = df.sort_values(ascending=False)[:n]
else:
py_assert(column is not None, "Specify the col name or use pandas Series type of data")
ret = df.sort_values(by=column, ascending=False)[:n]
return ret
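# Illustrative sketch (hypothetical values): `top` ranks a Series directly and a
# DataFrame by the named column.
def _example_top():  # pragma: no cover - illustrative only
    s = pd.Series([3.0, 1.0, 2.0], index=['a', 'b', 'c'])
    df = pd.DataFrame({'score': [3.0, 1.0, 2.0]}, index=['a', 'b', 'c'])
    return top(s, n=2), top(df, column='score', n=2)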
@expect_types(data=(pd.DataFrame, pd.Series), freq=optional(FreqType, str))
def group_by_freq(data, freq=FreqType.EOM):
data_ = | pd.DataFrame(data) | pandas.DataFrame |
import json
import pandas as pd
import re
import spacy
from re import search
nlp = spacy.load('mygeoparse/ner/model')
def load_postcode_dataframe():
df_postcode_my = pd.read_csv("mygeoparse/data/postcode_my.csv")
df_postcode_my['Location'] = df_postcode_my['Location'].str.upper()
df_postcode_my['Post_Office'] = df_postcode_my['Post_Office'].str.upper()
df_postcode_my['Postcode'] = df_postcode_my['Postcode'].str.upper()
df_postcode_my['State'] = df_postcode_my['State'].str.upper()
return df_postcode_my
def clean_postcodes(p):
if len(p) > 1:
postal_code = p[1]
elif len(p) == 0:
postal_code = 'N/A'
else:
postal_code = p[0]
return postal_code
def find_missing_postcode(df_cleaned_address_list):
for index, row in df_cleaned_address_list.iterrows():
if row['postcode'] == 'N/A':
street = row['street_name']
pattern = "\d{5}"
p_code = re.findall(pattern, street)
postal_code = clean_postcodes(p_code)
df_cleaned_address_list.loc[index, 'postcode'] = postal_code
return df_cleaned_address_list
def find_missing_state(df_cleaned_address_list):
df_microdb = load_postcode_dataframe()
for index, row in df_cleaned_address_list.iterrows():
if row['state'] == 'N/A':
p_code = row['postcode']
if p_code != 'N/A':
df = df_microdb[df_microdb['Postcode'] == p_code]
lst_state = df['State']
if len(lst_state) != 0:
df_cleaned_address_list.loc[index, 'state'] = lst_state.iloc[0]
return df_cleaned_address_list
def find_missing_city(df_cleaned_address_list):
df_microdb = load_postcode_dataframe()
for index, row in df_cleaned_address_list.iterrows():
if row['city'] == 'N/A':
p_code = row['postcode']
if p_code != 'N/A':
df = df_microdb[df_microdb['Postcode'] == p_code]
lst_city = df['Post_Office']
if len(lst_city) != 0:
df_cleaned_address_list.loc[index, 'city'] = lst_city.iloc[0]
return df_cleaned_address_list
def split_house_address_and_street(address):
address_keywords = ['JALAN', 'TAMAN', 'LORONG', 'KAWASAN', 'PERSIARAN', 'FLAT',
'PANGSAPURI', 'DOMAIN']
index = 0
location = 0
check = False
for word in address:
for key in address_keywords:
if search(key, word):
check = True
break
else:
check = False
if check:
location = index
break
else:
location = 0
check = False
index = index + 1
return location
def remove_city(token, city_list):
found_city = False
for i in city_list:
if i == token:
found_city = True
break
else:
found_city = False
return found_city
def remove_state(token, state_list):
found_state = False
for i in state_list:
if i == token:
found_state = True
break
else:
found_state = False
return found_state
def remove_country(token, country_list):
found_country = False
for i in country_list:
if i == token:
found_country = True
break
else:
found_country = False
return found_country
def remove_postcodes(token, postcode_list):
found_postcode = False
for i in postcode_list:
if i == token:
found_postcode = True
break
else:
found_postcode = False
return found_postcode
def find_missing_address_parts(df):
df_bjobs_new = find_missing_postcode(df)
df_bjobs_new = find_missing_city(df_bjobs_new)
df_bjobs_new = find_missing_state(df_bjobs_new)
# print('Pre-process 2 - Fill in missing values with existing data... Success')
# print('Preprocessing Stage 2... Success')
return df_bjobs_new
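# Illustrative sketch (hypothetical row): the postcode is recovered from the
# street text and the city/state are then filled from the bundled postcode table.
# Column names follow the frame produced by parse_one_address below.
def _example_fill_missing_parts():  # pragma: no cover - illustrative only
    df = pd.DataFrame([{
        'house_number': 'NO 1', 'building_name': 'N/A',
        'street_name': 'JALAN AMPANG 50450 KUALA LUMPUR',
        'postcode': 'N/A', 'city': 'N/A', 'state': 'N/A', 'country': 'MY',
    }])
    return find_missing_address_parts(df)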
def parse_one_address(address):
df_postcode_my = load_postcode_dataframe()
state_list = df_postcode_my['State'].str.upper()
state_list = set(state_list)
state_list = list(state_list)
state_list = [x for x in state_list if pd.notnull(x)]
city_list = df_postcode_my['Post_Office'].str.upper()
city_list = set(city_list)
city_list = list(city_list)
city_list = [x for x in city_list if pd.notnull(x)]
postcode_list = df_postcode_my['Postcode'].str.upper()
postcode_list = set(postcode_list)
postcode_list = list(postcode_list)
postcode_list = [x for x in postcode_list if pd.notnull(x)]
location_list = df_postcode_my['Location'].str.upper()
location_list = list(location_list)
location_list = [x for x in location_list if pd.notnull(x)]
country_list = ['MY']
postcodes_found = []
country_found = []
state_found = []
city_found = []
list_address_full = []
all_address_r_list = []
list_building_name = []
list_house_number = []
country = 'MY'
state = 'N/A'
city = 'N/A'
postcode = 'N/A'
for index in range(len(address) - 1, 0, -1):
if remove_country(address[index], country_list):
country = address[index]
address.remove(address[index])
elif remove_state(address[index], state_list):
state = address[index]
address.remove(address[index])
elif remove_city(address[index], city_list):
city = address[index]
address.remove(address[index])
elif remove_postcodes(address[index], postcode_list):
postcode = address[index]
address.remove(address[index])
country_found.append(country)
state_found.append(state)
city_found.append(city)
postcodes_found.append(postcode)
all_address_r_list.append(address)
# print("===============================")
# print("Before split street and house address:", address)
street_index = split_house_address_and_street(address)
if street_index > 0:
house_address = str(address[:street_index])[1:-1].replace("'", "")
street_name = str(address[street_index:])[1:-1].replace("'", "")
else:
house_address = 'N/A'
street_name = str(address[street_index:])[1:-1].replace("'", "")
# print("After Split:")
# print("House Address: ", house_address)
# print("Street Name: ", street_name)
if house_address != 'N/A':
doc = nlp(house_address)
building_name = ''
house_number = ''
# print('\nOutput: ', [(ent.text, ent.label_) for ent in doc.ents])
for ent in doc.ents:
if ent.label_ == 'HOUSE NUMBER':
house_number = house_number + ' ' + ent.text
elif ent.label_ == "BUILDING NAME":
building_name = building_name + ' ' + ent.text
if building_name == '':
building_name = 'N/A'
if house_number == '':
house_number = 'N/A'
list_building_name.append(building_name)
list_house_number.append(house_number)
else:
building_name = 'N/A'
house_number = 'N/A'
list_building_name.append(building_name)
list_house_number.append(house_number)
# print("House Address:", house_address)
# print("After labeled by NER:")
# print(house_number, ",", building_name)
# print("===============================")
# address_full = [house_number, building_name, house_address.replace(",", ""), street_name.replace(",", ""),
# postcode, city, state, country]
address_full = [house_number, building_name, street_name.replace(",", ""),
postcode, city, state, country]
list_address_full.append(address_full)
df = pd.DataFrame(list_address_full,
columns=['house_number', 'building_name', 'street_name', 'postcode', 'city',
'state', 'country'])
# print('Pre-process 1 - Identify Address Items... Success')
return df
def address_splitting(address):
raw_address = address.upper()
# Remove Commas
c_address = raw_address.replace(',', ' ')
# Remove Dot
c_address = c_address.replace('.', ' ')
# Tokenize Address
tokenized_address = c_address.split(" ")
c_address_2 = ''
pattern = re.compile("\d{5}")
address_keywords = ['JALAN', 'KG', 'KAMPUNG', 'TAMAN', 'PERSIARAN', 'BLOK', 'BLOCK', 'JLN',
'TMN', 'SEKSYEN', 'SECTION', 'RUMAH', 'KAMPUS', 'PLAZA', 'BUKIT', 'KOMPLEKS',
'KAWASAN', 'NO', 'NO.', 'LORONG', 'JABATAN', 'LADANG', 'SEKOLAH',
'PANGSAPURI', 'BANGUNAN', 'LADANG', 'THE', 'PORT', 'LOT', 'WISMA']
end_address_keywords = ['MALL', 'RESIDENCE', 'CARNIVAL', 'WAREHOUSE', 'CENTRE', 'JAYA', 'CITY',
'COMPLEX', 'ZONE', 'VILLAGE', 'PARK', 'BARAT', 'HEIGHTS', 'OUTLETS',
'SQUARE', 'RESORT', 'OUTLET', 'BHD']
state_list = ['SELANGOR', 'PAHANG', 'TERENGGANU', 'KELANTAN', 'PERAK', 'KEDAH', 'PENANG',
'PERLIS', 'NEGERI', 'MELAKA', 'JOHOR', 'SABAH',
'SARAWAK', 'WP']
country_list = ['MY']
address_keywords.extend(state_list)
address_keywords.extend(country_list)
for word in tokenized_address:
check = False
check_number_exists = any(char.isdigit() for char in word)
if check_number_exists:
if pattern.match(word):
c_address_2 = c_address_2 + ',' + word + ', '
else:
c_address_2 = c_address_2 + word + ','
else:
for key in address_keywords:
if search(key, word):
check = True
break
else:
check = False
if check:
c_address_2 = c_address_2 + ',' + word + ' '
else:
check = False
for key in end_address_keywords:
if search(key, word):
check = True
break
else:
check = False
if check:
c_address_2 = c_address_2 + word + ', '
else:
c_address_2 = c_address_2 + word + ' '
c_address_2 = c_address_2.replace(u'\xa0', u' ')
tokenized_address_2 = c_address_2.split(",")
strip_tokenized_address_2 = [item.strip() for item in tokenized_address_2]
strip_tokenized_address_2 = [i for i in strip_tokenized_address_2 if i]
c_tokenized_address_3 = []
for word in strip_tokenized_address_2:
if not word.isspace():
c_tokenized_address_3.append(word)
final_cleaned_tokenized_address = c_tokenized_address_3
# print('Pre-process 2 - Tokenizing addresses... Success')
return final_cleaned_tokenized_address

def decontracted(phrase):
    # specific
    phrase = re.sub(r"JLN", "JALAN", phrase)
    phrase = re.sub(r"TMN", "TAMAN", phrase)
    phrase = re.sub(r"\bSG\b", "SUNGAI", phrase)
    # City
    phrase = re.sub(r"\bWP KL\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\bK.L.\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\bKL\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\bW.P. KUALA LUMPUR\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\bW.P KUALA LUMPUR\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\bW. PERSEKUTUAN\b", "WP", phrase)
    phrase = re.sub(r"WILAYAH PERSEKUTUAN KUALA LUMPUR", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\b, KUALA LUMPUR\b", ", WP KUALA LUMPUR", phrase)
    # phrase = re.sub(r"\bW.P\b", "WP", phrase)
    phrase = re.sub(r"\bW.PERSEKUTUAN\b", "WP", phrase)
    phrase = re.sub(r"\bFEDERAL TERRITORY OF KUALA LUMPUR\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"\bPJ\b", "PETALING JAYA", phrase)
    # phrase = re.sub(r"\bWp\b", "WP", phrase)
    phrase = re.sub(r"\bJOHOR BHARU\b", "JOHOR BAHRU", phrase)
    phrase = re.sub(r"\bPENANG\b", "<NAME>", phrase)
    phrase = re.sub(r"\bBDR\b", "BANDAR", phrase)
    phrase = re.sub(r"\bWPKL\b", "WP KUALA LUMPUR", phrase)
    phrase = re.sub(r"WP-PUTRAJAYA", "WP PUTRAJAYA", phrase)
    phrase = re.sub(r"\b, PUTRAJAYA\b", ", WP PUTRAJAYA", phrase)
    phrase = re.sub(r"\bSEK.\b", "SEKSYEN", phrase)
    phrase = re.sub(r"\bDARUL KHUSUS\b", "", phrase)
    phrase = re.sub(r"\bDARUL EHSAN\b", "", phrase)
    return phrase
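
# Hedged usage sketch, not part of the original source: shows the kind of
# abbreviation expansion decontracted() performs. The sample string and the
# expected output in the comment are assumptions.
def _demo_decontracted():
    expanded = decontracted("JLN AMPANG, KL")
    # Roughly: "JALAN AMPANG, WP KUALA LUMPUR"
    return expanded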

def expand_address_v2(address):
    address = address.upper()
    expanded_address = decontracted(address)
    return expanded_address


def expand_address(address):
    address = address.upper()
    expanded_address = decontracted(address)
    return expanded_address


def clean_one_address(address):
    expanded_address = expand_address(address)
    # print('Pre-process 1 - Expanding Abbreviations... Success')
    splitted_address = address_splitting(expanded_address)
    # print('Preprocessing Stage 1... Success')
    return splitted_address
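
# Hedged usage sketch, not part of the original source: chains abbreviation
# expansion and splitting on one made-up address via clean_one_address().
def _demo_clean_one_address():
    tokens = clean_one_address("12 JLN AMPANG, 50450 KL, MY")
    # Roughly: ['12', 'JALAN AMPANG', '50450', 'WP KUALA LUMPUR', 'MY']
    return tokens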

def parse_addresses(address_list):
    df_postcode_my = load_postcode_dataframe()
    state_list = df_postcode_my['State'].str.upper()
    state_list = set(state_list)
    state_list = list(state_list)
    state_list = [x for x in state_list if pd.notnull(x)]
    city_list = df_postcode_my['Post_Office'].str.upper()
    city_list = set(city_list)
    city_list = list(city_list)
    city_list = [x for x in city_list if pd.notnull(x)]
    postcode_list = df_postcode_my['Postcode'].str.upper()
    postcode_list = set(postcode_list)
    postcode_list = list(postcode_list)
    postcode_list = [x for x in postcode_list if pd.notnull(x)]
    location_list = df_postcode_my['Location'].str.upper()
    location_list = list(location_list)
    location_list = [x for x in location_list if pd.notnull(x)]
import os
import pandas as pd
import tweepy
import sys
from danlp.download import DATASETS, download_dataset, DEFAULT_CACHE_DIR, _unzip_process_func
from danlp.utils import extract_single_file_from_zip

class EuroparlSentiment1:
    """
    Class for loading the Europarl Sentiment dataset.

    :param str cache_dir: the directory for storing cached models
    """

    def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
        self.dataset_name = 'europarl.sentiment1'
        self.file_extension = DATASETS[self.dataset_name]['file_extension']
        self.dataset_dir = download_dataset(self.dataset_name, cache_dir=cache_dir)
        self.file_path = os.path.join(self.dataset_dir, self.dataset_name + self.file_extension)

    def load_with_pandas(self):
        """
        Loads the dataset into a dataframe,
        dropping duplicates and NaN values.

        :return: a dataframe
        """
        df = pd.read_csv(self.file_path, sep=',', index_col=0, encoding='utf-8')
        df = df[['valence', 'text']].dropna()
        return df.drop_duplicates()
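
# Hedged usage sketch, not part of the original source: shows the intended way
# to load the dataset. The first call downloads it to the default cache dir.
def _demo_europarl_sentiment1():
    es = EuroparlSentiment1()
    df = es.load_with_pandas()
    return df.head()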

class EuroparlSentiment2:
    """
    Class for loading the Europarl Sentiment dataset.

    :param str cache_dir: the directory for storing cached models
    """

    def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
        self.dataset_name = 'europarl.sentiment2'
        self.dataset_dir = download_dataset(self.dataset_name, cache_dir=cache_dir, process_func=_unzip_process_func)
        self.file_path = os.path.join(cache_dir, self.dataset_name + '.csv')

    def load_with_pandas(self):
        """
        Loads the dataset as a dataframe

        :return: a dataframe
        """
        return pd.read_csv(self.file_path, sep=',', encoding='utf-8')

class LccSentiment:
    """
    Class for loading the LCC Sentiment dataset.

    :param str cache_dir: the directory for storing cached models
    """

    def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
        self.dataset_name1 = 'lcc1.sentiment'
        self.file_extension1 = DATASETS[self.dataset_name1]['file_extension']
        self.dataset_dir1 = download_dataset(self.dataset_name1, cache_dir=cache_dir)
        self.file_path1 = os.path.join(self.dataset_dir1, self.dataset_name1 + self.file_extension1)
        self.dataset_name2 = 'lcc2.sentiment'
        self.file_extension2 = DATASETS[self.dataset_name2]['file_extension']
        self.dataset_dir2 = download_dataset(self.dataset_name2, cache_dir=cache_dir)
        self.file_path2 = os.path.join(self.dataset_dir2, self.dataset_name2 + self.file_extension2)

    def load_with_pandas(self):
        """
        Loads the two dataset files into one dataframe,
        combining them and dropping duplicates and NaN values.

        :return: a dataframe
        """
        df1 = pd.read_csv(self.file_path1, sep=',', encoding='utf-8')
        df2 = pd.read_csv(self.file_path2, sep=',', encoding='utf-8')
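        # Hedged completion sketch (assumption): the source is cut off here, so the
        # remaining steps are inferred from the docstring only — combine the two
        # frames and drop NaNs/duplicates. The 'valence'/'text' column names are
        # assumed, not confirmed by the source.
        df = pd.concat([df1, df2])
        df = df[['valence', 'text']].dropna()
        return df.drop_duplicates()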