prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars)
---|---|---
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import string
from io import StringIO
import time
from functools import reduce
import itertools
import operator
import pyspark
from pyspark.sql.types import *
from pyspark.sql import *
from pyspark import SparkConf, SparkContext
from pyspark.sql.functions import col
import nltk
from nltk import RegexpTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords as nltkstopwords
from nltk.corpus import wordnet
from nltk.stem.porter import *
from nltk import edit_distance
from autocorrect import spell
get_ipython().run_line_magic('matplotlib', 'inline')
nltk.download('stopwords')
nltk.download('wordnet')
##data readin
inp_path = 'encounter_notes_12.csv'
dat=open(inp_path,errors='ignore',)
dat=dat.read()
test = StringIO(dat)
df = pd.read_csv(test, sep=",",na_values=" ")
left_eye ='SLE_L_CORNEA_1020'
right_eye ='SLE_R_CORNEA_1013'
patid = 'PAT_ID'
## combine left and right eye description
df['description'] = df[left_eye].map(str) +' '+ df[right_eye].map(str)
## setting spark environment
conf = SparkConf().setAppName("wd_count")
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
ps_df=sqlCtx.createDataFrame(df[[patid,'description']])
## from pyspark.df to pyspark rdd, get word frequency
Rdd=ps_df.rdd
wd_ct = (Rdd
         .map(lambda x: [x[0], x[1].lower().strip().split()])
         .flatMap(lambda x: [(x[0], w.strip(string.punctuation)) for w in x[1]])
         .map(lambda x: (x, 1))
         .reduceByKey(lambda x, y: x + y)
         .sortBy(lambda x: -x[1])  # sort by count descending; -x[0] would try to negate the (PAT_ID, word) tuple
         .map(lambda x: [x[0][0], x[0][1], x[1]])
         .toDF())
## print col name
wd_ct.printSchema()
## rename cols
wd_ct = wd_ct.selectExpr("_1 as PAT_ID","_2 as word", "_3 as cts")
## aggregate words together by summing frequency
words=wd_ct.groupBy("word").agg({"cts": "sum"}).sort(col("sum(cts)").desc())
## transform to pandas df
pd_words=words.toPandas().sort_values('word')
pd_words.sort_values('word').head(10)
#correction('wiht')
#start = timeit.timeit()
#newlist = list(map(correction, pd_words['word'].tolist()))
#end = timeit.timeit()
#print(end - start)
## tokenizing
tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
tokens = tokenizer.tokenize(' '.join(pd_words['word'].tolist()))
pd_words=pd_words.loc[pd_words['word'].isin(tokens)]
##spelling correction
start = time.time()
corrected=list(map(spell, pd_words['word'].tolist()))
end = time.time()
print(end-start)
pd_words['corrected']=pd.Series(corrected,index=pd_words.index)
##remove stopwords
nonstopwords = [wd for wd in corrected if wd not in nltkstopwords.words('english')]
pd_words=pd_words.loc[pd_words['corrected'].isin(nonstopwords)]
##stemming
stemmer = PorterStemmer()
words1 = [stemmer.stem(word) for word in pd_words['corrected'].tolist()]
pd_words['stemmer']=pd.Series(words1,index=pd_words.index)
#lmtzr = WordNetLemmatizer()
#words2 = [lmtzr.lemmatize(word) for word in pd_words['corrected'].tolist()]
#pd_words['lmtzr']=pd.Series(words2,index=pd_words.index)
#nonstopwords = [wd for wd in pd_words.word if wd not in nltkstopwords.words('english')]
#pd_words=pd_words.loc[pd_words['corrected'].isin(nonstopwords)]
## aggregate words with same stemmer
a=pd_words.groupby('stemmer')['word'].apply(lambda x: ', '.join(x)).to_frame()
b=pd_words.groupby('stemmer')['sum(cts)'].sum().to_frame()
combined= pd.concat([a, b], axis=1)
combined=combined[combined.index.isin(['nan'])==False ]
combined=combined.reset_index()
def Prob(word, N=sum(pd_words['sum(cts)'])):
"Probability of `word`."
return pd_words[pd_words.word==word]['sum(cts)'].values/ N
def correction(lst_of_word):
"Most probable spelling correction for word."
return max(lst_of_word, key=Prob)
corrected=[]
for i in range (0, len(combined)):
corrected.append(correction(combined.word.iloc[i].split(', ')))
combined['stemmed_corrected']=pd.Series(corrected, index=combined.index)
cols=['stemmer','stemmed_corrected','word', 'sum(cts)']
combined = combined[cols]
newlist=combined.stemmer.tolist()
needed=list()
for i in newlist:
if len(i)>=2:
needed.append(i)
combined=combined[combined.stemmer.isin(needed)==True ]
def closed_wd(lst):
'''find close words using Levenshtein distance'''
pairs = [[lst[w1], lst[w2]] for w1 in range(len(lst)) for w2 in range(w1+1,len(lst))]
closed_pairs=list()
for i in pairs:
if edit_distance(i[0], i[1])<max(len(i[0])/5,len(i[1])/5):
if i[0][:2]==i[1][:2]:
i.sort()
closed_pairs.append(i)
closed_pairs = [list(x) for x in set(tuple(x) for x in closed_pairs)]
LL = set(itertools.chain.from_iterable(closed_pairs))
for each in LL:
components = [x for x in closed_pairs if each in x]
for i in components:
closed_pairs.remove(i)
closed_pairs += [list(set(itertools.chain.from_iterable(components)))]
closed_pairs = [list(x) for x in set(tuple(x) for x in closed_pairs)]
return closed_pairs
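## illustrative example only (not in the original notebook): words within ~20% edit
## distance that share their first two letters are paired, and overlapping pairs are
## merged transitively; 'clear' is too far from the other two to join the group
example_groups = closed_wd(['cornea', 'corneal', 'clear'])
# -> [['cornea', 'corneal']] (order inside a group may vary)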
#closed_wd(combined.stemmed_corrected.tolist())
newlist=combined.stemmed_corrected.tolist()
sim=closed_wd(newlist)
#newlist=combined.stemmer.tolist()
#simil_list3=closed_wd(newlist)
sub=combined[combined.stemmed_corrected.isin(reduce(operator.concat, sim))]
combined=combined[combined.stemmed_corrected.isin(reduce(operator.concat, sim))==False]
## assign same group to similar words
groups=list(['na']*sub.shape[0])
for j in range(0,len(sub)):
for i in range(0,len(sim)):
if sub.stemmed_corrected.iloc[j] in sim[i]:
groups[j]=i
sub['groups'] = pd.Series(groups, index=sub.index)
## aggregation
a=sub.groupby('groups')['stemmer'].apply(lambda x: ', '.join(x)).to_frame()
b=sub.groupby('groups')['word'].apply(lambda x: ', '.join(x)).to_frame()
c=sub.groupby('groups')['sum(cts)'].sum().to_frame()
d=sub.groupby('groups')['stemmed_corrected'].apply(lambda x: ', '.join(x)).to_frame()
grouped_sub= pd.concat([a, b,c,d], axis=1)
## updating corrected word by frequency
corrected=[]
for i in range (0, len(grouped_sub)):
corrected.append(correction(grouped_sub.word.iloc[i].split(', ')))
grouped_sub['stemmed_corrected'] = pd.Series(corrected, index=grouped_sub.index)
import logging
from temporal_granularity.src.metrics.metrics import Metrics
from pandas.util.testing import assert_frame_equal
import pandas as pd
import sys
from pathlib import Path
project_dir = Path(__file__).resolve().parents[1]
sys.path.insert(0, '{}/temporal_granularity/'.format(project_dir))
logging.basicConfig(level=logging.DEBUG)
class Test_Metrics:
def test_all_nrmse(self):
original_solar = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_solar = pd.DataFrame({"capacity_factor": [1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8], "datetime": [1, 2, 3, 4, 5, 6, 7]})
original_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
original_load = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_load = pd.DataFrame({"capacity_factor": [1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65], "datetime": [1, 2, 3, 4, 5, 6, 7]})
all_nrmse = Metrics(original_solar, representative_solar, original_wind, representative_wind, original_load, representative_load, "dc")._get_nrmse()
expected_nrmse = [{'metric': 'nrmse dc', 'series_type': 'solar', 'value': 16.666666666666668}, {'metric': 'nrmse dc',
'series_type': 'wind', 'value': 0.0}, {'metric': 'nrmse dc', 'series_type': 'load', 'value': 8.33333333333334}]
assert all_nrmse == expected_nrmse
def test_all_rae(self):
original_solar = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
representative_solar = pd.DataFrame({"capacity_factor": [1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
original_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
representative_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
original_load = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
representative_load = pd.DataFrame({"capacity_factor": [1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
all_nrmse = Metrics(original_solar, representative_solar, original_wind, representative_wind, original_load, representative_load, "dc")._get_rae()
expected_nrmse = [{'metric': 'rae dc', 'series_type': 'solar', 'value': 7.142857142857138}, {'metric': 'rae dc',
'series_type': 'wind', 'value': 0.0}, {'metric': 'rae dc', 'series_type': 'load', 'value': 3.5714285714285796}]
assert all_nrmse == expected_nrmse
def test_all_correlations(self):
original_solar = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
import multiprocessing as mp
import numpy as np
import pandas as pd
import time
import concurrent.futures
import time
np.random.seed(321)
global df, temp_df
# ======================== Generate Sample Data =============================
# store names
stores = ["store_a", "store_b", "store_c", "store_d", "store_e"]
# number of daily sales
number_of_sales_per_store = 5
df = pd.DataFrame(columns=["store_name", "sales"], dtype=np.int8)
for store in stores:
d = pd.DataFrame(
dict(
store_name=[store for _ in range(number_of_sales_per_store)],
sales=[
np.random.randint(100, 1000) for _ in range(number_of_sales_per_store)
],
)
)
df = df.append(d)
# create a place holder data frame and save it
results = pd.DataFrame(columns=["store_name", "average_sales"])
results.to_csv("results.csv", index=False)
# create a shared dictionary
# if you are using windows, create this dictionary
# inside the if __name__ == condition, just above the
# with statement
global shared_dictionary
shared_dictionary = mp.Manager().dict()
# target function
def target_function(store):
df_slice = df[df["store_name"] == store]
mean_of_slice = np.mean(df_slice["sales"])
# method 1: save to shared dictionary
shared_dictionary[store] = [mean_of_slice]
# print(shared_dictionary)
# method 2: save to the place holder csv file
# first save results to pandas df, then append the results to
# the csv place holder we created already
temp_df = pd.DataFrame(columns=["store_name", "average_sales"])
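# --- Hypothetical driver (not part of the original snippet, which breaks off above).
# The earlier comments point to an `if __name__ == "__main__"` guard containing a
# `with` statement; one plausible way to fan target_function out over the stores,
# using the concurrent.futures import already present above:
if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor() as executor:
        list(executor.map(target_function, stores))
    # method 1 results end up in the manager-backed shared dictionary
    print(dict(shared_dictionary))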
# -*- coding: utf-8 -*-
"""
Project: Psychophysics_exps
Creator: Miao
Create time: 2020-12-19 21:32
IDE: PyCharm
Introduction:
"""
import os
import pandas as pd
def __get_dataframe_file(data_path: str, filetype: str, filename: str) -> pd.DataFrame:
if filetype == ".csv":
df = pd.read_csv(data_path + filename)
elif filetype == ".xlsx":
df = pd.read_excel(data_path + filename)
return df
def merge_all_file2dataframe(data_path: str, filetype: str, filename_prefix: str) -> pd.DataFrame:
# list data files
files = os.listdir(data_path)
# collect all raw data files
filenames_list = [file for file in files if file.startswith(filename_prefix) & file.endswith(filetype)]
# read data
    all_data = pd.DataFrame()
    # read and concatenate every matching raw data file
    for filename in filenames_list:
        all_data = pd.concat([all_data, __get_dataframe_file(data_path, filetype, filename)], ignore_index=True)
    return all_data
'''
Utility functions to support trading algorithms
Created by <NAME>
'''
import numpy as np
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.dates as mdates
from zipline.api import symbols, record, order_target_percent
import empyrical as ep
# PyPortfolioOpt imports
from pypfopt import risk_models, expected_returns
from pypfopt.cla import CLA
from pypfopt.base_optimizer import portfolio_performance
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.hierarchical_risk_parity import HRPOpt
# Pyfolio imports
import pyfolio as pf
from pyfolio.utils import print_table
from pyfolio.timeseries import perf_stats
def add_portfolio(all_portfolios, group, subgroup, sym, risk_level):
if group in all_portfolios:
if subgroup not in all_portfolios[group]:
all_portfolios[group][subgroup] = {}
else:
all_portfolios[group] = {}
all_portfolios[group][subgroup] = {}
all_portfolios[group][subgroup] = {
'stocks': sym,
'levels': risk_level
}
def initialize_portfolio(verbose=False):
"""Initialises pre-assembled fixed basket of ETFs, each with their own predefined risk buckets
Currently contains
Vanguard ETF strategic model portfolios (https://advisors.vanguard.com/iwe/pdf/FASINVMP.pdf)
- CORES_SERIES
- CRSP_SERIES
- SP_SERIES
- RUSSELL_SERIES
- INCOME_SERIES
- TAX_SERIES
Ray Dalio's All-Weather Portfolio
Returns:
dict containing all added portfolios
"""
if verbose: print('Initialising portfolio database')
all_portfolios = {}
# Benchmarks
# use [] for single ticker, so that it is iterable to avoid error 'int' object is not iterable
add_portfolio(all_portfolios, 'BENCHMARK', 'SPY', symbols('SPY'), {0: [1]})
# 11 SPDR sector ETFs
add_portfolio(all_portfolios, 'SPDR', 'ALL_SECTORS', symbols('XLE', 'XLRE', 'XLF', 'XLV', 'XLC', 'XLI', 'XLY', 'XLP', 'XLB', 'XLK', 'XLU'), {
0: tuple(1 for _ in range(11))
})
    # Ray Dalio's All Weather Portfolio. Rebalancing once a year or more, with the following suggested distributions:
# * 30% stocks (eg VTI)
# * 40% long-term bonds (eg TLT)
# * 15% intermediate-term bonds (eg IEF)
# * 7.5% gold (eg GLD)
# * 7.5% commodities (eg DBC)
add_portfolio(all_portfolios, 'DALIO', 'ALL_WEATHER', symbols('VTI', 'TLT', 'IEF', 'GLD', 'DBC'), {
0: (0.3, 0.4, 0.15, 0.075, 0.075)
})
    # Vanguard Core Series
add_portfolio(all_portfolios, 'VANGUARD', 'CORE_SERIES', symbols('VTI', 'VXUS', 'BND', 'BNDX'), {
0: (0, 0, 0.686, 0.294),
1: (0.059, 0.039, 0.617, 0.265),
2: (0.118, 0.078, 0.549, 0.235),
3: (0.176, 0.118, 0.480, 0.206),
4: (0.235, 0.157, 0.412, 0.176),
5: (0.294, 0.196, 0.343, 0.147),
6: (0.353, 0.235, 0.274, 0.118),
7: (0.412, 0.274, 0.206, 0.088),
8: (0.470, 0.314, 0.137, 0.059),
9: (0.529, 0.353, 0.069, 0.029),
10: (0.588, 0.392, 0, 0)
})
# add_portfolio(all_portfolios, 'VANGUARD', 'CRSP_SERIES', symbols('VUG', 'VTV', 'VB', 'VEA', 'VWO', 'BSV', 'BIV', 'BLV', 'VMBS', 'BNDX'), {
# 0: (0, 0, 0, 0, 0, 0.273, 0.14, 0.123, 0.15, 0.294),
# 1: (0.024, 0.027, 0.008, 0.03, 0.009, 0.245, 0.126, 0.111, 0.135, 0.265),
# 2: (0.048, 0.054, 0.016, 0.061, 0.017, 0.218, 0.112, 0.099, 0.12, 0.235),
# 3: (0.072, 0.082, 0.022, 0.091, 0.027, 0.191, 0.098, 0.086, 0.105, 0.206),
# 4: (0.096, 0.109, 0.03, 0.122, 0.035, 0.164, 0.084, 0.074, 0.09, 0.176),
# 5: (0.120, 0.136, 0.038, 0.152, 0.044, 0.126, 0.07, 0.062, 0.075, 0.147),
# 6: (0.143, 0.163, 0.047, 0.182, 0.053, 0.109, 0.056, 0.049, 0.06, 0.118),
# 7: (0.167, 0.190, 0.055, 0.213, 0.061, 0.082, 0.042, 0.037, 0.045, 0.088),
# 8: (0.191, 0.217, 0.062, 0.243, 0.071, 0.055, 0.028, 0.024, 0.030, 0.059),
# 9: (0.215, 0.245, 0.069, 0.274, 0.079, 0.027, 0.014, 0.013, 0.015, 0.029),
# 10: (0.239, 0.272, 0.077, 0.304, 0.088, 0, 0, 0, 0, 0)
# })
# add_portfolio(all_portfolios, 'VANGUARD', 'SP_SERIES', symbols('VOO', 'VXF', 'VEA', 'VWO', 'BSV', 'BIV', 'BLV', 'VMBS', 'BNDX'), {
# 0: (0, 0, 0, 0, 0.273, 0.140, 0.123, 0.150, 0.294),
# 1: (0.048, 0.011, 0.03, 0.009, 0.245, 0.126, 0.111, 0.135, 0.265),
# 2: (0.097, 0.021, 0.061, 0.017, 0.218, 0.112, 0.099, 0.12, 0.235),
# 3: (0.145, 0.031, 0.091, 0.027, 0.191, 0.098, 0.086, 0.105, 0.206),
# 4: (0.194, 0.041, 0.0122, 0.035, 0.164, 0.084, 0.074, 0.09, 0.176),
# 5: (0.242, 0.052, 0.152, 0.044, 0.136, 0.07, 0.062, 0.075, 0.147),
# 6: (0.29, 0.063, 0.182, 0.053, 0.109, 0.056, 0.049, 0.06, 0.118),
# 7: (0.339, 0.073, 0.213, 0.061, 0.082, 0.042, 0.037, 0.045, 0.088),
# 8: (0.387, 0.083, 0.243, 0.071, 0.055, 0.028, 0.024, 0.03, 0.059),
# 9: (0.436, 0.093, 0.274, 0.079, 0.027, 0.014, 0.013, 0.015, 0.029),
# 10: (0.484, 0.104, 0.304, 0.088, 0, 0, 0, 0, 0)
# })
# add_portfolio(all_portfolios, 'VANGUARD', 'RUSSELL_SERIES', symbols('VONG', 'VONV', 'VTWO', 'VEA', 'VTWO', 'VEA', 'VWO', 'BSV', 'BIV', 'BLV', 'VMBS', 'BNDX'), {
# 0: (0, 0, 0, 0, 0, 0.273, 0.14, 0.123, 0.15, 0.294),
# 1: (0.028, 0.026, 0.005, 0.03, 0.009, 0.245, 0.126, 0.111, 0.135, 0.265),
# 2: (0.056, 0.052, 0.01, 0.061, 0.017, 0.218, 0.112, 0.099, 0.086, 0.105, 0.206),
# 3: (0.084, 0.079, 0.013, 0.091, 0.027, 0.191, 0.098, 0.086, 0.105, 0.206),
# 4: (0.112, 0.105, 0.018, 0.122, 0.035, 0.164, 0.084, 0.074, 0.09, 0.176, 0.02),
# 5: (0.14, 0.131, 0.023, 0.152, 0.044, 0.136, 0.07, 0.062, 0.075, 0.147),
# 6: (0.168, 0.157, 0.028, 0.182, 0.053, 0.109, 0.056, 0.049, 0.06, 0.118),
# 7: (0.196, 0.184, 0.032, 0.213, 0.061, 0.082, 0.042, 0.037, 0.045, 0.088),
# 8: (0.224, 0.210, 0.036, 0.243, 0.071, 0.055, 0.028, 0.024, 0.03, 0.059),
# 9: (0.252, 0.236, 0.041, 0.274, 0.079, 0.027, 0.014, 0.013, 0.015, 0.029),
# 10: (0.281, 0.262, 0.045, 0.304, 0.088, 0, 0, 0, 0, 0)
# })
# add_portfolio(all_portfolios, 'VANGUARD', 'INCOME_SERIES', symbols('VTI', 'VYM', 'VXUS', 'VYMI', 'BND', 'VTC', 'BNDX'), {
# 0: (0, 0, 0, 0, 0.171, 0.515, 0.294),
# 1: (0.015, 0.044, 0.01, 0.029, 0.154, 0.463, 0.265),
# 2: (0.03, 0.088, 0.019, 0.059, 0.137, 0.412, 0.235),
# 3: (0.044, 0.132, 0.03, 0.088, 0.12, 0.36, 0.206),
# 4: (0.059, 0.176, 0.039, 0.118, 0.103, 0.309, 0.176),
# 5: (0.073, 0.221, 0.049, 0.147, 0.086, 0.257, 0.147),
# 6: (0.088, 0.265, 0.059, 0.176, 0.068, 0.206, 0.118),
# 7: (0.103, 0.309, 0.068, 0.206, 0.052, 0.154, 0.088),
# 8: (0.117, 0.353, 0.079, 0.235, 0.034, 0.103, 0.059),
# 9: (0.132, 0.397, 0.088, 0.265, 0.018, 0.051, 0.029),
# 10: (0.147, 0.441, 0.098, 0.294, 0, 0, 0)
# })
# add_portfolio(all_portfolios, 'VANGUARD', 'TAX_SERIES', symbols('VUG', 'VTV', 'VB', 'VEA', 'VWO', 'VTEB'), {
# 1: (0.024, 0.027, 0.008, 0.03, 0.009, 0.882),
# 2: (0.048, 0.054, 0.016, 0.061, 0.017, 0.784),
# 3: (0.072, 0.082, 0.022, 0.091, 0.027, 0.686),
# 4: (0.096, 0.109, 0.03, 0.122, 0.035, 0.588),
# 5: (0.12, 0.136, 0.038, 0.152, 0.044, 0.49),
# 6: (0.143, 0.163, 0.047, 0.182, 0.053, 0.392),
# 7: (0.167, 0.190, 0.055, 0.213, 0.061, 0.294),
# 8: (0.191, 0.217, 0.062, 0.243, 0.071, 0.196),
# 9: (0.215, 0.245, 0.069, 0.274, 0.079, 0.098)
# })
return all_portfolios
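def example_portfolio_lookup(all_portfolios):
    """Illustrative helper only (not part of the original module): shows how the
    nested dict built by initialize_portfolio is read back out, using Dalio's
    All-Weather entry as an example."""
    entry = all_portfolios['DALIO']['ALL_WEATHER']
    # entry['stocks'] holds the zipline symbols, entry['levels'][0] the weight tuple
    return entry['stocks'], entry['levels'][0]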
def get_mu_sigma(prices, returns_model='mean_historical_return', risk_model='ledoit_wolf',
frequency=252, span=500):
"""Get mu (returns) and sigma (asset risk) given a expected returns model and risk model
prices (pd.DataFrame) – adjusted closing prices of the asset,
each row is a date and each column is a ticker/id.
returns_model (string, optional) - Model for estimating expected returns of assets,
either 'mean_historical_return' or 'ema_historical_return' (default: mean_historical_return)
risk_model (string, optional) - Risk model to quantify risk: sample_cov, ledoit_wolf,
defaults to ledoit_wolf, as recommended by Quantopian in their lecture series on quantitative finance.
frequency (int, optional) – number of time periods in a year, defaults to 252 (the number of trading days in a year)
span (int, optional) – Applicable only for 'ema_historical_return' expected returns.
The time-span for the EMA, defaults to 500-day EMA)
"""
CHOICES_EXPECTED_RETURNS = {
'mean_historical_return': expected_returns.mean_historical_return(prices, frequency),
'ema_historical_return': expected_returns.ema_historical_return(prices, frequency, span)
}
CHOICES_RISK_MODEL = {
'sample_cov': risk_models.sample_cov(prices),
'ledoit_wolf': risk_models.CovarianceShrinkage(prices).ledoit_wolf()
}
mu = CHOICES_EXPECTED_RETURNS.get(returns_model.lower(), None)
S = CHOICES_RISK_MODEL.get(risk_model.lower(), None)
if mu is None:
        raise Exception('Expected returns model %s is not supported. Only mean_historical_return and ema_historical_return are supported currently.' % returns_model)
if S is None:
raise Exception('Risk model %s is not supported. Only sample_cov and ledoit_wolf are supported currently.' % risk_model)
return mu, S
def hrp_portfolio(prices):
"""Solve for Hierarchical risk parity portfolio
Arguments:
prices (pd.DataFrame) – adjusted (daily) closing prices of the asset, each row is a date and each column is a ticker/id.
"""
returns = expected_returns.returns_from_prices(prices)
hrp = HRPOpt(returns)
weights = hrp.hrp_portfolio()
return weights
def optimal_portfolio(mu, S, objective='max_sharpe', get_entire_frontier=True, **kwargs):
"""Solve for optimal portfolio. Wrapper for pypfopt functions
Arguments:
mu (pd.Series) - Expected annual returns
S (pd.DataFrame/np.ndarray) - Expected annual volatility
objective (string, optional) - Optimise for either 'max_sharpe', or 'min_volatility', defaults to 'max_sharpe'
get_entire_frontier (boolean, optional) - Also get the entire efficient frontier, defaults to True
"""
# if need to efficiently compute the entire efficient frontier for plotting, use CLA
# else use standard EfficientFrontier optimiser.
# (Note that optimum weights might be slightly different depending on whether CLA or EfficientFrontier was used)
Optimiser = CLA if get_entire_frontier else EfficientFrontier
op = Optimiser(mu, S)
# risk_aversion = kwargs.get("risk_aversion", 1) # only for max quadratic utility
if (objective is None):
# Get weights for both max_sharpe and min_volatility
opt_weights = []
op.max_sharpe()
opt_weights.append(op.clean_weights())
op.min_volatility()
opt_weights.append(op.clean_weights())
# ef = EfficientFrontier(mu, S)
# ef.max_quadratic_utility(risk_aversion)
# opt_weights.append(ef.clean_weights())
else:
if (objective == 'max_sharpe'):
op.max_sharpe()
elif ('min_vol' in objective):
op.min_volatility()
elif (objective == 'efficient_risk'):
target_volatility = kwargs.get("target_volatility", None)
if target_volatility is None:
print("Error: You have to specify the target_volatility!")
                return None, None, None
else:
try:
op.efficient_risk(target_volatility)
except ValueError:
# could not solve based on target_volatility, we try lookup table instead
cla = CLA(mu, S)
cla.max_sharpe()
ef_returns, ef_risks, ef_weights = cla.efficient_frontier(points=300)
lookup_v_w = dict(zip(ef_risks, ef_weights))
lookup_v_w = OrderedDict(sorted(lookup_v_w.items()))
w = lookup_v_w[min(lookup_v_w.keys(), key=lambda key: abs(key-target_volatility))]
w = [i[0] for i in w] # flatten
return w, None, None
elif (objective == 'efficient_return'):
target_return = kwargs.get("target_return", None)
if target_return is None:
print("Error: You have to specify the target_return!")
                return None, None, None
else:
op.efficient_return(target_return)
# elif (objective == 'max_quadratic_utility'):
# op.max_quadratic_utility(risk_aversion)
# # print("Using MAX_QUADRATIC UTILITY")
opt_weights = op.clean_weights()
if get_entire_frontier:
opt_returns, opt_risks, _ = op.efficient_frontier(points=200)
return opt_weights, opt_returns, opt_risks
else:
return opt_weights, None, None
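def example_max_sharpe_weights(prices):
    """Illustrative sketch only (not part of the original module): the intended
    pipeline is get_mu_sigma -> optimal_portfolio. `prices` is assumed to be a
    DataFrame of adjusted daily closes with one column per ticker."""
    mu, S = get_mu_sigma(prices, returns_model='mean_historical_return',
                         risk_model='ledoit_wolf', frequency=252)
    # with get_entire_frontier=False only the cleaned weight dict is returned
    weights, _, _ = optimal_portfolio(mu, S, objective='max_sharpe',
                                      get_entire_frontier=False)
    return weights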
def generate_markowitz_bullet(prices, returns_model='mean_historical_return', risk_model='ledoit_wolf',
frequency=252, span=500, objective='max_sharpe', num_random=20000,
ax=None, plot_individual=True, verbose=True, visualise=True):
"""Plot the markowitz bullet taking reference for plotting style from
https://towardsdatascience.com/efficient-frontier-portfolio-optimisation-in-python-e7844051e7f
Arguments:
prices (pd.DataFrame) – adjusted closing prices of the asset, each row is a date and each column is a ticker/id.
returns_model (string, optional) - Model for estimating expected returns of assets,
either 'mean_historical_return' or 'ema_historical_return' (default: mean_historical_return)
risk_model (string, optional) - Risk model to quantify risk: sample_cov, ledoit_wolf,
defaults to ledoit_wolf, as recommended by Quantopian in their lecture series on quantitative finance.
frequency (int, optional) – number of time periods in a year, defaults to 252 (the number of trading days in a year)
span (int, optional) – Applicable only for 'ema_historical_return' expected returns.
The time-span for the EMA, defaults to 500-day EMA)
objective (string, optional) - Optimise for either 'max_sharpe', or 'min_volatility', defaults to 'max_sharpe'
num_random (int, optional) - Number of random portfolios to generate for Markowitz Bullet. Set to 0 if not required
plot_individual (boolean, optional) - If True, plots individual stocks on chart as well
verbose (boolean, optional) - If True, prints out optimum portfolio allocations
visualise (boolean, optional) - If True, plots Markowitz bullet
Returns:
r_volatility, r_returns, opt_volatility, opt_returns
where
r_volatility - array containing expected annual volatility values for generated random portfolios
r_returns - array containg expected annual returns values for generated random portfolios
opt_volatility - array containing expected annual volatility values along the efficient frontier
opt_returns - array containing expected annual returns values along the efficient frontier
"""
mu, S = get_mu_sigma(prices, returns_model, risk_model, frequency, span)
opt_weights, opt_returns, opt_volatility = optimal_portfolio(mu, S, None, True)
if (verbose): print("-"*80 + "\nMaximum Sharpe Ratio Portfolio Allocation\n")
max_sharpe_returns, max_sharpe_volatility, max_sharpe_ratio = portfolio_performance(mu, S, opt_weights[0], verbose)
if (verbose): print("-"*80 + "\nMinimum Volatility Portfolio Allocation\n")
min_vol_returns, min_vol_volatility, min_vol_ratio = portfolio_performance(mu, S, opt_weights[1], verbose)
if (visualise):
plt.style.use('fivethirtyeight')
if (ax is None): fig, ax = plt.subplots(figsize=(12, 8))
# Plot Efficient Frontier (Annualised returns vs annualised volatility)
ax.plot(opt_volatility, opt_returns, linestyle='-.', linewidth=1, color='black', label='Efficient frontier')
# Plot optimum portfolios
ax.plot(max_sharpe_volatility, max_sharpe_returns, 'r*', label='Max Sharpe', markersize=20)
ax.plot(min_vol_volatility, min_vol_returns, 'g*', label='Min Volatility', markersize=20)
# Plot individual stocks in 'prices' pandas dataframe
if plot_individual:
stock_names = list(prices.columns)
s_returns = []
s_volatility = []
for i in range(len(stock_names)):
w = [0] * len(stock_names)
w[i] = 1
s_returns, s_volatility, _ = portfolio_performance(mu, S, w)
ax.plot(s_volatility, s_returns, 'o', markersize=10)
ax.annotate(stock_names[i], (s_volatility, s_returns), xytext=(10, 0), textcoords='offset points')
# Generate random portfolios
if (num_random > 0):
r_returns, r_volatility, r_sharpe = tuple(zip(*[portfolio_performance(mu, S, rand_weights(len(mu))) for _ in range(num_random)]))
ax.scatter(r_volatility, r_returns, c=r_sharpe, cmap='YlGnBu', marker='o', s=10, alpha=0.3) # random portfolios, colormap based on sharpe ratio
# Set graph's axes
ax.set_title('Markowitz Bullet')
ax.set_xlabel('annualised volatility')
ax.set_ylabel('annualised returns')
ax.legend()
plt.style.use('default')
return r_volatility, r_returns, opt_volatility, opt_returns
def rand_weights(n):
"""Produces n random weights that sum to 1"""
k = np.random.rand(n)
return k / sum(k)
def print_table_from_perf_array(perf, factor_returns=None, show_baseline=False, show_header=True):
APPROX_BDAYS_PER_MONTH = 21
# APPROX_BDAYS_PER_YEAR = 252
STAT_FUNCS_PCT = [
'Annual return',
'Cumulative returns',
'Annual volatility',
'Max drawdown',
'Daily value at risk',
'Daily turnover'
]
arr = list(zip(*[(pData[0], pf.utils.extract_rets_pos_txn_from_zipline(pData[1])[0]) for pData in perf]))
names_arr = arr[0]
returns_arr = arr[1]
# get headers
if show_header:
returns = returns_arr[0] # take first row as representative of all other backtests
date_rows = OrderedDict()
if len(returns.index) > 0:
date_rows['Start date'] = returns.index[0].strftime('%Y-%m-%d')
date_rows['End date'] = returns.index[-1].strftime('%Y-%m-%d')
date_rows['Total months'] = int(len(returns) / APPROX_BDAYS_PER_MONTH)
else:
date_rows = None
    # get performance stats
perf_stats_arr = []
# show baseline as one of the columns
if show_baseline:
perf_stats_arr.append(
perf_stats(factor_returns, factor_returns=factor_returns)
)
names_arr = ['Baseline'] + list(names_arr)
for i in range(len(returns_arr)):
perf_stats_arr.append(
perf_stats(returns_arr[i], factor_returns=factor_returns)
)
perf_stats_all = pd.concat(perf_stats_arr, axis=1)
for column in perf_stats_all.columns:
for stat, value in perf_stats_all[column].iteritems():
if stat in STAT_FUNCS_PCT:
perf_stats_all.loc[stat, column] = str(np.round(value * 100, 3)) + '%'
df = pd.DataFrame(perf_stats_all)
df.columns = names_arr
# print table
print_table(df, float_format='{0:.2f}'.format, header_rows=date_rows)
# return performance stats
return df
def plot_rolling_returns_from_perf_array(perf, factor_returns=None, extra_bm=0):
"""
Plot cumulative rolling returns, given an array of performance data and benchmark
Arguments:
----------
perf (array of tuple of (string, pd.DataFrame))
- Array of tuple of (run_name, performance). Performance is the output of zipline.api.run_algorithm
factor_returns (pd.Series, optional)
- Daily noncumulative returns of the benchmark factor to which betas are computed.
- Usually a benchmark such as market returns. This is in the same style as returns.
"""
arr = list(zip(*[(pData[0], pf.utils.extract_rets_pos_txn_from_zipline(pData[1])[0]) for pData in perf]))
names_arr = arr[0]
returns_arr = arr[1]
ax = plot_rolling_returns_multiple(returns_arr, factor_returns, names_arr=names_arr, extra_bm=extra_bm)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
return ax
def plot_rolling_returns_multiple(returns_arr, factor_returns=None, logy=False, ax=None, names_arr=None, extra_bm=0):
"""
Plots cumulative rolling returns versus some benchmarks'.
This is based on https://github.com/quantopian/pyfolio/blob/master/pyfolio/plotting.py,
but modified to plot multiple rolling returns on the same graph
Arguments
----------
    returns_arr : array of pd.Series. Each element contains daily returns of the strategy, noncumulative.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
logy : bool, optional
Whether to log-scale the y-axis.
ax : matplotlib.Axes, optional
Axes upon which to plot.
names_arr: array of names for the plots, optional
extra_bm: number of extra benchmarks. These will be assumed to be at the front of returns_array and will be plotted differently
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_xlabel('')
ax.set_ylabel('Cumulative returns')
ax.set_yscale('log' if logy else 'linear')
for i in range(len(returns_arr)):
# pData = perfData[i]
# returns, positions, transactions = pf.utils.extract_rets_pos_txn_from_zipline(pData)
returns = returns_arr[i]
returns.name = 'Portfolio %i' % i if names_arr is None else names_arr[i]
cum_rets = ep.cum_returns(returns, 1.0)
is_cum_returns = cum_rets
if (i == 0 and factor_returns is not None):
cum_factor_returns = ep.cum_returns(factor_returns[cum_rets.index], 1.0)
cum_factor_returns.plot(lw=1, color='gray', label=factor_returns.name, alpha=0.60, ax=ax, style=['-.'])
is_cum_returns.plot(lw=1, alpha=0.6, label=returns.name, ax=ax, style=['-.'] if (i < extra_bm) else None)
# is_cum_returns.plot(lw=1, alpha=0.6, label=returns.name, ax=ax)
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
major_fmt = mdates.DateFormatter('%b %Y')
ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(major_fmt)
ax.xaxis.set_minor_locator(months)
return ax
def record_social_media(context):
# print('recording social media')
record(buzz=context.buzz, sentiment=context.sentiment)
# def record_allocation(context):
# """Record allocation data for use in analysis
# """
# # targets = list([(k.symbol, np.asscalar(v)) for k, v in context.target_allocation.items()])
# targets = list([(k.symbol, v) for k, v in context.target_allocation.items()])
# record(allocation=targets, cash=context.portfolio.cash)
# # print(type(targets), targets)
def record_current_weights(context, data):
"""Record current weights of portfolio for use in analysis
"""
weights = []
for stock in context.stocks:
current_weight = (data.current(stock, 'close') * context.portfolio.positions[stock].amount) / context.portfolio.portfolio_value
weights.append((stock.symbol, current_weight))
targets = list([(k.symbol, v) for k, v in context.target_allocation.items()])
record(allocation=targets, cash=context.portfolio.cash)
record(curr_weights=weights)
def seriesToDataFrame(recorded_data):
m = []
index = []
columns = [l[0] for l in recorded_data[-1]]
for k, v in recorded_data.items():
if (type(v) == list):
m.append(list(zip(*v))[1])
# m.append((v[0][1], v[1][1], v[2][1], v[3][1]))
index.append(k)
df = pd.DataFrame(m, columns=columns)
df.index = index # by right, can just use allocation.index, but there are some NaN values
return df
# def rebalance_o(context, data, verbose):
# # allocate(context, data)
# if verbose: print("-"*30)
# # Sell first so that got more cash
# for stock in context.stocks:
# current_weight = (data.current(stock, 'close') * context.portfolio.positions[stock].amount) / context.portfolio.portfolio_value
# target_weight = context.target_allocation[stock]
# distance = current_weight - target_weight
# if (distance > 0):
# amount = -1 * (distance * context.portfolio.portfolio_value) / data.current(stock, 'close')
# if (int(amount) == 0):
# continue
# if verbose: print("Selling " + str(int(amount * -1)) + " shares of " + str(stock))
# print("-"*20)
# print("BO ", context.portfolio.cash)
# order(stock, int(amount))
# print("AO ", context.portfolio.cash)
# # Buy after selling
# for stock in context.stocks:
# current_weight = (data.current(stock, 'close') * context.portfolio.positions[stock].amount) / context.portfolio.portfolio_value
# target_weight = context.target_allocation[stock]
# distance = current_weight - target_weight
# if (distance < 0):
# amount = -1 * (distance * context.portfolio.portfolio_value) / data.current(stock, 'close')
# if (int(amount) == 0):
# continue
# if verbose: print("Buying " + str(int(amount)) + " shares of " + str(stock))
# order(stock, int(amount))
# if verbose: print('-'*30)
# # record for use in analysis
# # record_allocation(context)
def rebalance(context, data, verbose):
"""Rebalance portfolio
If function enters, rebalance is deemed to be necessary, and rebalancing will be done
"""
# allocate(context, data)
if verbose: print("-"*30)
# Just use order_target to rebalance?
for stock in context.stocks:
current_weight = (data.current(stock, 'close') * context.portfolio.positions[stock].amount) / context.portfolio.portfolio_value
if stock in context.target_allocation:
order_target_percent(stock, context.target_allocation[stock])
if verbose: print("%s: %.5f -> %.5f" % (stock, current_weight, context.target_allocation[stock]))
# record for use in analysis
# record_allocation(context)
def retrieve_social_media(path):
    social_media = pd.read_csv(path, usecols=['date', 'buzz', 'finBERT', 'sent12', 'sent26'])
    return social_media
import sys
import os
import glob
import errno
from datetime import date, datetime, timedelta
import numpy as np
from numpy import NaN, Inf, arange, isscalar, asarray, array
import pandas as pd
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from pre_processFiles.gauge_reference import gauge_reference
def set_user_params(user_params, def_params):
for key in def_params.keys():
if key in user_params.keys():
def_params[key] = user_params[key]
return def_params
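# Illustrative example only (hypothetical keys): user values override matching
# defaults, keys the user omits keep their default, and unknown user keys are ignored:
# set_user_params({'threshold': 0.2}, {'threshold': 0.1, 'min_years': 10})
# -> {'threshold': 0.2, 'min_years': 10}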
def calculate_average_each_column(matrix):
average = []
index = 0
for _ in matrix[0]:
average.append(np.nanmean(matrix[:, index]))
index = index + 1
return average
def create_folders():
folders = ['post_processedFiles/Boxplots', 'post_processedFiles/Wateryear_Type', 'post_processedFiles/Supplementary_Metrics', 'post_processedFiles/Class-1', 'post_processedFiles/Class-2', 'post_processedFiles/Class-3', 'post_processedFiles/Class-4', 'post_processedFiles/Class-5', 'post_processedFiles/Class-6', 'post_processedFiles/Class-7', 'post_processedFiles/Class-8', 'post_processedFiles/Class-9']
for folder in folders:
try:
os.makedirs(folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def median_of_time(lt):
    n = len(lt)
    if n < 1:
        return None
    elif n % 2 == 1:
        return lt[n//2].start_date
    else:
        # even count: return the midpoint between the two middle start dates
        first_date = lt[n//2 - 1].start_date
        second_date = lt[n//2].start_date
        return first_date + (second_date - first_date) / 2
def median_of_magnitude(object_array):
    flow_array = []
    for obj in object_array:
        flow_array = flow_array + obj.flow
    # the name says median, so use nanmedian rather than nanmean
    return np.nanmedian(np.array(flow_array, dtype=np.float))
def peak_magnitude(object_array):
flow_array = []
for obj in object_array:
flow_array= flow_array + obj.flow
return np.nanmax(np.array(flow_array, dtype=np.float))
def peakdet(v, delta, x = None):
"""
Converted from MATLAB script at http://billauer.co.il/peakdet.html
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx-delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn+delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
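# Illustrative example only (not part of the original module): delta is the
# minimum rise/fall required before a point is accepted as a peak or valley.
# maxtab, mintab = peakdet(np.sin(np.linspace(0, 4 * np.pi, 200)), delta=0.5)
# maxtab[:, 0] holds the indices of the two sine maxima, maxtab[:, 1] their heights.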
def add_years(d, years):
"""Return a date that's `years` years after the date (or datetime)
object `d`. Return the same calendar date (month and day) in the
destination year, if it exists, otherwise use the following day
(thus changing February 29 to March 1). Parameter: d for date object,
years for added or subtracted years
"""
try:
return d.replace(year=d.year + years)
except ValueError:
return d + (date(d.year + years, 1, 1) - date(d.year, 1, 1))
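# Illustrative examples only:
#   add_years(date(2012, 2, 29), 1) -> date(2013, 3, 1)   (2013 has no Feb 29)
#   add_years(date(2012, 2, 29), 4) -> date(2016, 2, 29)  (leap year again)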
def replace_nan(flow_data):
for index, flow in enumerate(flow_data):
if index == 0 and np.isnan(flow):
flow_data[index] = 0
elif np.isnan(flow):
flow_data[index] = flow_data[index-1]
return flow_data
def is_multiple_date_data(df):
two_digit_year = '/' in df.iloc[4,0]
year_in_front = '-' in df.iloc[4,0]
if two_digit_year and len(str(df.iloc[4,0]).split("/")) > 1:
return 2
elif year_in_front and len(str(df.iloc[4,0]).split("-")) > 1:
return 2
else:
print("return 1")
return 1
def is_two_digit_year(date):
return '/' in date[-3:]
def year_in_front(date):
return '-' in date[-3:]
def get_date_from_offset_julian_date(row_number, year, start_date):
start_year = year
end_year = year + 1
julian_start_date_start_year = datetime.strptime("{}/{}".format(start_date, start_year), "%m/%d/%Y").timetuple().tm_yday
if start_year % 4 == 0:
days_in_year_start = 366
else:
days_in_year_start = 365
if row_number <= days_in_year_start - julian_start_date_start_year:
current_year = start_year
date_delta = julian_start_date_start_year + row_number
current_date = datetime(current_year, 1, 1) + timedelta(date_delta - 1)
else:
current_year = end_year
date_delta = row_number - days_in_year_start + julian_start_date_start_year - 1
current_date = datetime(current_year, 1, 1) + timedelta(date_delta)
return current_date
def moving_average(data_array):
result_data = []
for index, data in enumerate(data_array):
if index < 2:
result_data.append(data)
elif index > len(data_array) - 3:
result_data.append(data)
else:
result_data.append((data + data_array[index - 1] + data_array[index - 2] + data_array[index + 1] + data_array[index + 2])/5)
return result_data
def get_nan_fraction_in_array(data_array):
length_array = len(data_array)
counter = 0
for data in data_array:
        if pd.isnull(data):
            counter = counter + 1
    return counter / length_array
"""Catchall module within the catchall module for really one-off stuff."""
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import map
from builtins import range
from past.builtins import basestring
from builtins import object
from past.utils import old_div
import numpy as np
import warnings
import matplotlib.mlab as mlab
import matplotlib # for spectrogrammer
import scipy.signal
import os
import re
import datetime
import glob
import wwutils
import pandas
## Deprecated stuff
from wwutils.video import OutOfFrames
def frame_dump(*args, **kwargs):
warnings.warn("use wwutils.video instead of wwutils.misc", stacklevel=2)
return wwutils.video.frame_dump(*args, **kwargs)
def frame_dump_pipe(*args, **kwargs):
warnings.warn("use wwutils.video instead of wwutils.misc", stacklevel=2)
return wwutils.video.get_frame(*args, **kwargs)
def process_chunks_of_video(*args, **kwargs):
warnings.warn("use wwutils.video instead of wwutils.misc", stacklevel=2)
return wwutils.video.process_chunks_of_video(*args, **kwargs)
def get_video_aspect(*args, **kwargs):
warnings.warn("use wwutils.video instead of wwutils.misc", stacklevel=2)
return wwutils.video.get_video_aspect(*args, **kwargs)
def get_video_duration(*args, **kwargs):
warnings.warn("use wwutils.video instead of wwutils.misc", stacklevel=2)
return wwutils.video.get_video_duration(*args, **kwargs)
##
def globjoin(dirname, pattern, normalize=True):
"""Join dirname to pattern, and glob it
If normalize: calls os.path.abspath on every result
"""
res = glob.glob(os.path.join(dirname, pattern))
if normalize:
res = list(map(os.path.abspath, res))
return res
def time_of_file(filename, fmt='%Y%m%d%H%M%S'):
"""Return the modification time of the file as a datetime.
If fmt is not None: apply strftime(fmt) and return the string
"""
dt = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
if fmt is None:
return dt
else:
return dt.strftime(fmt)
class Spectrogrammer(object):
"""Turns a waveform into a spectrogram"""
def __init__(self, NFFT=256, downsample_ratio=1, new_bin_width_sec=None,
max_freq=None, min_freq=None, Fs=1.0, noverlap=None, normalization=0,
detrend=mlab.detrend_mean, **kwargs):
"""Object to turn waveforms into spectrograms.
This is a wrapper around mlab.specgram. What this object provides
is slightly more intelligent parameter choice, and a nicer way
to trade off resolution in frequency and time. It also remembers
parameter choices, so that the same object can be used to batch
analyze a bunch of waveforms using the `transform` method.
Arguments passed to mlab.specgram
----------------------------------
NFFT - number of points used in each segment
Determines the number of frequency bins, which will be
NFFT / 2 before stripping out those outside min_freq and max_freq
noverlap - int, number of samples of overlap between segments
Default is NFFT / 2
Fs - sampling rate
detrend - detrend each segment before FFT
Default is to remove the mean (DC component)
**kwargs - anything else you want to pass to mlab.specgram
Other arguments
---------------
downsample_ratio - int, amount to downsample in time
After all other calculations are done, the temporal resolution
new_bin_width_sec - float, target temporal resolution
The returned spectrogram will have a temporal resolution as
close to this as possible.
If this is specified, then the downsample_ratio is adjusted
as necessary to achieve it. If noverlap is left as default,
it will try 50% first and then 0, to achieve the desired resolution.
If it is not possible to achieve within a factor of 2 of this
resolution, a warning is issued.
normalization - the power in each frequency bin is multiplied by
the frequency raised to this power.
0 means do nothing.
1 means that 1/f noise becomes white.
min_freq, max_freq - discard frequencies outside of this range
Returns
-------
Pxx - 2d array of power in dB. Shape (n_freq_bins, n_time_bins)
May contain -np.inf where the power was exactly zero.
freqs - 1d array of frequency bins
t - 1d array of times
Theory
------
The fundamental tradeoff is between time and frequency resolution and
is set by NFFT.
For instance, consider a 2-second signal, sampled at 1024Hz, chosen
such that the number of samples is 2048 = 2**11.
* If NFFT is 2048, you will have 1024 frequency bins (spaced
between 0KHz and 0.512KHz) and 1 time bin.
This is a simple windowed FFT**2, with the redundant negative
frequencies discarded since the waveform is real.
Note that the phase information is lost.
* If NFFT is 256, you will have 128 frequency bins and 8 time bins.
        * If NFFT is 16, you will have 8 frequency bins and 128 time bins.
In each case the FFT-induced trade-off is:
n_freq_bins * n_time_bins_per_s = Fs / 2
n_freq_bins = NFFT / 2
So far, using only NFFT, we have traded off time resolution for
frequency resolution. We can achieve greater noise reduction with
appropriate choice of noverlap and downsample_ratio. The PSD
function achieves this by using overlapping segments, then averaging
the FFT of each segment. The use of noverlap in mlab.specgram is
a bit of a misnomer, since no temporal averaging occurs there!
But this object can reinstate this temporal averaging.
For our signal above, if our desired temporal resolution is 64Hz,
that is, 128 samples total, and NFFT is 16, we have a choice.
* noverlap = 0. Non-overlapping segments. As above, 8 frequency
bins and 128 time bins. No averaging
* noverlap = 64. 50% overlap. Now we will get 256 time bins.
We can then average together each pair of adjacent bins
by downsampling, theoretically reducing the noise. Note that
this will be a biased estimate since the overlapping windows
are not redundant.
* noverlap = 127. Maximal overlap. Now we will get about 2048 bins,
which we can then downsample by 128 times to get our desired
time resolution.
The trade-off is now:
overlap_factor = (NFFT - overlap) / NFFT
n_freq_bins * n_time_bins_per_s * overlap_factor = Fs / downsample_ratio / 2
Since we always do the smoothing in the time domain, n_freq bins = NFFT / 2
and the tradeoff becomes
n_time_bins_per_s = Fs / downsample_ratio / (NFFT - overlap)
That is, to increase the time resolution, we can:
* Decrease the frequency resolution (NFFT)
* Increase the overlap, up to a maximum of NFFT - 1
This is a sort of spurious improvement because adjacent windows
are highly correlated.
* Decrease the downsample_ratio (less averaging)
To decrease noise, we can:
* Decrease the frequency resolution (NFFT)
* Increase the downsample_ratio (more averaging, fewer timepoints)
How to choose the overlap, or the downsample ratio? In general,
50% overlap seems good, since we'd like to use some averaging, but
we get limited benefit from averaging many redundant samples.
This object tries for 50% overlap and adjusts the downsample_ratio
(averaging) to achieve the requested temporal resolution. If this is
not possible, then no temporal averaging is done (just like mlab.specgram)
and the overlap is increased as necessary to achieve the requested
temporal resolution.
"""
self.downsample_ratio = downsample_ratio # until set otherwise
# figure out downsample_ratio
if new_bin_width_sec is not None:
# Set noverlap to default
if noverlap is None:
# Try to do it with 50% overlap
noverlap = old_div(NFFT, 2)
# Calculate downsample_ratio to achieve this
self.downsample_ratio = \
Fs * new_bin_width_sec / float(NFFT - noverlap)
# If this is not achievable, then try again with minimal downsampling
if np.rint(self.downsample_ratio).astype(np.int) < 1:
self.downsample_ratio = 1
noverlap = np.rint(NFFT - Fs * new_bin_width_sec).astype(np.int)
# Convert to nearest int and test if possible
self.downsample_ratio = np.rint(self.downsample_ratio).astype(np.int)
if self.downsample_ratio == 0:
print("requested temporal resolution too high, using maximum")
self.downsample_ratio = 1
# Default value for noverlap if still None
if noverlap is None:
noverlap = old_div(NFFT, 2)
self.noverlap = noverlap
# store other defaults
self.NFFT = NFFT
self.max_freq = max_freq
self.min_freq = min_freq
self.Fs = Fs
self.normalization = normalization
self.detrend = detrend
self.specgram_kwargs = kwargs
def transform(self, waveform):
"""Converts a waveform to a suitable spectrogram.
Removes high and low frequencies, rebins in time (via median)
to reduce data size. Returned times are the midpoints of the new bins.
Returns: Pxx, freqs, t
Pxx is an array of dB power of the shape (len(freqs), len(t)).
It will be real but may contain -infs due to log10
"""
# For now use NFFT of 256 to get appropriately wide freq bands, then
# downsample in time
Pxx, freqs, t = mlab.specgram(waveform, NFFT=self.NFFT,
noverlap=self.noverlap, Fs=self.Fs, detrend=self.detrend,
**self.specgram_kwargs)
# Apply the normalization
Pxx = Pxx * np.tile(freqs[:, np.newaxis] ** self.normalization,
(1, Pxx.shape[1]))
# strip out unused frequencies
if self.max_freq is not None:
Pxx = Pxx[freqs < self.max_freq, :]
freqs = freqs[freqs < self.max_freq]
if self.min_freq is not None:
Pxx = Pxx[freqs > self.min_freq, :]
freqs = freqs[freqs > self.min_freq]
# Rebin in size "downsample_ratio". If last bin is not full, discard.
Pxx_rebinned = []
t_rebinned = []
for n in range(0, len(t) - self.downsample_ratio + 1,
self.downsample_ratio):
Pxx_rebinned.append(
np.median(Pxx[:, n:n+self.downsample_ratio], axis=1).flatten())
t_rebinned.append(
np.mean(t[n:n+self.downsample_ratio]))
# Convert to arrays
Pxx_rebinned_a = np.transpose(np.array(Pxx_rebinned))
t_rebinned_a = np.array(t_rebinned)
# log it and deal with infs
Pxx_rebinned_a_log = -np.inf * np.ones_like(Pxx_rebinned_a)
Pxx_rebinned_a_log[np.nonzero(Pxx_rebinned_a)] = \
10 * np.log10(Pxx_rebinned_a[np.nonzero(Pxx_rebinned_a)])
self.freqs = freqs
self.t = t_rebinned_a
return Pxx_rebinned_a_log, freqs, t_rebinned_a
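# Illustrative sketch only (parameter values assumed, not from the original module):
# with Fs=1000. Hz, NFFT=256 and new_bin_width_sec=0.128 s, the default 50% overlap
# (noverlap=128) already gives a 128-sample (0.128 s) hop, so downsample_ratio
# works out to 1 and no extra temporal averaging is applied.
#
# sgram = Spectrogrammer(NFFT=256, Fs=1000., new_bin_width_sec=0.128,
#                        max_freq=300., normalization=1)
# Pxx_db, freqs, t = sgram.transform(np.random.randn(10000))
# Pxx_db has shape (len(freqs), len(t)) in dB; only frequencies below max_freq are kept.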
def fix_pandas_display_width(meth=1, dw=0, max_columns=12):
"""Various fixes"""
import pandas
if meth == 0:
pandas.set_option('display.width', 0)
elif meth == 1:
pandas.set_option('display.max_columns', max_columns)
pandas.set_option('display.width', None)
class UniquenessError(Exception):
pass
def only_one(l):
"""Returns the only value in l, or l itself if non-iterable.
    Compare 'unique_or_error', which allows multiple identical entries.
"""
# listify
if not hasattr(l, '__len__'):
l = [l]
# check length
if len(l) != 1:
raise UniquenessError("must contain exactly one value; instead: %r" % l)
# return entry
return l[0]
def unique_or_error(a):
"""Asserts that `a` contains only one unique value and returns it
Compare 'only_one' which does not allow repeats.
"""
u = np.unique(np.asarray(a))
if len(u) == 0:
raise UniquenessError("no unique values found, should be one")
if len(u) > 1:
raise UniquenessError("%d unique values found, should be one" % len(u))
else:
return u[0]
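# Illustrative examples only:
#   only_one([5])           -> 5
#   unique_or_error([5, 5]) -> 5    (repeats allowed as long as all values match)
#   only_one([5, 5])        -> raises UniquenessError (exactly one entry required)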
def printnow(s):
"""Write string to stdout and flush immediately"""
import sys
sys.stdout.write(str(s) + "\n")
sys.stdout.flush()
def get_file_time(filename, human=False):
import time
# Get modification time
res = os.path.getmtime(filename)
# Convert to human-readable
if human:
res = time.ctime(res)
return res
def pickle_load(filename):
import pickle
with open(filename, 'rb') as fi:
res = pickle.load(fi)
return res
def pickle_dump(obj, filename):
import pickle
with open(filename, 'wb') as fi:
pickle.dump(obj, fi)
def invert_linear_poly(p):
"""Helper function for inverting fit.coeffs"""
return old_div(np.array([1, -p[1]]).astype(np.float), p[0])
def apply_and_filter_by_regex(pattern, list_of_strings, sort=True):
"""Apply regex pattern to each string and return result.
Non-matches are ignored.
If multiple matches, the first is returned.
"""
res = []
for s in list_of_strings:
m = re.match(pattern, s)
if m is None:
continue
else:
res.append(m.groups()[0])
if sort:
return sorted(res)
else:
return res
def regex_filter(pattern, list_of_strings):
"""Apply regex pattern to each string and return those that match.
See also regex_capture
"""
return [s for s in list_of_strings if re.match(pattern, s) is not None]
def regex_capture(pattern, list_of_strings, take_index=0):
"""Apply regex pattern to each string and return a captured group.
Same as old apply_and_filter_by_regex, but without the sorting.
See also regex_filter. This will match that order.
"""
# Apply filter to each string
res_l = []
for s in list_of_strings:
m = re.match(pattern, s)
# Append the capture, if any
if m is not None:
res_l.append(m.groups()[take_index])
return res_l
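# Illustrative example only (hypothetical filenames):
#   regex_capture(r'trace_(\d+)\.dat', ['trace_01.dat', 'notes.txt', 'trace_02.dat'])
#   -> ['01', '02']    (strings that do not match are silently dropped)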
def rint(arr):
"""Round with rint and cast to int
If `arr` contains NaN, casting it to int causes a spuriously negative
number, because NaN cannot be an int. In this case we raise ValueError.
"""
if np.any(np.isnan(np.asarray(arr))):
raise ValueError("cannot convert arrays containing NaN to int")
return np.rint(arr).astype(np.int)
def is_nonstring_iter(val):
"""Check if the input is iterable, but not a string.
Recently changed this to work for Unicode.
This should catch a subset of the old way, because previously Unicode
strings caused this to return True, but now they should return False.
Will print a warning if this is not the case.
"""
# Old way
res1 = hasattr(val, '__len__') and not isinstance(val, str)
# New way
res2 = hasattr(val, '__len__') and not isinstance(val, basestring)
if res2 and not res1:
print("warning: check is_nonstring_iter")
return res2
def pick(df, isnotnull=None, **kwargs):
"""Function to pick row indices from DataFrame.
Copied from kkpandas
This method provides a nicer interface to choose rows from a DataFrame
that satisfy specified constraints on the columns.
isnotnull : column name, or list of column names, that should not be null.
See pandas.isnull for a defintion of null
All additional kwargs are interpreted as {column_name: acceptable_values}.
For each column_name, acceptable_values in kwargs.items():
The returned indices into column_name must contain one of the items
in acceptable_values.
If acceptable_values is None, then that test is skipped.
Note that this means there is currently no way to select rows that
ARE none in some column.
If acceptable_values is a single string or value (instead of a list),
then the returned rows must contain that single string or value.
TODO:
add flags for string behavior, AND/OR behavior, error if item not found,
return unique, ....
"""
msk = np.ones(len(df), dtype=np.bool)
for key, val in list(kwargs.items()):
if val is None:
continue
elif is_nonstring_iter(val):
msk &= df[key].isin(val)
else:
msk &= (df[key] == val)
if isnotnull is not None:
# Edge case
if not is_nonstring_iter(isnotnull):
isnotnull = [isnotnull]
# Filter by not null
for key in isnotnull:
            msk &= ~pandas.isnull(df[key])
    return df.index[msk]
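# Illustrative example only (hypothetical DataFrame and column names):
#   pick(trials, outcome=['hit', 'error'], isnotnull='rt')
# returns the indices of rows whose 'outcome' is 'hit' or 'error' and whose 'rt'
# is not null; a scalar kwarg such as outcome='hit' is matched by equality instead.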
import tensorflow as tf
import random as rn
import numpy as np
import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(45)
# Setting the graph-level random seed.
tf.set_random_seed(1337)
rn.seed(73)
from keras import backend as K
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
# Force Tensorflow to use a single thread
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import math
import pandas as pd
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM
from keras.layers.merge import concatenate
from keras.callbacks import TensorBoard, EarlyStopping
from keras.optimizers import Adam, Adamax
from keras.models import load_model
from keras import regularizers
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
from skopt.utils import use_named_args
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report
import sys
dim_learning_rate = Real(low=1e-4, high=1e-2, prior='log-uniform', name='learning_rate')
dim_weight_decay = Real(low=1e-3, high=0.5, prior='log-uniform', name='weight_decay')
dim_num_dense_layers = Integer(low=0, high=5, name='num_dense_layers')
dim_num_dense_nodes = Integer(low=5, high=1024, name='num_dense_nodes')
dim_activation = Categorical(categories=['relu', 'softplus'], name='activation')
dim_dropout = Real(low=1e-6, high=0.5, prior='log-uniform', name='dropout')
### DRIVER
dim_driver_weight_decay = Real(low=1e-3, high=0.5, prior='log-uniform', name='driver_weight_decay')
dim_driver_num_dense_layers = Integer(low=0, high=5, name='driver_num_dense_layers')
dim_driver_num_dense_nodes = Integer(low=5, high=1024, name='driver_num_dense_nodes')
dim_driver_activation = Categorical(categories=['relu', 'softplus'], name='driver_activation')
dim_driver_dropout = Real(low=1e-6, high=0.5, prior='log-uniform', name='driver_dropout')
### MOTIF
dim_motif_weight_decay = Real(low=1e-3, high=0.5, prior='log-uniform', name='motif_weight_decay')
dim_motif_num_dense_layers = Integer(low=0, high=5, name='motif_num_dense_layers')
dim_motif_num_dense_nodes = Integer(low=5, high=1024, name='motif_num_dense_nodes')
dim_motif_activation = Categorical(categories=['relu', 'softplus'], name='motif_activation')
dim_motif_dropout = Real(low=1e-6, high=0.5, prior='log-uniform', name='motif_dropout')
dimensions = [dim_learning_rate, dim_weight_decay, dim_dropout, dim_num_dense_layers, dim_num_dense_nodes,
dim_activation, dim_driver_weight_decay, dim_driver_dropout, dim_driver_num_dense_layers, dim_driver_num_dense_nodes,
dim_driver_activation, dim_motif_weight_decay, dim_motif_dropout, dim_motif_num_dense_layers, dim_motif_num_dense_nodes,
dim_motif_activation]
# Starting point for the optimizer; the order must match `dimensions` above
default_parameters = [1e-4, 1e-3, 1e-6, 0, 100, 'relu', 1e-3, 1e-6, 0, 100, 'relu', 1e-3, 1e-6, 0, 100, 'relu']
def log_dir_name(learning_rate, weight_decay, num_dense_layers, num_dense_nodes, activation):
log_dir = "./crossvalidation{}_logs/{}__lr_{}_wd_{}_layers_{}_nodes{}_{}/".format(fold, output_name, learning_rate, weight_decay, num_dense_layers, num_dense_nodes, activation)
## make sure that dir exists
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def create_model(learning_rate, weight_decay, dropout, num_dense_layers, num_dense_nodes, activation,
driver_weight_decay, driver_dropout, driver_num_dense_layers, driver_num_dense_nodes, driver_activation,
motif_weight_decay, motif_dropout, motif_num_dense_layers, motif_num_dense_nodes, motif_activation):
### Define model here
main_input = Input(shape=(input_size,), name='main_input')
name = 'main_layer_dense_{0}'.format(1)
main_branch = Dense(num_dense_nodes, activation=activation, name=name,
kernel_regularizer=regularizers.l2(weight_decay))(main_input)
main_branch = Dropout(dropout)(main_branch)
for i in range(1,num_dense_layers):
name = 'main_layer_dense_{0}'.format(i + 1)
main_branch = Dense(num_dense_nodes, activation=activation, name=name, kernel_regularizer=regularizers.l2(weight_decay))(main_branch)
main_branch = Dropout(dropout)(main_branch)
driver_input = Input(shape=(input_driver_size,), name='driver_input')
name = 'driver_layer_dense_{0}'.format(1)
driver_branch = Dense(driver_num_dense_nodes, activation=driver_activation, name=name,
kernel_regularizer=regularizers.l2(driver_weight_decay))(driver_input)
driver_branch = Dropout(driver_dropout)(driver_branch)
for i in range(1,driver_num_dense_layers):
name = 'driver_layer_dense_{0}'.format(i + 1)
driver_branch = Dense(driver_num_dense_nodes, activation=driver_activation, name=name, kernel_regularizer=regularizers.l2(driver_weight_decay))(driver_branch)
driver_branch = Dropout(driver_dropout)(driver_branch)
motif_input = Input(shape=(input_motif_size,), name='motif_input')
name = 'motif_layer_dense_{0}'.format(1)
motif_branch = Dense(motif_num_dense_nodes, activation=motif_activation, name=name,
kernel_regularizer=regularizers.l2(motif_weight_decay))(motif_input)
motif_branch = Dropout(motif_dropout)(motif_branch)
for i in range(1,motif_num_dense_layers):
name = 'motif_layer_dense_{0}'.format(i + 1)
motif_branch = Dense(motif_num_dense_nodes, activation=motif_activation, name=name, kernel_regularizer=regularizers.l2(motif_weight_decay))(motif_branch)
motif_branch = Dropout(motif_dropout)(motif_branch)
x = concatenate([main_branch, driver_branch, motif_branch])
predictions = Dense(num_classes, activation='softmax', name='output')(x)
optimizer = Adam(lr=learning_rate)
model = Model(inputs=[main_input, driver_input, motif_input], outputs=predictions)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
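# Illustrative smoke test (added; not part of the original script). create_model() reads
# the module-level globals input_size, input_driver_size, input_motif_size and
# num_classes, so a standalone check has to set them first -- the sizes used here are
# made-up placeholders, not values from the real data.
def _smoke_test_create_model():
    global input_size, input_driver_size, input_motif_size, num_classes
    input_size, input_driver_size, input_motif_size, num_classes = 128, 16, 96, 24
    model = create_model(*default_parameters)  # defaults defined above
    model.summary()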
@use_named_args(dimensions=dimensions)
def fitness(learning_rate, weight_decay, dropout, num_dense_layers, num_dense_nodes, activation, driver_weight_decay, driver_dropout, driver_num_dense_layers, driver_num_dense_nodes, driver_activation,
motif_weight_decay, motif_dropout, motif_num_dense_layers, motif_num_dense_nodes, motif_activation):
global best_accuracy
# best_accuracy = 0.0
print('learning rate: ', learning_rate)
print('weight_decay: ', weight_decay)
print('dropout', dropout)
print('num_dense_layers: ', num_dense_layers)
print('num_dense_nodes: ', num_dense_nodes)
print('activation: ', activation)
print('driver_weight_decay: ', driver_weight_decay)
print('driver_dropout', driver_dropout)
print('driver_num_dense_layers: ', driver_num_dense_layers)
print('driver_num_dense_nodes: ', driver_num_dense_nodes)
print('driver_activation: ', driver_activation)
print('motif_weight_decay: ', motif_weight_decay)
print('motif_dropout', motif_dropout)
print('motif_num_dense_layers: ', motif_num_dense_layers)
print('motif_num_dense_nodes: ', motif_num_dense_nodes)
print('motif_activation: ', motif_activation)
model = create_model(learning_rate=learning_rate, weight_decay=weight_decay, dropout=dropout,
num_dense_layers=num_dense_layers, num_dense_nodes=num_dense_nodes, activation=activation,
driver_weight_decay=driver_weight_decay, driver_dropout=driver_dropout,
driver_num_dense_layers=driver_num_dense_layers, driver_num_dense_nodes=driver_num_dense_nodes, driver_activation=driver_activation,
motif_weight_decay=motif_weight_decay, motif_dropout=motif_dropout,
motif_num_dense_layers=motif_num_dense_layers, motif_num_dense_nodes=motif_num_dense_nodes, motif_activation=motif_activation)
log_dir = log_dir_name(learning_rate, weight_decay, num_dense_layers, num_dense_nodes, activation)
callback_log = TensorBoard(
log_dir=log_dir,
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=True,
write_images=False)
callbacks = [callback_log]
### FIXME model.fit - x_train and y_train
history = model.fit(x=[x_train, x_train_driver, x_train_motif], y=y_train, epochs=50, batch_size=32, validation_data=validation_data,
callbacks=callbacks)
accuracy = history.history['val_acc'][-1]
print('Accuracy: {0:.2%}'.format(accuracy))
if accuracy > best_accuracy:
model.save(path_best_model)
best_accuracy = accuracy
del model
K.clear_session()
return -accuracy
def to_table(report):
report = report.splitlines()
res = []
header = [''] + report[0].split()
for row in report[2:-4]:
res.append(np.array(row.split()))
return np.array(res), header
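# Illustrative usage (added; not part of the original script): to_table() turns the
# plain-text output of sklearn's classification_report into an array of per-class rows
# plus a header. Class labels containing spaces would split into extra tokens, so this is
# only a rough parser.
def _to_table_example():
    y_true = [0, 1, 1, 2]
    y_hat = [0, 1, 0, 2]
    rows, header = to_table(classification_report(y_true, y_hat))
    # each row is [label, precision, recall, f1-score, support]
    return rows, header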
if __name__ == '__main__':
fold = int(sys.argv[1])
input_data_filename = sys.argv[2]
input_driver_data_filename = sys.argv[3]
input_motif_data_filename = sys.argv[4]
output_name = sys.argv[5]
path_best_model = './{}__crossvalidation{}_best_model.keras'.format(output_name, fold)
best_accuracy = 0.0
data = pd.read_csv("./{}.csv".format(input_data_filename), index_col=[0])
driver_data = pd.read_csv("./{}.csv".format(input_driver_data_filename),
index_col=[0])
motif_data = pd.read_csv("./{}.csv".format(input_motif_data_filename),
index_col=[0])
### Making training, test, validation data
training_samples = pd.read_csv('./training_idx_pcawg.csv', index_col=[0])
training_samples.columns = ['guid', 'split']
training_samples = training_samples[training_samples.split == fold]
frames = []
for guid_ in training_samples.guid:
frames.append(data[data['guid'].str.contains(guid_)])
training_data = pd.concat(frames)
training_data = training_data.sort_values(by=['guid'])
print(training_data.head())
validation_samples = pd.read_csv('./validation_idx_pcawg.csv', index_col=[0])
validation_samples.columns = ['guid', 'split']
validation_samples = validation_samples[validation_samples.split == fold]
validation_data = data[data['guid'].isin(validation_samples.guid)]
validation_data = validation_data.sort_values(by=['guid'])
print(validation_data.head())
test_samples = pd.read_csv('./test_idx_pcawg.csv', index_col=[0])
test_samples.columns = ['guid', 'split']
test_samples = test_samples[test_samples.split == fold]
test_data = data[data['guid'].isin(test_samples.guid)]
test_data = test_data.sort_values(by=['guid'])
print(test_data.head())
training_data = training_data.drop(['guid'], axis=1)
validation_data = validation_data.drop(['guid'], axis=1)
test_data = test_data.drop(['guid'], axis=1)
x_train = training_data.values
y_train = training_data.index
x_val = validation_data.values
y_val = validation_data.index
x_test = test_data.values
y_test = test_data.index
### DRIVER Making training, test, validation data
frames = []
for guid_ in training_samples.guid:
frames.append(driver_data[driver_data['guid'].str.contains(guid_)])
driver_training_data = pd.concat(frames)
driver_training_data = driver_training_data.sort_values(by=['guid'])
driver_validation_data = driver_data[driver_data['guid'].isin(validation_samples.guid)]
driver_validation_data = driver_validation_data.sort_values(by=['guid'])
driver_test_data = driver_data[driver_data['guid'].isin(test_samples.guid)]
driver_test_data = driver_test_data.sort_values(by=['guid'])
driver_training_data = driver_training_data.drop(['guid'], axis=1)
driver_validation_data = driver_validation_data.drop(['guid'], axis=1)
driver_test_data = driver_test_data.drop(['guid'], axis=1)
x_train_driver = driver_training_data.values
y_train_driver = driver_training_data.index
x_val_driver = driver_validation_data.values
y_val_driver = driver_validation_data.index
x_test_driver = driver_test_data.values
y_test_driver = driver_test_data.index
### MOTIF Making training, test, validation data
frames = []
for guid_ in training_samples.guid:
frames.append(motif_data[motif_data['guid'].str.contains(guid_)])
motif_training_data = pd.concat(frames)
motif_training_data = motif_training_data.sort_values(by=['guid'])
motif_validation_data = motif_data[motif_data['guid'].isin(validation_samples.guid)]
motif_validation_data = motif_validation_data.sort_values(by=['guid'])
motif_test_data = motif_data[motif_data['guid'].isin(test_samples.guid)]
motif_test_data = motif_test_data.sort_values(by=['guid'])
motif_training_data = motif_training_data.drop(['guid'], axis=1)
motif_validation_data = motif_validation_data.drop(['guid'], axis=1)
motif_test_data = motif_test_data.drop(['guid'], axis=1)
x_train_motif = motif_training_data.values
y_train_motif = motif_training_data.index
x_val_motif = motif_validation_data.values
y_val_motif = motif_validation_data.index
x_test_motif = motif_test_data.values
y_test_motif = motif_test_data.index
encoder = LabelEncoder()
test_labels_names = y_test
y_test = encoder.fit_transform(y_test)
test_labels = y_test
num_of_cancers = len(encoder.classes_)
print("Num of cancers: {}".format(num_of_cancers))
y_test = keras.utils.to_categorical(y_test, num_of_cancers)
y_train = encoder.fit_transform(y_train)
y_train = keras.utils.to_categorical(y_train, num_of_cancers)
y_val = encoder.fit_transform(y_val)
y_val = keras.utils.to_categorical(y_val, num_of_cancers)
### DRIVER + MOTIF
y_train_driver = encoder.fit_transform(y_train_driver)
y_train_driver = keras.utils.to_categorical(y_train_driver, num_of_cancers)
y_val_driver = encoder.fit_transform(y_val_driver)
y_val_driver = keras.utils.to_categorical(y_val_driver, num_of_cancers)
y_test_driver = encoder.fit_transform(y_test_driver)
y_test_driver = keras.utils.to_categorical(y_test_driver, num_of_cancers)
y_train_motif = encoder.fit_transform(y_train_motif)
y_train_motif = keras.utils.to_categorical(y_train_motif, num_of_cancers)
y_val_motif = encoder.fit_transform(y_val_motif)
y_val_motif = keras.utils.to_categorical(y_val_motif, num_of_cancers)
y_test_motif = encoder.fit_transform(y_test_motif)
y_test_motif = keras.utils.to_categorical(y_test_motif, num_of_cancers)
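    # Note (added): the same LabelEncoder instance is re-fit on each of the test, train
    # and validation label sets above, so the integer coding only stays consistent across
    # splits if every split contains the same set of cancer types.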
validation_data = ([x_val, x_val_driver, x_val_motif], y_val)
input_size = x_train.shape[1]
input_driver_size = x_train_driver.shape[1]
input_motif_size = x_train_motif.shape[1]
num_classes = num_of_cancers
### Run Bayesian optimization
    search_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func='EI', n_calls=200, x0=default_parameters, random_state=7, n_jobs=-1)
# Save Best Hyperparameters
hyps = np.asarray(search_result.x)
np.save('./crossvalidation_results/{}__fold{}_hyperparams'.format(output_name, fold), hyps, allow_pickle=False)
model = load_model(path_best_model)
# Evaluate best model on test data
result = model.evaluate(x=[x_test, x_test_driver, x_test_motif], y=y_test)
# Save best model
model.save('./crossvalidation_results/{}__fold_{}_model.keras'.format(output_name, fold))
Y_pred = model.predict([x_test, x_test_driver, x_test_motif])
y_pred = np.argmax(Y_pred, axis=1)
    a = pd.Series(test_labels_names)
import pandas as pd
import os
import csv
import gdal
import numpy as np
import re
def ad_industry_profiles_dict(dicts):
dict_names = ["load_profile_industry_chemicals_and_petrochemicals_yearlong_2018",
"load_profile_industry_food_and_tobacco_yearlong_2018",
"load_profile_industry_iron_and_steel_yearlong_2018",
"load_profile_industry_non_metalic_minerals_yearlong_2018",
"load_profile_industry_paper_yearlong_2018"]
data = []
for name, dictionary in zip(dict_names, dicts):
raw_data = pd.DataFrame(dictionary[name])
raw_data = raw_data.loc[:, ("NUTS0_code", "process", "hour", "load")]
raw_data["load"] = pd.to_numeric(raw_data["load"])
raw_data["hour"] = pd.to_numeric(raw_data["hour"])
data.append(raw_data)
return data
def ad_residential_heating_profile_dict(dictionary):
data = pd.DataFrame(dictionary["load_profile_residential_heating_yearlong_2010"])
data = data.loc[:, ("NUTS2_code", "process", "hour", "load")]
data["load"] = pd.to_numeric(data["load"])
data["hour"] = pd.to_numeric(data["hour"])
return data
def ad_industry_profiles_local(nuts0_ids):
"""
Loads industry profiles of different subcategories from different csv files.
:return: List of dataframes containing the csv files data.
    :rtype: list [pd.DataFrame, pd.DataFrame, ...].
"""
file_names = ("hotmaps_task_2.7_load_profile_industry_chemicals_and_petrochemicals_yearlong_2018.csv",
"hotmaps_task_2.7_load_profile_industry_food_and_tobacco_yearlong_2018.csv",
"hotmaps_task_2.7_load_profile_industry_iron_and_steel_yearlong_2018.csv",
"hotmaps_task_2.7_load_profile_industry_non_metalic_minerals_yearlong_2018.csv",
"hotmaps_task_2.7_load_profile_industry_paper_yearlong_2018.csv")
path = os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(path, "data")
data = []
for file_name in file_names:
sub_path = os.path.join(path, file_name)
# determine delimiter of csv file
with open(sub_path, 'r', encoding='utf-8') as csv_file:
delimiter = csv.Sniffer().sniff(csv_file.readline()).delimiter
raw_data = pd.read_csv(sub_path, sep=delimiter, usecols=("NUTS0_code", "process", "hour", "load"))
raw_data = raw_data[raw_data["NUTS0_code"].isin(nuts0_ids)]
data.append(raw_data)
return data
def ad_residential_heating_profile_local(nuts2_ids):
"""
Loads residential heating profiles from csv file.
:return: Dataframe containing the data of the csv file.
:rtype: pandas dataframe.
"""
path = os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(path, "data")
path1 = os.path.join(path, "hotmaps_task_2.7_load_profile_residential_heating_yearlong_2010_part1.csv")
path2 = os.path.join(path, "hotmaps_task_2.7_load_profile_residential_heating_yearlong_2010_part2.csv")
path3 = os.path.join(path, "hotmaps_task_2.7_load_profile_tertiary_shw_yearlong_2010_part1.csv")
path4 = os.path.join(path, "hotmaps_task_2.7_load_profile_tertiary_shw_yearlong_2010_part2.csv")
paths = [path1, path2]
# determine delimiter of csv file
data = pd.DataFrame(columns=("NUTS2_code", "process", "hour", "load"))
for path in paths:
with open(path, 'r', encoding='utf-8') as csv_file:
delimiter = csv.Sniffer().sniff(csv_file.readline()).delimiter
dat = pd.read_csv(path, sep=delimiter, usecols=("NUTS2_code", "process", "hour", "load"))
data = data.append(dat)
data = data[data["NUTS2_code"].isin(nuts2_ids)]
paths = [path3, path4]
data2 = pd.DataFrame(columns=("NUTS2_code", "process", "hour", "load"))
for path in paths:
with open(path, 'r', encoding='utf-8') as csv_file:
delimiter = csv.Sniffer().sniff(csv_file.readline()).delimiter
dat = pd.read_csv(path, sep=delimiter, usecols=("NUTS2_code", "process", "hour", "load"))
data2 = data2.append(dat)
data2 = data2[data2["NUTS2_code"].isin(nuts2_ids)]
return data, data2
def ad_tertiary_profile_local(nuts2_ids):
"""
    Loads tertiary-sector heating and sanitary hot water profiles from csv files.
:return: Dataframe containing the data of the csv file.
:rtype: pandas dataframe.
"""
path = os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(path, "data")
path1 = os.path.join(path, "data_hotmaps_task_2.7_load_profile_tertiary_heating_yearlong_2010_part1.csv")
path2 = os.path.join(path, "data_hotmaps_task_2.7_load_profile_tertiary_heating_yearlong_2010_part2.csv")
path3 = os.path.join(path, "hotmaps_task_2.7_load_profile_tertiary_shw_yearlong_2010_part1.csv")
path4 = os.path.join(path, "hotmaps_task_2.7_load_profile_tertiary_shw_yearlong_2010_part2.csv")
paths = [path1, path2]
# determine delimiter of csv file
data = pd.DataFrame(columns=("NUTS2_code", "process", "hour", "load"))
for path in paths:
with open(path, 'r', encoding='utf-8') as csv_file:
delimiter = csv.Sniffer().sniff(csv_file.readline()).delimiter
dat = pd.read_csv(path, sep=delimiter, usecols=("NUTS2_code", "process", "hour", "load"))
data = data.append(dat)
data = data[data["NUTS2_code"].isin(nuts2_ids)]
paths = [path3, path4]
data2 = pd.DataFrame(columns=("NUTS2_code", "process", "hour", "load"))
for path in paths:
with open(path, 'r', encoding='utf-8') as csv_file:
delimiter = csv.Sniffer().sniff(csv_file.readline()).delimiter
dat = pd.read_csv(path, sep=delimiter, usecols=("NUTS2_code", "process", "hour", "load"))
data2 = data2.append(dat)
data2 = data2[data2["NUTS2_code"].isin(nuts2_ids)]
return data, data2
def ad_industrial_database_local(nuts2_ids):
"""
loads data of heat sources given by a csv file.
:return: dataframe containing the data of the csv file.
:rtype: pandas dataframe.
"""
country_to_nuts0 = {"Austria": "AT", "Belgium": "BE", "Bulgaria": "BG", "Cyprus": "CY", "Czech Republic": "CZ",
"Germany": "DE", "Denmark": "DK", "Estonia": "EE", "Finland": "FI", "France": "FR",
"Greece": "EL", "Hungary": "HU", "Croatia": "HR", "Ireland": "IE", "Italy": "IT",
"Lithuania": "LT", "Luxembourg": "LU", "Latvia": "LV", "Malta": "MT", "Netherland": "NL",
"Netherlands": "Nl",
"Poland": "PL", "Portugal": "PT", "Romania": "RO", "Spain": "ES", "Sweden": "SE",
"Slovenia": "SI", "Slovakia": "SK", "United Kingdom": "UK", "Albania": "AL", "Montenegro": "ME",
"North Macedonia": "MK", "Serbia": "RS", "Turkey": "TR", "Switzerland": "CH", "Iceland": "IS",
"Liechtenstein": "LI", "Norway": "NO"}
path = os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(os.path.join(path, "data"), "Industrial_Database.csv")
# determine delimiter of csv file
with open(path, 'r', encoding='utf-8') as csv_file:
delimiter = csv.Sniffer().sniff(csv_file.readline()).delimiter
raw_data = pd.read_csv(path, sep=delimiter, usecols=("geom", "Subsector", "Excess_Heat_100-200C",
"Excess_Heat_200-500C", "Excess_Heat_500C", "Country", "Nuts2_ID"))
raw_data = raw_data[raw_data["Nuts2_ID"].isin(nuts2_ids)]
# dataframe for processed data
data = pd.DataFrame(columns=("ellipsoid", "Lon", "Lat", "Nuts0_ID", "Subsector", "Excess_heat", "Temperature", "Nuts2_ID"))
for i, site in raw_data.iterrows():
# check if site location is available
if not pd.isna(site["geom"]):
# extract ellipsoid model and (lon, lat) from the "geom" column
ellipsoid, coordinate = site["geom"].split(";")
m = re.search("[-+]?[0-9]*\.?[0-9]+.[-+]?[0-9]*\.?[0-9]+", coordinate)
m = m.group(0)
lon, lat = m.split(" ")
lon = float(lon)
lat = float(lat)
nuts0 = country_to_nuts0[site["Country"]]
# check if heat at specific temperature range is available
# TODO deal with units; hard coded temp ranges?
if not pd.isna(site["Excess_Heat_100-200C"]) and site["Excess_Heat_100-200C"] != "" and site["Excess_Heat_100-200C"] != 0:
data.loc[data.shape[0]] = (ellipsoid, lon, lat, nuts0, site["Subsector"],
site["Excess_Heat_100-200C"] * 1000, 150, site["Nuts2_ID"])
if not pd.isna(site["Excess_Heat_200-500C"]) and site["Excess_Heat_200-500C"] != "" and site["Excess_Heat_200-500C"] != 0:
data.loc[data.shape[0]] = (ellipsoid, lon, lat, nuts0,
site["Subsector"], site["Excess_Heat_200-500C"] * 1000, 350, site["Nuts2_ID"])
            if not pd.isna(site["Excess_Heat_500C"]) and site["Excess_Heat_500C"] != "" and site["Excess_Heat_500C"] != 0:
                # 500 degC is used as the representative temperature of the ">500C" band,
                # assumed by analogy with the 150/350 midpoints above
                data.loc[data.shape[0]] = (ellipsoid, lon, lat, nuts0,
                                           site["Subsector"], site["Excess_Heat_500C"] * 1000, 500, site["Nuts2_ID"])
    return data
## 03. Pandas data structures
"""
1. Series
2. DataFrame
3. Selecting a Series from a DataFrame
"""
import pandas as pd
import numpy as np
"""
### 1. Series
A Series is a one-dimensional, array-like object consisting of a sequence of values (which may be of different data types) and an associated array of data labels, called its index.
"""
#### 1.1 The simplest Series can be created from just a list of data
s1 = pd.Series([1,'a',5.2,7])
print(s1.index,s1.values)
#### 1.2 Create a Series with a labelled index
s2 = pd.Series([1, 'a', 5.2, 7], index=['d','b','a','c'])
print(s2.index)
#### 1.3 Create a Series from a Python dict
sdata={'Ohio':35000,'Texas':72000,'Oregon':16000,'Utah':5000}
s3 = pd.Series(sdata)
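#### 1.4 (added example) Passing an explicit index with a dict reorders the values; keys missing from the dict become NaN
# This follow-on example is not in the original notebook; it reuses the sdata dict above.
states = ['California', 'Ohio', 'Oregon', 'Texas']
s4 = pd.Series(sdata, index=states)
print(s4)  # California is NaN because it is not a key of sdata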
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import pandas.testing as tm
import numpy as np
from pandas_xyz import algorithms as algs
class TestAlgorithms(unittest.TestCase):
def test_displacement(self):
"""Test out my distance algorithm with hand calcs."""
lon = pd.Series([0.0, 0.0, 0.0])
lon_ew = pd.Series([0.0, 1.0, 2.0])
lat = pd.Series([0.0, 0.0, 0.0])
        lat_ns = pd.Series([0.0, 1.0, 2.0])
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR',  # AD1
                         'IN_TOUCH', 'ACTIV', 'ACCOM'],  # OC3
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
        collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
import pandas as pd
import Levenshtein
from Chapter05.regex import get_emails
data_file = "Chapter05/DataScientist.csv"
def find_levenshtein(input_string, df):
df['distance_to_' + input_string] = df['emails'].apply(lambda x: Levenshtein.distance(input_string, x))
return df
def find_jaro(input_string, df):
df['distance_to_' + input_string] = df['emails'].apply(lambda x: Levenshtein.jaro(input_string, x))
return df
def get_closest_email_lev(df, email):
df = find_levenshtein(email, df)
column_name = 'distance_to_' + email
minimum_value_email_index = df[column_name].idxmin()
email = df.loc[minimum_value_email_index]['emails']
return email
def get_closest_email_jaro(df, email):
df = find_jaro(email, df)
column_name = 'distance_to_' + email
maximum_value_email_index = df[column_name].idxmax()
email = df.loc[maximum_value_email_index]['emails']
return email
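# Illustrative toy example (added; not part of the original module). The two metrics run
# in opposite directions: Levenshtein is an edit *distance* (lower means closer), while
# Jaro is a *similarity* in [0, 1] (higher means closer) -- hence idxmin vs idxmax above.
def _toy_example():
    df = pd.DataFrame({'emails': ['john.doe@example.com', 'jane.roe@example.com']})
    target = 'jon.doe@example.com'
    return (get_closest_email_lev(df.copy(), target),
            get_closest_email_jaro(df.copy(), target))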
def main():
    df = pd.read_csv(data_file, encoding='utf-8')
# %% [markdown]
# This notebook is a -modified- VSCode notebook version of:
# https://www.kaggle.com/sheriytm/brewed-tpot-for-nyc-with-love-lb0-37
#
# You could find the train data from:
# https://www.kaggle.com/c/nyc-taxi-trip-duration/data
# You could find the fastest routes data from:
# https://www.kaggle.com/oscarleo/new-york-city-taxi-with-osrm
## All the data files should be in the same directory as this file!
#%%
# Importing necessary libraries
import os
import numpy as np
import pandas as pd
from haversine import haversine
import datetime as dt
#%%
# Loading training data
train = pd.read_csv('train.csv')
#%%
# Long and painful future generation part
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
t0 = dt.datetime.now()
train['pickup_datetime'] = pd.to_datetime(train.pickup_datetime)
train.loc[:, 'pickup_date'] = train['pickup_datetime'].dt.date
train['dropoff_datetime'] = pd.to_datetime(train.dropoff_datetime)
train['store_and_fwd_flag'] = 1 * (train.store_and_fwd_flag.values == 'Y')
train['check_trip_duration'] = (train['dropoff_datetime'] - train['pickup_datetime']).map(lambda x: x.total_seconds())
duration_difference = train[np.abs(train['check_trip_duration'].values - train['trip_duration'].values) > 1]
print('Trip_duration and datetimes are ok.') if len(duration_difference[['pickup_datetime', 'dropoff_datetime', 'trip_duration', 'check_trip_duration']]) == 0 else print('Ooops.')
train['trip_duration'].describe()
train['log_trip_duration'] = np.log(train['trip_duration'].values + 1)
# Feature Extraction
coords = np.vstack((train[['pickup_latitude', 'pickup_longitude']].values,
train[['dropoff_latitude', 'dropoff_longitude']].values))
pca = PCA().fit(coords)
train['pickup_pca0'] = pca.transform(train[['pickup_latitude', 'pickup_longitude']])[:, 0]
train['pickup_pca1'] = pca.transform(train[['pickup_latitude', 'pickup_longitude']])[:, 1]
train['dropoff_pca0'] = pca.transform(train[['dropoff_latitude', 'dropoff_longitude']])[:, 0]
train['dropoff_pca1'] = pca.transform(train[['dropoff_latitude', 'dropoff_longitude']])[:, 1]
# Distance
def haversine_array(lat1, lng1, lat2, lng2):
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
AVG_EARTH_RADIUS = 6371 # in km
lat = lat2 - lat1
lng = lng2 - lng1
d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))
return h
def dummy_manhattan_distance(lat1, lng1, lat2, lng2):
a = haversine_array(lat1, lng1, lat1, lng2)
b = haversine_array(lat1, lng1, lat2, lng1)
return a + b
def bearing_array(lat1, lng1, lat2, lng2):
AVG_EARTH_RADIUS = 6371 # in km
lng_delta_rad = np.radians(lng2 - lng1)
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
y = np.sin(lng_delta_rad) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)
return np.degrees(np.arctan2(y, x))
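# Quick sanity check (added; not part of the original notebook): Times Square to JFK is
# roughly 21 km great-circle, so haversine_array should land in that ballpark, and the
# Manhattan-style proxy should always be >= the haversine distance.
_ts_lat, _ts_lon = 40.7580, -73.9855    # Times Square (approx.)
_jfk_lat, _jfk_lon = 40.6413, -73.7781  # JFK airport (approx.)
assert 18 < haversine_array(_ts_lat, _ts_lon, _jfk_lat, _jfk_lon) < 25
assert dummy_manhattan_distance(_ts_lat, _ts_lon, _jfk_lat, _jfk_lon) >= haversine_array(_ts_lat, _ts_lon, _jfk_lat, _jfk_lon)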
train.loc[:, 'distance_haversine'] = haversine_array(train['pickup_latitude'].values, train['pickup_longitude'].values, train['dropoff_latitude'].values, train['dropoff_longitude'].values)
train.loc[:, 'distance_dummy_manhattan'] = dummy_manhattan_distance(train['pickup_latitude'].values, train['pickup_longitude'].values, train['dropoff_latitude'].values, train['dropoff_longitude'].values)
train.loc[:, 'direction'] = bearing_array(train['pickup_latitude'].values, train['pickup_longitude'].values, train['dropoff_latitude'].values, train['dropoff_longitude'].values)
train.loc[:, 'pca_manhattan'] = np.abs(train['dropoff_pca1'] - train['pickup_pca1']) + np.abs(train['dropoff_pca0'] - train['pickup_pca0'])
train.loc[:, 'center_latitude'] = (train['pickup_latitude'].values + train['dropoff_latitude'].values) / 2
train.loc[:, 'center_longitude'] = (train['pickup_longitude'].values + train['dropoff_longitude'].values) / 2
# Datetime features
train.loc[:, 'pickup_weekday'] = train['pickup_datetime'].dt.weekday
train.loc[:, 'pickup_hour_weekofyear'] = train['pickup_datetime'].dt.weekofyear
train.loc[:, 'pickup_hour'] = train['pickup_datetime'].dt.hour
train.loc[:, 'pickup_minute'] = train['pickup_datetime'].dt.minute
train.loc[:, 'pickup_dt'] = (train['pickup_datetime'] - train['pickup_datetime'].min()).dt.total_seconds()
train.loc[:, 'pickup_week_hour'] = train['pickup_weekday'] * 24 + train['pickup_hour']
train.loc[:,'week_delta'] = train['pickup_datetime'].dt.weekday + \
((train['pickup_datetime'].dt.hour + (train['pickup_datetime'].dt.minute / 60.0)) / 24.0)
# Make time features cyclic
train.loc[:,'week_delta_sin'] = np.sin((train['week_delta'] / 7) * np.pi)**2
train.loc[:,'hour_sin'] = np.sin((train['pickup_hour'] / 24) * np.pi)**2
# Speed
train.loc[:, 'avg_speed_h'] = 1000 * train['distance_haversine'] / train['trip_duration']
train.loc[:, 'avg_speed_m'] = 1000 * train['distance_dummy_manhattan'] / train['trip_duration']
train.loc[:, 'pickup_lat_bin'] = np.round(train['pickup_latitude'], 3)
train.loc[:, 'pickup_long_bin'] = np.round(train['pickup_longitude'], 3)
# Average speed for regions
gby_cols = ['pickup_lat_bin', 'pickup_long_bin']
coord_speed = train.groupby(gby_cols).mean()[['avg_speed_h']].reset_index()
coord_count = train.groupby(gby_cols).count()[['id']].reset_index()
coord_stats = pd.merge(coord_speed, coord_count, on=gby_cols)
coord_stats = coord_stats[coord_stats['id'] > 100]
train.loc[:, 'pickup_lat_bin'] = np.round(train['pickup_latitude'], 2)
train.loc[:, 'pickup_long_bin'] = np.round(train['pickup_longitude'], 2)
train.loc[:, 'center_lat_bin'] = np.round(train['center_latitude'], 2)
train.loc[:, 'center_long_bin'] = np.round(train['center_longitude'], 2)
train.loc[:, 'pickup_dt_bin'] = (train['pickup_dt'] // (3 * 3600))
# Clustering
sample_ind = np.random.permutation(len(coords))[:500000]
kmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(coords[sample_ind])
train.loc[:, 'pickup_cluster'] = kmeans.predict(train[['pickup_latitude', 'pickup_longitude']])
train.loc[:, 'dropoff_cluster'] = kmeans.predict(train[['dropoff_latitude', 'dropoff_longitude']])
t1 = dt.datetime.now()
print('Time till clustering: %i seconds' % (t1 - t0).seconds)
# Temporal and geospatial aggregation
for gby_col in ['pickup_hour', 'pickup_date', 'pickup_dt_bin',
'pickup_week_hour', 'pickup_cluster', 'dropoff_cluster']:
gby = train.groupby(gby_col).mean()[['avg_speed_h', 'avg_speed_m', 'log_trip_duration']]
gby.columns = ['%s_gby_%s' % (col, gby_col) for col in gby.columns]
train = pd.merge(train, gby, how='left', left_on=gby_col, right_index=True)
for gby_cols in [['center_lat_bin', 'center_long_bin'],
['pickup_hour', 'center_lat_bin', 'center_long_bin'],
['pickup_hour', 'pickup_cluster'], ['pickup_hour', 'dropoff_cluster'],
['pickup_cluster', 'dropoff_cluster']]:
coord_speed = train.groupby(gby_cols).mean()[['avg_speed_h']].reset_index()
coord_count = train.groupby(gby_cols).count()[['id']].reset_index()
coord_stats = pd.merge(coord_speed, coord_count, on=gby_cols)
coord_stats = coord_stats[coord_stats['id'] > 100]
coord_stats.columns = gby_cols + ['avg_speed_h_%s' % '_'.join(gby_cols), 'cnt_%s' % '_'.join(gby_cols)]
train = pd.merge(train, coord_stats, how='left', on=gby_cols)
group_freq = '60min'
df_all = train[['id', 'pickup_datetime', 'pickup_cluster', 'dropoff_cluster']]
train.loc[:, 'pickup_datetime_group'] = train['pickup_datetime'].dt.round(group_freq)
# Count trips over 60min
df_counts = df_all.set_index('pickup_datetime')[['id']].sort_index()
df_counts['count_60min'] = df_counts.isnull().rolling(group_freq).count()['id']
train = train.merge(df_counts, on='id', how='left')
# Count how many trips are going to each cluster over time
dropoff_counts = df_all \
.set_index('pickup_datetime') \
    .groupby([pd.Grouper(freq=group_freq), 'dropoff_cluster']) \
.agg({'id': 'count'}) \
.reset_index().set_index('pickup_datetime') \
.groupby('dropoff_cluster').rolling('240min').mean() \
.drop('dropoff_cluster', axis=1) \
.reset_index().set_index('pickup_datetime').shift(freq='-120min').reset_index() \
.rename(columns={'pickup_datetime': 'pickup_datetime_group', 'id': 'dropoff_cluster_count'})
train['dropoff_cluster_count'] = train[['pickup_datetime_group', 'dropoff_cluster']].merge(dropoff_counts, on=['pickup_datetime_group', 'dropoff_cluster'], how='left')['dropoff_cluster_count'].fillna(0)
# Count how many trips are going from each cluster over time
df_all = train[['id', 'pickup_datetime', 'pickup_cluster', 'dropoff_cluster']]
pickup_counts = df_all \
.set_index('pickup_datetime') \
    .groupby([pd.Grouper(freq=group_freq), 'pickup_cluster']) \
.agg({'id': 'count'}) \
.reset_index().set_index('pickup_datetime') \
.groupby('pickup_cluster').rolling('240min').mean() \
.drop('pickup_cluster', axis=1) \
.reset_index().set_index('pickup_datetime').shift(freq='-120min').reset_index() \
.rename(columns={'pickup_datetime': 'pickup_datetime_group', 'id': 'pickup_cluster_count'})
train['pickup_cluster_count'] = train[['pickup_datetime_group', 'pickup_cluster']].merge(pickup_counts, on=['pickup_datetime_group', 'pickup_cluster'], how='left')['pickup_cluster_count'].fillna(0)
# For this particular problem we can add OSRM ([Open Source Routing Machine](http://project-osrm.org/
# "OSRM")) features. This data contains the fastest routes from specific starting points in NY.
fr1 = pd.read_csv('fastest_routes_train_part_1.csv', usecols=['id', 'total_distance', 'total_travel_time', 'number_of_steps'])
fr2 = pd.read_csv('fastest_routes_train_part_2.csv', usecols=['id', 'total_distance', 'total_travel_time', 'number_of_steps'])
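# Likely next step (added as a sketch; the excerpt ends here): stack the two OSRM parts
# and attach the route features to the trips by 'id'. Column names follow the usecols
# above; the merge key 'id' is assumed to match train['id'].
# train_street_info = pd.concat((fr1, fr2))
# train = train.merge(train_street_info, how='left', on='id')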
# -*- coding: utf-8 -*-
# pylint: disable
"""
tests.test_validator
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2018 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import numpy as np
from pandas import Series, date_range, to_datetime
from dfmapper import (
DateRangeValidator,
DtypeValidator,
MinValueValidator,
MaxValueValidator,
MaxLengthValidator,
NullableValidator
)
def test_date_range_validator():
series_1 = Series(['2018-01-15 12:00:00', '2018-01-15 13:00:00'])
series_1 = to_datetime(series_1)
date_range_validator_1 = DateRangeValidator(date_range(start='2018-01-01', end='2018-01-30'))
assert date_range_validator_1(series_1) == True
series_2 = Series(['2018-01-15 12:00:00', '2018-01-15 13:00:00'])
series_2 = to_datetime(series_2)
date_range_validator_2 = DateRangeValidator(date_range(start='2018-01-20', end='2018-01-30'))
assert date_range_validator_2(series_2) == False
def test_type_validator():
series_1 = Series([1, 2, 3, 4, 5], dtype='int')
dtype_validator_1 = DtypeValidator(dtype=np.int64)
assert dtype_validator_1(series_1) == True
series_2 = Series([1, 2, 3, 4, None])
dtype_validator_2 = DtypeValidator(dtype=np.float64)
assert dtype_validator_2(series_2) == True
series_3 = Series([1.0, 2.0, 3.0])
dtype_validator_3 = DtypeValidator(dtype=np.float64)
assert dtype_validator_3(series_3) == True
series_4 = Series([1.0, 2.0, None])
dtype_validator_4 = DtypeValidator(dtype=np.float64)
assert dtype_validator_4(series_4) == True
series_5 = Series([True, False])
    dtype_validator_5 = DtypeValidator(dtype=np.bool_)
assert dtype_validator_5(series_5) == True
    series_6 = Series([True, False, None])
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import json
import import_db_assessment
def createTransformersVariable(transformerRule):
# Convert the JSON fields into variables like dictionaries, lists, string and numbers and return it
if str(transformerRule['action_details']['datatype']).upper() == 'DICTIONARY':
# For dictionaries
return json.loads(str(transformerRule['action_details']['value']).strip())
elif str(transformerRule['action_details']['datatype']).upper() == 'LIST':
# For Lists it is expected to be separated by comma
return str(transformerRule['action_details']['value']).split(',')
elif str(transformerRule['action_details']['datatype']).upper() == 'STRING':
# For strings we just need to carry out the content
return str(transformerRule['action_details']['value'])
elif str(transformerRule['action_details']['datatype']).upper() == 'NUMBER':
# For number we are casting it to float
return float(transformerRule['action_details']['value'])
else:
# If the JSON file has any value not expected
return None
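# Illustrative sketch (added; not part of the original module): a minimal rule dict of
# the shape createTransformersVariable() expects. The field names mirror the code above;
# the values themselves are made up.
_example_rule = {
    'action_details': {
        'datatype': 'dictionary',
        'value': '{"source": "oracle", "target": "postgres"}',
    }
}
# createTransformersVariable(_example_rule) -> {'source': 'oracle', 'target': 'postgres'}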
def runRules(transformerRules, dataFrames, singleRule, args, collectionKey, transformersTablesSchema, fileList, rulesAlreadyExecuted, transformersParameters):
# Variable to keep track of rules executed and its results and status
transformerResults = {}
# Variable to keep track and make available all the variables from the JSON file
transformersRulesVariables = {}
# Standardize Statuses
# Executed
EXECUTEDSTATUS = 'EXECUTED'
FAILEDSTATUS = 'FAILED'
if singleRule:
# If parameter is set then we will run only 1 rule
sorted_keys = []
sorted_keys.append(singleRule)
else:
# Getting ordered list of keys by priority to iterate over the dictionary
sorted_keys = sorted(transformerRules, key=lambda x: (transformerRules[x]['priority']))
# Looping on ALL rules from transformers.json
for ruleItem in sorted_keys:
stringExpression = getParsedRuleExpr(transformerRules[ruleItem]['expr1'])
iferrorExpression = getParsedRuleExpr(transformerRules[ruleItem]['iferror'])
if str(transformerRules[ruleItem]['status']).upper() == "ENABLED":
if ruleItem not in rulesAlreadyExecuted:
print('Processing rule item: "{}"\nPriority: "{}"'.format(ruleItem,transformerRules[ruleItem]['priority']))
if str(transformerRules[ruleItem]['type']).upper() == "VARIABLE" and str(transformerRules[ruleItem]['action']).upper() == "CREATE":
# transformers.json asking to create a variable which is a dictionary
try:
transformerResults[ruleItem] = {'Status': EXECUTEDSTATUS, 'Result Value': createTransformersVariable(transformerRules[ruleItem])}
transformersRulesVariables[transformerRules[ruleItem]['action_details']['varname']] = transformerResults[ruleItem]['Result Value']
except:
# In case of any issue the rule will be marked as FAILEDSTATUS
transformerResults[ruleItem] = {'Status': FAILEDSTATUS, 'Result Value': None}
transformersRulesVariables[transformerRules[ruleItem]['action_details']['varname']] = None
elif str(transformerRules[ruleItem]['type']).upper() in ("NUMBER","FREESTYLE") and str(transformerRules[ruleItem]['action']).upper() == "ADD_OR_UPDATE_COLUMN":
# transformers.json asking to add a column that is type number meaning it can be a calculation and the column to be added is NUMBER too
# Where the result of expr1 will be saved initially
dfTargetName = transformerRules[ruleItem]['action_details']['dataframe_name']
columnTargetName = transformerRules[ruleItem]['action_details']['column_name']
ruleCondition = True
try:
ruleConditionString = str(transformerRules[ruleItem]['ifcondition1'])
except KeyError:
ruleConditionString = None
# In case ifcondition1 (transformers.json) is set for the rule
if ruleConditionString is not None and ruleConditionString != "":
try:
ruleCondition = eval (ruleConditionString)
print ('ruleCondition = {}'.format(ruleCondition))
except:
print ('\n Error processing ifcondition1 "{}" for rule "{}". So, this rule will be skipped.\n'.format(ruleConditionString,ruleItem))
continue
if not ruleCondition:
print ('WARNING: This rule "{}" will be skipped because of "ifcondition1" from transformers.json is FALSE.'.format(ruleItem))
continue
try:
dataFrames[str(dfTargetName).upper()][str(columnTargetName).upper()] = execStringExpression(stringExpression,iferrorExpression, dataFrames)
df = dataFrames[str(dfTargetName).upper()]
except KeyError:
print ('\n WARNING: The rule "{}" could not be executed because the variable "{}" used in the transformers.json could not be found.\n'.format(ruleItem, str(dfTargetName).upper()))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['target_dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(newTableName).upper()] = df
if resCSVCreation:
# If CSV creation was successfully then we will add this to the list of files to be imported
fileList.append(fileName)
elif str(transformerRules[ruleItem]['type']).upper() == "FREESTYLE" and str(transformerRules[ruleItem]['action']).upper() == "CREATE_OR_REPLACE_DATAFRAME":
#
df = execStringExpression(stringExpression,iferrorExpression,dataFrames)
if df is None:
print('\n WARNING: The rule "{}" could not be executed because the expression "{}" used in the transformers.json could not be executed.\n'.format(ruleItem,stringExpression))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(transformerRules[ruleItem]['action_details']['dataframe_name']).upper()] = df
if resCSVCreation:
# If CSV creation was successful then we will add this file to the list of files to be imported
fileList.append(fileName)
elif str(transformerRules[ruleItem]['type']).upper() == "FREESTYLE" and str(transformerRules[ruleItem]['action']).upper() == "FREESTYLE":
try:
eval (stringExpression)
except KeyError:
print ('\n WARNING: The rule "{}" could not be executed because the expr1 "{}" used in the transformers.json could not be executed.\n'.format(ruleItem, stringExpression))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['target_dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(newTableName).upper()] = df
if resCSVCreation:
# If CSV creation was successful then we will add this file to the list of files to be imported
fileList.append(fileName)
return transformerResults, transformersRulesVariables, fileList, dataFrames
def execStringExpression(stringExpression,iferrorExpression, dataFrames):
try:
res = eval (stringExpression)
except:
try:
res = eval (iferrorExpression)
except:
res = None
return res
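# Illustrative usage sketch (not part of the original module): it shows the
# fallback behaviour of execStringExpression, where the primary expression is
# tried first and the "iferror" expression is only evaluated if the first fails.
def _demo_exec_string_expression():
    demo_frames = {'SAMPLE': pd.DataFrame({'A': [1, 2, 3]})}
    ok = execStringExpression("dataFrames['SAMPLE']['A'].sum()", "0", demo_frames)        # -> 6
    fallback = execStringExpression("dataFrames['MISSING']['A'].sum()", "0", demo_frames) # -> 0
    return ok, fallback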
def getParsedRuleExpr(ruleExpr):
# Function to get a clean string to be executed in eval function. The input is a string with many components separated by ; coming from transformers.json
ruleComponents = []
ruleComponents = str(ruleExpr).split(';')
finalExpression = ''
for ruleItem in ruleComponents:
ruleItem = ruleItem.strip()
finalExpression = str(finalExpression) + str(ruleItem) + ' '
return finalExpression
def getRulesFromJSON(jsonFileName):
# Read JSON file from the OS and turn it into a hash table
with open(jsonFileName) as f:
transformerRules = json.load(f)
return transformerRules
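# Illustrative sketch (the file name and the expression below are hypothetical):
# getRulesFromJSON returns the parsed rules dict, and getParsedRuleExpr joins the
# ";"-separated pieces of an expression into a single string suitable for eval.
def _demo_rules_helpers(json_path='transformers.json'):
    rules = getRulesFromJSON(json_path)             # e.g. {'rule_1': {...}, ...}
    expr = getParsedRuleExpr("dataFrames['T1'] ; ['COL_A'] ; .sum()")
    return rules, expr                              # expr == "dataFrames['T1'] ['COL_A'] .sum() "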
def getDataFrameFromCSV(csvFileName,tableName,skipRows,separatorString,transformersTablesSchema):
# Read CSV files from OS and turn it into a dataframe
paramCleanDFHeaders = False
paramGetHeadersFromConfig = True
try:
if paramGetHeadersFromConfig:
if transformersTablesSchema.get(tableName):
try:
tableHeaders = getDFHeadersFromTransformers(tableName,transformersTablesSchema)
tableHeaders = [header.upper() for header in tableHeaders]
df = pd.read_csv(csvFileName, skiprows=skipRows+1, header=None, names=tableHeaders)
except Exception as dataframeHeaderErr:
print ('\nThe filename {} for the table {} could not be imported using the column names {}.\n'.format(csvFileName,tableName,tableHeaders))
paramCleanDFHeaders = True
df = pd.read_csv(csvFileName, skiprows=skipRows)
else:
df = pd.read_csv(csvFileName, skiprows=skipRows)  # api: pandas.read_csv
"""
Plot the kinetic reactions of biomass pyrolysis for the Ranzi 2014 kinetic
scheme for biomass pyrolysis.
Reference:
Ranzi, E., 2014. Chemical Engineering Science, 110, pp 2-12.
"""
import numpy as np
import pandas as pd
# Parameters
# ------------------------------------------------------------------------------
# T = 773 # temperature for rate constants, K
# weight percent (%) cellulose, hemicellulose, lignin for beech wood
# wtcell = 48
# wthemi = 28
# wtlig = 24
# dt = 0.001 # time step, delta t
# tmax = 4 # max time, s
# t = np.linspace(0, tmax, num=int(tmax/dt)) # time vector
# nt = len(t) # total number of time steps
# Functions for Ranzi 2014 Kinetic Scheme
# ------------------------------------------------------------------------------
def ranzicell(wood, wt, T, dt, nt):
"""
Cellulose reactions CELL from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood concentration, kg/m^3
wt = weight percent wood as cellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
cell = pw*(wt/100) # initial cellulose conc. in wood
g1 = np.zeros(nt) # G1
cella = np.zeros(nt) # CELLA
lvg = np.zeros(nt) # LVG
g4 = np.zeros(nt) # G4
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 4e7 * np.exp(-31000 / (R * T)) # CELL -> G1
K2 = 4e13 * np.exp(-45000 / (R * T)) # CELL -> CELLA
K3 = 1.8 * T * np.exp(-10000 / (R * T)) # CELLA -> LVG
K4 = 0.5e9 * np.exp(-29000 / (R * T)) # CELLA -> G4
# sum of moles in each group, mol
sumg1 = 11 # sum of G1
sumg4 = 4.08 # sum of G4
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * cell[i-1] # CELL -> G1
r2 = K2 * cell[i-1] # CELL -> CELLA
r3 = K3 * cella[i-1] # CELLA -> LVG
r4 = K4 * cella[i-1] # CELLA -> G4
cell[i] = cell[i-1] - (r1+r2)*dt # CELL
g1[i] = g1[i-1] + r1*dt # G1
cella[i] = cella[i-1] + r2*dt - (r3+r4)*dt # CELLA
lvg[i] = lvg[i-1] + r3*dt # LVG
g4[i] = g4[i-1] + r4*dt # G4
# store main groups in array
main = np.array([cell, g1, cella, lvg, g4])
# total group concentration per total moles in that group, (kg/m^3) / mol
fg1 = g1/sumg1 # fraction of G1
fg4 = g4/sumg4 # fraction of G4
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.16*fg4 # CO
prod[1] = 0.21*fg4 # CO2
prod[2] = 0.4*fg4 # CH2O
prod[3] = 0.02*fg4 # HCOOH
prod[5] = 0.1*fg4 # CH4
prod[6] = 0.2*fg4 # Glyox
prod[8] = 0.1*fg4 # C2H4O
prod[9] = 0.8*fg4 # HAA
prod[11] = 0.3*fg4 # C3H6O
prod[14] = 0.25*fg4 # HMFU
prod[15] = lvg # LVG
prod[18] = 0.1*fg4 # H2
prod[19] = 5*fg1 + 0.83*fg4 # H2O
prod[20] = 6*fg1 + 0.61*fg4 # Char
# return arrays of main groups and products as mass fraction, (-)
return main/wood, prod/wood
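# Illustrative usage sketch (not in the original script): run the cellulose
# sub-mechanism alone at 773 K for 4 s, using the beech-wood cellulose fraction
# from the commented-out parameter block above.
def _demo_ranzicell():
    dt = 0.001                    # time step, s
    nt = int(4 / dt)              # number of steps over 4 s
    main, prod = ranzicell(wood=1, wt=48, T=773, dt=dt, nt=nt)
    unreacted_cell = main[0][-1]  # CELL mass fraction remaining at t = 4 s
    char_yield = prod[20][-1]     # char mass fraction at t = 4 s
    return unreacted_cell, char_yield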
def ranzihemi(wood, wt, T, dt, nt):
"""
Hemicellulose reactions HCE from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of hemicellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
hce = pw*(wt/100) # initial hemicellulose conc. in wood
g1 = np.zeros(nt) # G1
g2 = np.zeros(nt) # G2
g3 = np.zeros(nt) # G3
g4 = np.zeros(nt) # G4
xyl = np.zeros(nt) # Xylan
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 0.33e10 * np.exp(-31000 / (R * T)) # HCE -> G1
K2 = 0.33e10 * np.exp(-33000 / (R * T)) # HCE2 -> G2
K3 = 0.05 * T * np.exp(-8000 / (R * T)) # HCE1 -> G3
K4 = 1e9 * np.exp(-32000 / (R * T)) # HCE1 -> G4
K5 = 0.9 * T * np.exp(-11000 / (R * T)) # HCE1 -> Xylan
# sum of moles in each group, mol
sumg2 = 4.625 # sum of G2
sumg3 = 4.875 # sum of G3
sumg4 = 4.775 # sum of G4
# calculate concentrations for main groups, kg/m^3
# where HCE1 as 0.4*g1/(0.4+0.6) and HCE2 as 0.6*g1/(0.4+0.6)
for i in range(1, nt):
r1 = K1 * hce[i-1] # HCE -> G1
r2 = K2 * 0.6*g1[i-1] # HCE2 -> G2
r3 = K3 * 0.4*g1[i-1] # HCE1 -> G3
r4 = K4 * 0.4*g1[i-1] # HCE1 -> G4
r5 = K5 * 0.4*g1[i-1] # HCE1 -> Xylan
hce[i] = hce[i-1] - r1*dt # HCE
g1[i] = g1[i-1] + r1*dt - (r2+r3+r4+r5)*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
g3[i] = g3[i-1] + r3*dt # G3
g4[i] = g4[i-1] + r4*dt # G4
xyl[i] = xyl[i-1] + r5*dt # Xylan
# store main groups in array
main = np.array([hce, g1, g2, g3, g4, xyl])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg2 = g2/sumg2 # fraction of G2
fg3 = g3/sumg3 # fraction of G3
fg4 = g4/sumg4 # fraction of G4
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.175*fg2 + (0.3 + 0.15)*fg3 + 0.5*fg4 # CO
prod[1] = (0.275+0.4)*fg2 + (0.5+0.25)*fg3 + (0.5+0.275)*fg4 # CO2
prod[2] = (0.5+0.925)*fg2 + 1.7*fg3 + (0.8+0.4)*fg4 # CH2O
prod[3] = 0.025*fg2 + 0.05*fg3 + 0.025*fg4 # HCOOH
prod[4] = 0.3*fg2 + (0.1+0.45)*fg4 # CH3OH
prod[5] = 0.25*fg2 + 0.625*fg3 + 0.325*fg4 # CH4
prod[7] = 0.275*fg2 + 0.375*fg3 + 0.25*fg4 # C2H4
prod[9] = 0.2*fg2 # HAA
prod[10] = 0.1*fg2 + 0.125*fg4 # C2H5OH
prod[12] = xyl # Xylan
prod[18] = 0.125*fg4 # H2
prod[19] = 0.2*fg2 + 0.25*fg3 + 0.025*fg4 # H2O
prod[20] = 1*fg2 + 0.675*fg3 + 0.875*fg4 # Char
# return arrays of main groups and products as mass fraction, (-)
return main/wood, prod/wood
def ranziligc(wood, wt, T, dt, nt):
"""
Lignin carbon rich reactions LIG-C from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of lignin-c, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
ligc = pw*(wt/100/3) # initial lignin in wood, assume 1/3 of total lignin
g1 = np.zeros(nt)
g2 = np.zeros(nt)
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 1.33e15 * np.exp(-48500 / (R * T)) # LIG-C -> G1
K2 = 1.6e6 * np.exp(-31500 / (R * T)) # LIG-CC -> G2
# sum of moles in each group, mol
sumg1 = 9.49 # sum of G1
sumg2 = 11.35 # sum of G2
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * ligc[i-1] # LIG-C -> G1
r2 = K2 * 0.35*g1[i-1]/sumg1 # LIG-CC -> G2
ligc[i] = ligc[i-1] - r1*dt # LIG-C
g1[i] = g1[i-1] + r1*dt - r2*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
# store main groups in array
main = np.array([ligc, g1, g2])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg1 = g1/sumg1 # fraction of G1
fg2 = g2/sumg2 # fraction of G2
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.32*fg1 + (0.4 + 0.4)*fg2 # CO
prod[2] = (0.3 + 0.7)*fg1 + 1*fg2 # CH2O
prod[5] = 0.495*fg1 + 0.65*fg2 # CH4
prod[7] = 0.41*fg1 + 0.6*fg2 # C2H4
prod[9] = 0.35*fg2 # HAA
prod[13] = 0.08*fg1 + 0.2*fg2 # Phenol
prod[16] = 0.1*fg1 + 0.3*fg2 # Coumaryl
prod[19] = 1*fg1 + 0.7*fg2 # H2O
prod[20] = 5.735*fg1 + 6.75*fg2 # Char
# return arrays of main groups and products as mass fractions, (-)
return main/wood, prod/wood
def ranziligh(wood, wt, T, dt, nt):
"""
Lignin hydrogen rich reactions LIG-H from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of lignin-h, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
ligh = pw*(wt/100/3) # initial lignin in wood, assume 1/3 of total lignin
g1 = np.zeros(nt) # G1
g2 = np.zeros(nt) # G2
g3 = np.zeros(nt) # G3
g4 = np.zeros(nt) # G4
g5 = np.zeros(nt) # G5
fe2macr = np.zeros(nt) # FE2MACR
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 0.67e13 * np.exp(-37500 / (R * T)) # LIG-H -> G1
K2 = 33 * np.exp(-15000 / (R * T)) # LIG-OH -> G2
K3 = 0.5e8 * np.exp(-30000 / (R * T)) # LIG-OH -> LIG
K4 = 0.083 * T * np.exp(-8000 / (R * T)) # LIG -> G4
K5 = 0.4e9 * np.exp(-30000 / (R * T)) # LIG -> G5
K6 = 2.4 * T * np.exp(-12000 / (R * T)) # LIG -> FE2MACR
# sum of moles in each group, mol
sumg1 = 2 # sum of G1
sumg2 = 20.7 # sum of G2
sumg3 = 9.85 # sum of G3
sumg4 = 11.1 # sum of G4
sumg5 = 10.7 # sum of G5
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * ligh[i-1] # LIG-H -> G1
r2 = K2 * 1*g1[i-1]/sumg1 # LIG-OH -> G2
r3 = K3 * 1*g1[i-1]/sumg1 # LIG-OH -> LIG
r4 = K4 * 1*g3[i-1]/sumg3 # LIG -> G4
r5 = K5 * 1*g3[i-1]/sumg3 # LIG -> G5
r6 = K6 * 1*g3[i-1]/sumg3 # LIG -> FE2MACR
ligh[i] = ligh[i-1] - r1*dt # LIG-H
g1[i] = g1[i-1] + r1*dt - (r2+r3)*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
g3[i] = g3[i-1] + r3*dt - (r4+r5+r6)*dt # G3
g4[i] = g4[i-1] + r4*dt # G4
g5[i] = g5[i-1] + r5*dt # G5
fe2macr[i] = fe2macr[i-1] + r6*dt # FE2MACR
# store main groups in array
main = np.array([ligh, g1, g2, g3, g4, g5, fe2macr])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg1 = g1/sumg1 # fraction of G1
fg2 = g2/sumg2 # fraction of G2
fg3 = g3/sumg3 # fraction of G3
fg4 = g4/sumg4 # fraction of G4
fg5 = g5/sumg5 # fraction of G5
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = (0.5 + 1.6)*fg2 + (0.3 + 1)*fg3 + (0.4 + 0.2)*fg4 + (1 + 0.45)*fg5 # CO
prod[1] = 0.05*fg3 # CO2
prod[2] = 3.9*fg2 + 0.6*fg3 + (2 + 0.4)*fg4 + (0.2 + 0.5)*fg5 # CH2O
prod[3] = 0.05*fg3 + 0.05*fg5 # HCOOH
prod[4] = 0.5*fg2 + (0.5 + 0.5)*fg3 + 0.4*fg4 + 0.4*fg5 # CH3OH
prod[5] = (0.1 + 1.65)*fg2 + (0.1 + 0.35)*fg3 + (0.2 + 0.4)*fg4 + (0.2 + 0.4)*fg5 # CH4
prod[6] = 0 # Glyox
prod[7] = 0.3*fg2 + 0.2*fg3 + 0.5*fg4 + 0.65*fg5 # C2H4
prod[8] = 0.2*fg5 # C2H4O
prod[9] = 0 # HAA
prod[10] = 0 # C2H5OH
prod[11] = 1*fg1 + 0.2*fg5 # C3H6O
prod[12] = 0 # Xylan
prod[13] = 0 # Phenol
prod[14] = 0 # HMFU
prod[15] = 0 # LVG
prod[16] = 0 # Coumaryl
prod[17] = fe2macr # FE2MACR
prod[18] = 0.5*fg2 + 0.15*fg3 # H2
prod[19] = 1.5*fg2 + 0.9*fg3 + 0.6*fg4 + 0.95*fg5 # H2O
prod[20] = 10.15*fg2 + 4.15*fg3 + 6*fg4 + 5.5*fg5 # Char
# return arrays of main groups and products as mass fractions, (-)
return main/wood, prod/wood
def ranziligo(wood, wt, T, dt, nt):
"""
Lignin oxygen rich reactions LIG-O from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of lignin-o, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
ligo = pw*(wt/100/3) # initial lignin in wood, assume 1/3 of total lignin
g1 = np.zeros(nt) # G1
g2 = np.zeros(nt) # G2
g3 = np.zeros(nt) # G3
g4 = np.zeros(nt) # G4
g5 = np.zeros(nt) # G5
fe2macr = np.zeros(nt) # FE2MACR
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 0.33e9 * np.exp(-25500 / (R * T)) # LIG-O -> G1
K2 = 33 * np.exp(-15000 / (R * T)) # LIG-OH -> G2
K3 = 0.5e8 * np.exp(-30000 / (R * T)) # LIG-OH -> LIG
K4 = 0.083 * T * np.exp(-8000 / (R * T)) # LIG -> G4
K5 = 0.4e9 * np.exp(-30000 / (R * T)) # LIG -> G5
K6 = 2.4 * T * np.exp(-12000 / (R * T)) # LIG -> FE2MACR
# sum of moles in each group, mol
sumg1 = 2 # sum of G1
sumg2 = 20.7 # sum of G2
sumg3 = 9.85 # sum of G3
sumg4 = 11.1 # sum of G4
sumg5 = 10.7 # sum of G5
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * ligo[i-1] # LIG-O -> G1
r2 = K2 * 1*g1[i-1]/sumg1 # LIG-OH -> G2
r3 = K3 * 1*g1[i-1]/sumg1 # LIG-OH -> LIG
r4 = K4 * 1*g3[i-1]/sumg3 # LIG -> G4
r5 = K5 * 1*g3[i-1]/sumg3 # LIG -> G5
r6 = K6 * 1*g3[i-1]/sumg3 # LIG -> FE2MACR
ligo[i] = ligo[i-1] - r1*dt # LIG-O
g1[i] = g1[i-1] + r1*dt - (r2+r3)*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
g3[i] = g3[i-1] + r3*dt - (r4+r5+r6)*dt # G3
g4[i] = g4[i-1] + r4*dt # G4
g5[i] = g5[i-1] + r5*dt # G5
fe2macr[i] = fe2macr[i-1] + r6*dt # FE2MACR
# store main groups in array
main = np.array([ligo, g1, g2, g3, g4, g5, fe2macr])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg1 = g1/sumg1 # fraction of G1
fg2 = g2/sumg2 # fraction of G2
fg3 = g3/sumg3 # fraction of G3
fg4 = g4/sumg4 # fraction of G4
fg5 = g5/sumg5 # fraction of G5
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = (0.5 + 1.6)*fg2 + (0.3 + 1)*fg3 + (0.4 + 0.2)*fg4 + (1 + 0.45)*fg5 # CO
prod[1] = 1*fg1 + 0.05*fg3 # CO2
prod[2] = 3.9*fg2 + 0.6*fg3 + (2 + 0.4)*fg4 + (0.2 + 0.5)*fg5 # CH2O
prod[3] = 0.05*fg3 + 0.05*fg5 # HCOOH
prod[4] = 0.5*fg2 + (0.5 + 0.5)*fg3 + 0.4*fg4 + 0.4*fg5 # CH3OH
prod[5] = (0.1 + 1.65)*fg2 + (0.1 + 0.35)*fg3 + (0.2 + 0.4)*fg4 + (0.2 + 0.4)*fg5 # CH4
prod[6] = 0 # Glyox
prod[7] = 0.3*fg2 + 0.2*fg3 + 0.5*fg4 + 0.65*fg5 # C2H4
prod[8] = 0.2*fg5 # C2H4O
prod[9] = 0 # HAA
prod[10] = 0 # C2H5OH
prod[11] = 0.2*fg5 # C3H6O
prod[12] = 0 # Xylan
prod[13] = 0 # Phenol
prod[14] = 0 # HMFU
prod[15] = 0 # LVG
prod[16] = 0 # Coumaryl
prod[17] = fe2macr # FE2MACR
prod[18] = 0.5*fg2 + 0.15*fg3 # H2
prod[19] = 1.5*fg2 + 0.9*fg3 + 0.6*fg4 + 0.95*fg5 # H2O
prod[20] = 10.15*fg2 + 4.15*fg3 + 6*fg4 + 5.5*fg5 # Char
# return arrays of main groups and products as mass fractions, (-)
return main/wood, prod/wood
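# Every rate constant above follows the Arrhenius form k = A*exp(-E/(R*T)) with
# E in kcal/kmol and R = 1.987 kcal/(kmol*K). The helper below (illustrative
# only, not used by the functions above) makes that shared pattern explicit.
def arrhenius(A, E, T, R=1.987):
    """Rate constant (1/s) from pre-factor A (1/s) and activation energy E (kcal/kmol)."""
    return A * np.exp(-E / (R * T))
# e.g. arrhenius(4e7, 31000, 773) reproduces K1 of the cellulose scheme at 773 K.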
# Products from Kinetic Scheme
# ------------------------------------------------------------------------------
def run_ranzi_2014(wtcell, wthemi, wtlig, temp, tmax):
step = 0.001 # time step, delta t
# tmax = 4 # max time, s
t = np.linspace(0, tmax, num=int(tmax/step)) # time vector
tot_step = len(t) # total number of time steps
# arrays for Ranzi main groups and products as mass fractions, (-)
pmcell, pcell = ranzicell(1, wt=wtcell, T=temp, dt=step, nt=tot_step) # cellulose
pmhemi, phemi = ranzihemi(1, wt=wthemi, T=temp, dt=step, nt=tot_step) # hemicellulose
pmligc, pligc = ranziligc(1, wt=wtlig, T=temp, dt=step, nt=tot_step) # lignin-c
pmligh, pligh = ranziligh(1, wt=wtlig, T=temp, dt=step, nt=tot_step) # lignin-h
pmligo, pligo = ranziligo(1, wt=wtlig, T=temp, dt=step, nt=tot_step) # lignin-o
# main cellulose groups as mass fraction, (-)
cell = pmcell[0]
g1cell = pmcell[1]
cella = pmcell[2]
lvg = pmcell[3]
g4cell = pmcell[4]
tcell = cell + g1cell + cella + lvg + g4cell # total cellulose
cell_main = {'Time (s)': t, 'cell': cell, 'g1cell': g1cell, 'cella': cella, 'lvg': lvg, 'g4cell': g4cell, 'tcell': tcell}
df_cell=pd.DataFrame(data=cell_main).set_index('Time (s)')
# main hemicellulose groups as mass fraction, (-)
hemi = pmhemi[0]
g1hemi = pmhemi[1]
g2hemi = pmhemi[2]
g3hemi = pmhemi[3]
g4hemi = pmhemi[4]
xyl = pmhemi[5]
themi = hemi + g1hemi + g2hemi + g3hemi + g4hemi + xyl # total hemicellulose
hemi_main = {'Time (s)': t, 'hemi': hemi, 'g1hemi': g1hemi, 'g2hemi': g2hemi, 'g3hemi': g3hemi, 'g4hemi': g4hemi, 'xyl': xyl, 'themi': themi}
df_hemi=pd.DataFrame(data=hemi_main).set_index('Time (s)')
# main lignin-c groups as mass fraction, (-)
ligc = pmligc[0]
g1ligc = pmligc[1]
g2ligc = pmligc[2]
tligc = ligc + g1ligc + g2ligc # total lignin-c
ligc_main = {'Time (s)': t, 'ligc': ligc, 'g1ligc': g1ligc, 'g2ligc': g2ligc, 'tligc': tligc}
df_ligc=pd.DataFrame(data=ligc_main).set_index('Time (s)')
# main lignin-h groups as mass fraction, (-)
ligh = pmligh[0]
g1ligh = pmligh[1]
g2ligh = pmligh[2]
g3ligh = pmligh[3]
g4ligh = pmligh[4]
g5ligh = pmligh[5]
fe2macr1 = pmligh[6]
tligh = ligh + g1ligh + g2ligh + g3ligh + g4ligh + g5ligh + fe2macr1 # lignin-h
ligh_main = {'Time (s)': t, 'ligh': ligh, 'g1ligh': g1ligh, 'g2ligh': g2ligh, 'g3ligh': g3ligh, 'g4ligh': g4ligh, 'g5ligh': g5ligh, 'fe2macr1': fe2macr1, 'tligh': tligh}
df_ligh = pd.DataFrame(data=ligh_main).set_index('Time (s)')
# main lignin-o groups as mass fraction, (-)
ligo = pmligo[0]
g1ligo = pmligo[1]
g2ligo = pmligo[2]
g3ligo = pmligo[3]
g4ligo = pmligo[4]
g5ligo = pmligo[5]
fe2macr2 = pmligo[6]
tligo = ligo + g1ligo + g2ligo + g3ligo + g4ligo + g5ligo + fe2macr2 # lignin-o
ligo_main = {'Time (s)': t, 'ligo': ligo, 'g1ligo': g1ligo, 'g2ligo': g2ligo , 'g3ligo': g3ligo , 'g4ligo': g4ligo , 'g5ligo': g5ligo , 'fe2macr2': fe2macr2, 'tligo': tligo}
df_ligo = pd.DataFrame(data=ligo_main).set_index('Time (s)')
# Gas, Tar, Char from Cellulose, Hemicellulose, Lignin Reactions
# ------------------------------------------------------------------------------
# chemical species as mass fraction, (-)
co = pcell[0] + phemi[0] + pligc[0] + pligh[0] + pligo[0] # CO
co2 = pcell[1] + phemi[1] + pligc[1] + pligh[1] + pligo[1] # CO2
ch2o = pcell[2] + phemi[2] + pligc[2] + pligh[2] + pligo[2] # CH2O
hcooh = pcell[3] + phemi[3] + pligc[3] + pligh[3] + pligo[3] # HCOOH
ch3oh = pcell[4] + phemi[4] + pligc[4] + pligh[4] + pligo[4] # CH3OH
ch4 = pcell[5] + phemi[5] + pligc[5] + pligh[5] + pligo[5] # CH4
glyox = pcell[6] + phemi[6] + pligc[6] + pligh[6] + pligo[6] # Glyox (C2H2O2)
c2h4 = pcell[7] + phemi[7] + pligc[7] + pligh[7] + pligo[7] # C2H4
c2h4o = pcell[8] + phemi[8] + pligc[8] + pligh[8] + pligo[8] # C2H4O
haa = pcell[9] + phemi[9] + pligc[9] + pligh[9] + pligo[9] # HAA (C2H4O2)
c2h5oh = pcell[10] + phemi[10] + pligc[10] + pligh[10] + pligo[10] # C2H5OH
c3h6o = pcell[11] + phemi[11] + pligc[11] + pligh[11] + pligo[11] # C3H6O
xyl = pcell[12] + phemi[12] + pligc[12] + pligh[12] + pligo[12] # Xylose (C5H10O5)
c6h6o = pcell[13] + phemi[13] + pligc[13] + pligh[13] + pligo[13] # C6H6O
hmfu = pcell[14] + phemi[14] + pligc[14] + pligh[14] + pligo[14] # HMFU (C6H6O3)
lvg = pcell[15] + phemi[15] + pligc[15] + pligh[15] + pligo[15] # LVG (C6H10O2)
coum = pcell[16] + phemi[16] + pligc[16] + pligh[16] + pligo[16] # p-Coumaryl (C9H10O2)
fe2macr = pcell[17] + phemi[17] + pligc[17] + pligh[17] + pligo[17] # FE2MACR (C11H12O4)
h2 = pcell[18] + phemi[18] + pligc[18] + pligh[18] + pligo[18] # H2
h2o = pcell[19] + phemi[19] + pligc[19] + pligh[19] + pligo[19] # H2O
char = pcell[20] + phemi[20] + pligc[20] + pligh[20] + pligo[20] # Char
# groups for gas and tar as mass fraction, (-)
gas = co + co2 + ch4 + c2h4 + h2
tar = ch2o + hcooh + ch3oh + glyox + c2h4o + haa + c2h5oh + c3h6o + xyl + c6h6o + hmfu + lvg + coum + fe2macr
gas_products = {'Time (s)': t, 'co': co, 'co2': co2, 'ch4': ch4 , 'c2h4': c2h4, 'h2': h2, 'total': gas}
df_gasprod = pd.DataFrame(data=gas_products).set_index('Time (s)')
tar_products = {'Time (s)': t, 'ch2o': ch2o , 'hcooh': hcooh , 'ch3oh': ch3oh , 'glyox': glyox , 'c2h4o': c2h4o , 'haa': haa , 'c2h5oh': c2h5oh , 'c3h6o': c3h6o , 'xyl': xyl , 'c6h6o': c6h6o , 'hmfu': hmfu , 'lvg': lvg , 'coum': coum , 'fe2macr': fe2macr, 'total': tar}
df_tarprod = pd.DataFrame(data=tar_products)  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 07:21:25 2020
@author: Andrei
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 14:04:52 2020
@author: Andrei
"""
import pandas as pd
from libraries import Logger
from libraries import LummetryObject
import mysql.connector as mysql
from time import time
import matplotlib.pyplot as plt
import seaborn as sns
import os
from datetime import datetime
class ExportEngine(LummetryObject):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.SERVER = self.config_data['SERVER']
self.PORT = self.config_data['PORT']
self.USER = self.config_data['USER']
self.PASS = self.config_data['PASS']
self.DB = self.config_data['DB']
self._rep_base = '_get_report_'
try:
self.connect()
except:
self.P("WARNING! Couldn't connect to the mysql DB!")
return
def connect(self):
self.db = mysql.connect(
host=self.SERVER,
port=int(self.PORT),
user=self.USER,
passwd=self.PASS,
database=self.DB
)
cursor = self.db.cursor()
## getting all the tables which are present in the configured database
cursor.execute("SHOW TABLES")
tables = cursor.fetchall()
self.D("Connected to '{}'. Found {} tables".format(self.DB, len(tables)))
self._tables = tables
return
def _load_data(self, table):
cursor = self.db.cursor()
cursor.execute('SELECT * from '+table)
data = cursor.fetchall()
cols = cursor.column_names
dct_res = {cols[i]:[x[i] for x in data] for i,c in enumerate(cols)}
return pd.DataFrame(dct_res)  # api: pandas.DataFrame
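# Illustrative sketch (commented out): intended use of _load_data once an
# ExportEngine is constructed. The exact constructor arguments depend on the
# LummetryObject base class (not shown here), and 'invoices' is a hypothetical
# table name in the configured database.
# engine = ExportEngine(config_file='config.txt')
# df_invoices = engine._load_data('invoices')  # whole MySQL table as a DataFrame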
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 16:45:25 2019
This script finds the distance between clusters of positions obtained with Linear Discriminant Analysis.
It is applied to csv files that contain the recordings of memory days, and returns a heatmap saved in a folder
that has to be specified in the absolute path.
The csv file has to follow a specific naming convention: 'Vmh' + 'number that identifies the mouse' + 'a letter (H or M)' +
'2 or 1' + '0' + 'a string (dist or loc)' (for example: Vmh19H10loc).
@author: penna
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import os
from scipy.spatial import distance
import seaborn as sns
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
def reading(file_in, sepfile):
"""
Function that opens and reads a csv file, calculates z-scored values, and creates a list of three categories that
correspond to the position of the mouse during the experiment.
:param file_in: file path of the csv file containing memory recordings
:param sepfile: delimiter of the csv file
:return: a tuple that contains a dataframe with z-scored values and a list of strings containing the position of
the mouse during the experiment
"""
data_frame = pd.read_csv(file_in, sep=sepfile)
data_frame = data_frame.drop('Frames', 1)
location_column = data_frame.loc[:, 'Beh']
data_frame = data_frame.drop('Beh', 1)
neurons_list = []
for neuron in data_frame:
neurons_list.append(neuron)
data_frame = pd.DataFrame(StandardScaler().fit_transform(data_frame), columns=neurons_list)
for i in range(len(location_column)):
location = location_column[i]
if location == 'corriHome' or location == 'CorriFar':
location_column[i] = 'corridor'
return data_frame, location_column
def find_distance(df1, df2):
"""
Function that finds the averaged distance between the elements of two dataframes.
:param df1: first dataframe
:param df2: second dataframe
:return: averaged distance between df1 and df2
"""
d = 0
count = 0
distances = []
for i in range(len(df1)):
for j in range(len(df2)):
try:
d = d + distance.euclidean((df1[0][i], df1[1][i]), (df2[0][j], df2[1][j]))
except:
print("i= " + str(i) + " j= " + str(j) + " An error occurred...\n exiting the program...")
exit(1)
count = count + 1
d2 = d / count
distances.append(d2)
d = 0
count = 0
dist = sum(distances) / len(distances)
return dist
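# Illustrative sketch (synthetic points, not LDA output): find_distance expects
# two 2-column dataframes whose columns are labelled 0 and 1, e.g. as produced
# by wrapping an LDA projection in pd.DataFrame without column names.
def _demo_find_distance():
    cluster_a = pd.DataFrame({0: [0.0, 0.1], 1: [0.0, 0.1]})
    cluster_b = pd.DataFrame({0: [3.0, 3.1], 1: [4.0, 4.1]})
    return find_distance(cluster_a, cluster_b)  # roughly 5.0 for these points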
def create_df_for_hm(list_dist, list_column_name):
"""
Function that creates a dataframe containing the distance between each pair of position clusters; this dataframe is
then used to create the heatmap that allows one to visualize the distance between each pair of clusters.
:param list_dist: list that contains the distances between each pair of position clusters (home vs corridor,
home vs farchamber, farchamber vs corridor)
:param list_column_name: column names for the dataframe (H, C, FC)
:return: a df that contains in each cell the distance between a pair of position clusters, with matching columns
and indexes
"""
df = pd.DataFrame(index=['H', 'C', 'FC'])  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 15:08:22 2015
@author: <NAME>
"""
import pandas as pd
import glob
df = pd.read_csv('source/ConfigFamiliale2009.csv', sep=";")
df.columns = ['Communes', 'Codes_Insee', 'NB_Allocataires_2009',
'COUP_0_ENF_2009', 'COUP_1_ENF_2009', 'COUP_2_ENF_2009', 'COUP_3_ENF_2009', 'COUP_4plus_ENF_2009',
'Homme_Isole_2009', 'Femme_Isolee_2009', 'MONO_1_ENF_2009', 'MONO_2_ENF_2009',
'MONO_3_ENF_2009', 'MONO_4plus_ENF_2009']
files = glob.glob('source/ConfigFamiliale*')
for path_file in files:
year = str(path_file[-8:-4])
if (year != '2009'):
df_temp = pd.read_csv(path_file, sep=';')
# Rename Col with year
year_col = ['Communes', 'Codes_Insee']
features_col = []
for col in df_temp.columns[-12:]:
year_col.append(col +"_"+ year)
features_col.append(col +"_"+ year)
# Adding key for mergeing
features_col.append('Codes_Insee')
df_temp.columns = year_col
df = pd.merge(df, df_temp[features_col], how='inner', on='Codes_Insee')  # api: pandas.merge
# -*- coding: utf-8 -*-
"""Functionality that extends on what the base StatsCan api returns in some way
TODO
----
Function to delete tables
Extend getChangedCubeList with a function that returns all tables updated
within a date range
"""
import os
import json
import zipfile
import h5py
import pandas as pd
import numpy as np
import requests
from stats_can.scwds import get_series_info_from_vector
from stats_can.scwds import get_data_from_vectors_and_latest_n_periods
from stats_can.scwds import get_bulk_vector_data_by_range
from stats_can.scwds import get_cube_metadata
from stats_can.scwds import get_full_table_download
from stats_can.helpers import parse_tables
from stats_can.helpers import parse_vectors
def get_tables_for_vectors(vectors):
""" get a list of dicts mapping vectors to tables
Parameters
----------
vectors : list of str or str
Vectors to find tables for
Returns
-------
tables_list: dict
keys for each vector number return the table, plus a key for
'all_tables' that has a list of unique tables used by vectors
"""
v_json = get_series_info_from_vector(vectors)
vectors = [j["vectorId"] for j in v_json]
tables_list = {j["vectorId"]: str(j["productId"]) for j in v_json}
tables_list["all_tables"] = []
for vector in vectors:
if tables_list[vector] not in tables_list["all_tables"]:
tables_list["all_tables"].append(tables_list[vector])
return tables_list
def table_subsets_from_vectors(vectors):
"""get a list of dicts mapping tables to vectors
Parameters
----------
vectors : list of str or str
Vectors to find tables for
Returns
-------
tables_dict: dict
keys for each table used by the vectors, matched to a list of vectors
"""
start_tables_dict = get_tables_for_vectors(vectors)
tables_dict = {t: [] for t in start_tables_dict["all_tables"]}
vecs = list(start_tables_dict.keys())[:-1] # all but the all_tables key
for vec in vecs:
tables_dict[start_tables_dict[vec]].append(vec)
return tables_dict
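# Illustrative sketch (requires a live call to the StatsCan WDS API; the vector
# numbers and table id shown are hypothetical):
# >>> table_subsets_from_vectors(["v111111", "v222222"])
# {'12345678': ['v111111', 'v222222']}  # table id -> vectors that belong to it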
def download_tables(tables, path=None, csv=True):
"""Download a json file and zip of data for a list of tables to path
Parameters
----------
tables: list of str
tables to be downloaded
path: str, default: None (will do current directory)
Where to download the table and json
csv: boolean, default True
download in CSV format, if not download SDMX
Returns
-------
downloaded: list
list of tables that were downloaded
"""
metas = get_cube_metadata(tables)
for meta in metas:
product_id = meta["productId"]
zip_url = get_full_table_download(product_id, csv=csv)
if csv:
zip_file = product_id + "-eng.zip"
else:
zip_file = product_id + ".zip"
json_file = product_id + ".json"
if path:
zip_file = os.path.join(path, zip_file)
json_file = os.path.join(path, json_file)
# Thanks http://evanhahn.com/python-requests-library-useragent/
response = requests.get(zip_url, stream=True, headers={"user-agent": None})
# Thanks https://bit.ly/2sPYPYw
with open(json_file, "w") as outfile:
json.dump(meta, outfile)
with open(zip_file, "wb") as handle:
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
downloaded = [meta["productId"] for meta in metas]
return downloaded
def zip_update_tables(path=None, csv=True):
"""check local json, update zips of outdated tables
Grabs the json files in path, checks them against the metadata on
StatsCan and grabs updated tables where there have been changes
There isn't actually a "last modified date" part to the metadata
What I'm doing is comparing the latest reference period. Almost all
data changes will at least include incremental releases, so this should
capture what I want
Parameters
----------
path: str, default: None
where to look for tables to update
csv: boolean, default: True
Downloads updates in CSV form by default, SDMX if false
Returns
-------
update_table_list: list
list of the tables that were updated
"""
local_jsons = list_zipped_tables(path=path)
tables = [j["productId"] for j in local_jsons]
remote_jsons = get_cube_metadata(tables)
update_table_list = []
for local, remote in zip(local_jsons, remote_jsons):
if local["cubeEndDate"] != remote["cubeEndDate"]:
update_table_list.append(local["productId"])
download_tables(update_table_list, path, csv=csv)
return update_table_list
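# Illustrative sketch of the intended download/refresh cycle (hits the StatsCan
# servers when called; "18100004" is just an example table number):
def _demo_refresh_tables(path="statcan_data"):
    downloaded = download_tables(["18100004"], path=path)  # first run: zip + json
    updated = zip_update_tables(path=path)                 # later runs: only stale tables
    return downloaded, updated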
def zip_table_to_dataframe(table, path=None):
"""Reads a StatsCan table into a pandas DataFrame
If a zip file of the table does not exist in path, downloads it
Parameters
----------
table: str
the table to load to dataframe from zipped csv
path: str, default: current working directory when module is loaded
where to download the tables or load them
Returns
-------
df: pandas.DataFrame
the table as a dataframe
"""
# Parse tables returns a list, can only do one table at a time here though
table = parse_tables(table)[0]
table_zip = table + "-eng.zip"
if path:
table_zip = os.path.join(path, table_zip)
if not os.path.isfile(table_zip):
download_tables([table], path)
csv_file = table + ".csv"
with zipfile.ZipFile(table_zip) as myzip:
with myzip.open(csv_file) as myfile:
col_names = pd.read_csv(myfile, nrows=0).columns
# reopen the file or it misses the first row
with myzip.open(csv_file) as myfile:
types_dict = {"VALUE": float}
types_dict.update({col: str for col in col_names if col not in types_dict})
df = pd.read_csv(myfile, dtype=types_dict)
possible_cats = [
"GEO",
"DGUID",
"STATUS",
"SYMBOL",
"TERMINATED",
"DECIMALS",
"UOM",
"UOM_ID",
"SCALAR_FACTOR",
"SCALAR_ID",
"VECTOR",
"COORDINATE",
"Wages",
"National Occupational Classification for Statistics (NOC-S)",
"Supplementary unemployment rates",
"Sex",
"Age group",
"Labour force characteristics",
"Statistics",
"Data type",
"Job permanency",
"Union coverage",
"Educational attainment",
]
actual_cats = [col for col in possible_cats if col in col_names]
df[actual_cats] = df[actual_cats].astype("category")
try:
df["REF_DATE"] = pd.to_datetime(df["REF_DATE"], format="%Y-%m")
except TypeError:
df["REF_DATE"] = | pd.to_datetime(df["REF_DATE"]) | pandas.to_datetime |
import pytest
import numpy as np
import pandas as pd
EXP_IDX = pd.MultiIndex(levels=[['model_a'], ['scen_a', 'scen_b']],
codes=[[0, 0], [0, 1]], names=['model', 'scenario'])
def test_set_meta_no_name(test_df):
idx = pd.MultiIndex(levels=[['a_scenario'], ['a_model'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)  # api: pandas.Series
#!/usr/bin/env python
# coding: utf-8
# In[10]:
import pandas as pd
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
def replaceMonthByNumber(x):
x = x.str.replace('*','')
meses = { 'Enero': 1,
'Febrero':2,
'Marzo': 3,
'Abril': 4,
'Mayo':5,
'Junio':6,
'Julio':7,
'Agosto':8,
'Septiembre':9,
'Octubre':10,
'Noviembre':11,
'Diciembre':12}
for mes in meses:
x = x.replace(mes,meses[mes])
return x
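# Illustrative sketch: the function maps Spanish month names (optionally marked
# with "*") to month numbers, operating on a whole pandas Series at once.
def _demo_replace_month():
    months = pd.Series(["Enero", "Julio*", "Diciembre"])
    return replaceMonthByNumber(months)  # -> 1, 7, 12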
def fix_data(df, year_col, month_col, join_col=True):
if join_col:
df.columns = df.columns.map(' - '.join)
df["year"] = pd.to_numeric(df[year_col].ffill(), errors='coerce')
df["month"] = replaceMonthByNumber(df[month_col])
df["day"] = 1
df["Date"] = pd.to_datetime(df[["year", "month", "day"]], errors="coerce")
df = df[df["Date"].notnull()]
df = df.set_index("Date")
del df["year"]
del df["day"]
del df["month"]
del df[year_col]
del df[month_col]
return df
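# Illustrative sketch with synthetic data (not an INDEC sheet): with
# join_col=False the frame keeps single-level column names; "Año"/"Mes" mimic
# the year/month columns of the spreadsheet.
def _demo_fix_data():
    raw = pd.DataFrame({
        "Año": [2020, None, None],
        "Mes": ["Enero", "Febrero", "Marzo"],
        "Ventas": [100.0, 110.0, 120.0],
    })
    return fix_data(raw, "Año", "Mes", join_col=False)  # indexed by Date, one row per month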
df_merge = pd.DataFrame()
df = pd.read_excel('https://www.indec.gob.ar/ftp/cuadros/economia/sh_super_mayoristas.xls', sheet_name="Cuadro 1", skiprows=2, header=[0,1])  # api: pandas.read_excel
from warnings import simplefilter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import flat_table
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.smoothing import notched_smoothing
from epimargin.utils import days, fillna, setup
simplefilter("ignore")
sns.set(palette="bright", style="darkgrid", font="Helvetica Neue")
sns.despine()
save_columns = [
"state", "date", "Rt", "Rt_upper", "Rt_lower",
"cases", "total_cases",
"recovered", "total_recovered",
"deceased", "total_deceased",
"tested", "total_tested",
"active", "total_active",
"active_per_mn", "total_active_per_mn",
"cfr", "total_cfr",
"infection_rate", "total_infection_rate",
"recovery_rate", "total_recovery_rate"
]
population = {
"AN": 0.397,
"AP": 52.221,
"AR": 1.504,
"AS": 34.293,
"BR": 119.52,
"CH": 1.179,
"CT": 28.724,
"DN": 0.959,
"DL": 19.814,
"GA": 1.54,
"GJ": 67.936,
"HR": 28.672,
"HP": 7.3,
"JK": 13.203,
"JH": 37.403,
"KA": 65.798,
"KL": 35.125,
"LA": 0.293,
"LD": 0.064,
"MP": 82.232,
"MH": 122.153,
"MN": 3.103,
"ML": 3.224,
"MZ": 1.192,
"NL": 2.15,
"OR": 43.671,
"PY": 1.504,
"PB": 29.859,
"RJ": 77.264,
"SK": 0.664,
"TN": 75.695,
"TG": 37.22,
"TR": 3.992,
"UP": 224.979,
"UT": 11.141,
"WB": 96.906,
"TT": 1332.83
}
# pipeline details, options
gamma = 0.2
window = 5 * days
CI = 0.95
smooth = notched_smoothing(window)
start_date = pd.Timestamp(year = 2020, month = 3, day = 1)
time_period = 120
def estimate(time_series: pd.Series) -> pd.DataFrame:
estimates = analytical_MPVS(time_series, CI = CI, smoothing = smooth, totals=True)
return pd.DataFrame(data = {
"date": estimates[0],
"Rt": estimates[1],
"Rt_upper": estimates[2],
"Rt_lower": estimates[3],
"total_cases": estimates[-4][2:],
"new_cases": estimates[-3],
})
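# Illustrative sketch (hypothetical names): estimate() expects a cumulative
# case-count series indexed by date, since analytical_MPVS is called with
# totals=True.
# cumulative_cases = some_state_frame["total_cases"]  # daily cumulative counts
# rt_frame = estimate(cumulative_cases)  # columns: date, Rt, Rt_upper, Rt_lower, total_cases, new_cases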
data, figs = setup()
download_data(data, 'timeseries.json', "https://api.covid19india.org/v3/")
download_data(data, 'state_wise.csv', "https://api.covid19india.org/v3/")
download_data(data, 'states.csv', "https://api.covid19india.org/v3/")
download_data(data, 'districts.csv', "https://api.covid19india.org/v3/")
# data prep
with (data/'timeseries.json').open("rb") as fp:
df = flat_table.normalize(pd.read_json(fp))  # api: pandas.read_json
import datetime as dt
from itertools import product
import sys
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from arch.data import sp500
from arch.tests.univariate.test_variance_forecasting import preserved_state
from arch.univariate import (
APARCH,
ARX,
EGARCH,
FIGARCH,
GARCH,
HARCH,
HARX,
ConstantMean,
ConstantVariance,
EWMAVariance,
MIDASHyperbolic,
RiskMetrics2006,
ZeroMean,
arch_model,
)
from arch.univariate.mean import _ar_forecast, _ar_to_impulse
SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna()
MEAN_MODELS = [
HARX(SP500, lags=[1, 5]),
ARX(SP500, lags=2),
ConstantMean(SP500),
ZeroMean(SP500),
]
VOLATILITIES = [
ConstantVariance(),
GARCH(),
FIGARCH(),
EWMAVariance(lam=0.94),
MIDASHyperbolic(),
HARCH(lags=[1, 5, 22]),
RiskMetrics2006(),
APARCH(),
EGARCH(),
]
MODEL_SPECS = list(product(MEAN_MODELS, VOLATILITIES))
IDS = [
f"{str(mean).split('(')[0]}-{str(vol).split('(')[0]}" for mean, vol in MODEL_SPECS
]
@pytest.fixture(params=MODEL_SPECS, ids=IDS)
def model_spec(request):
mean, vol = request.param
mean.volatility = vol
return mean
class TestForecasting(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(12345)
am = arch_model(None, mean="Constant", vol="Constant")
data = am.simulate(np.array([0.0, 10.0]), 1000)
data.index = pd.date_range("2000-01-01", periods=data.index.shape[0])  # api: pandas.date_range
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
with pytest.raises(TypeError):
s_1111 & 'a'
with pytest.raises(TypeError):
s_1111 & ['a', 'b', 'c', 'd']
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
with pytest.raises(TypeError):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
assert_series_equal(result, expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pandas-dev/pandas/issues/5284
with pytest.raises(TypeError):
d.__and__(s, axis='columns')
with pytest.raises(TypeError):
s & d
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
@pytest.mark.parametrize('op', [
operator.and_,
operator.or_,
operator.xor,
])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
dtype=bool)
result = op(ser, idx2)
assert_series_equal(result, expected)
@pytest.mark.parametrize("op, expected", [
(ops.rand_, pd.Index([False, True])),
(ops.ror_, pd.Index([False, True])),
(ops.rxor, pd.Index([])),
])
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)  # api: pandas.Series
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
import os
import dill  # required by load_pickled_data() below
import numpy as np
import pandas as pd
from Bio import SeqIO, Seq
import scipy.stats as st
import deepak.globals
import deepak.utilities
from deepak.library import MutationLibrary
from deepak.plot import replace_wt, all_correlations, make_heatmaps, make_fig_dir
pad = 948
target_T3 = ":917*ag"
target_G3 = ":932*ag"
target_T5 = ":50*ag"
target_G5 = ":41*ag"
# MAYBE: calculate common mutations, place in separate data structure
class Quantification:
"""
Class used to turn the Valid.csv output file into a pandas data frame suitable for plotting with sfmap or for
other inspection. The resulting data frame has rows corresponding to integer positions in the sequence and
columns corresponding to amino acids.
"""
def __init__(self, config_file, lib_fn, reference_fn, pos):
self.config_file = config_file
self.library = MutationLibrary()
self.library.add_reference_fasta(reference_fn)
self.reference_AA = Seq.translate(self.library.reference)
self.library.construct(lib_fn, pos)
# get library info to create shape of DF
self.counts = None
self.edits = None
def configure(self, config_file):
with open(config_file) as config:
for line in config:
attribute, value = line.split()
def create_df(self):
lib_members = [translate_codon(item, self.library.reference) for item in self.library.keys() if item != "wt"]
start = min(lib_members, key=lambda x: x[0])[0]
end = max(lib_members, key=lambda x: x[0])[0]
self.counts = pd.DataFrame(np.zeros((1+end-start, 20)), index=range(start, end+1), columns=deepak.globals.AA_LIST)
self.edits = self.counts.copy()
def count_csv(self, csv, target):
data = pd.read_csv(csv, header=0, index_col=0)
wt_counts = 0
wt_edits = 0
for i, row in data.iterrows():
identity = row["lib_identity"]
if identity == "wt":
wt_counts += 1
if search_snp_paf(row["cs_tag"], target):
wt_edits += 1
else:
position, aa = translate_codon(identity, self.library.reference)
self.counts.loc[position, aa] += 1
if search_snp_paf(row["cs_tag"], target):
self.edits.loc[position, aa] += 1
self.tally_wt(wt_counts, wt_edits)
return
def tally_wt(self, counts, edits):
for i in self.counts.index:
aa = self.reference_AA[i]
self.counts.loc[i, aa] = counts
self.edits.loc[i, aa] = edits
return
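# Example usage of Quantification (sketch; the file names and pos value below are
# illustrative assumptions, not taken from this repository):
#
#   quant = Quantification("run.cfg", "library.csv", "reference.fasta", pos=0)
#   quant.create_df()
#   quant.count_csv("Valid.csv", target_T3)
#   editing_rates = quant.edits / quant.counts   # per-variant editing rates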
def translate_codon(cs, reference):
""" Translates a cs string into a tuple in the form (position, amino_acid) """
fields = deepak.utilities.chunk_paf(cs)
position = int(fields[0][1:])
idx = position // 3
pad = position % 3
wt_codon = reference[3 * idx:3 * idx + 3]
codon = wt_codon
for item in fields[1:]:
if item[0] == ":":
pad += int(item[1:])  # use the full match length, not just its first digit
continue
elif item[0] == "*":
assert wt_codon[pad] == item[1].upper()
codon = codon[:pad] + item[2].upper() + codon[1 + pad:]
pad += 1
else:
raise Exception("Invalid cs string")
return idx, Seq.translate(codon)
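# Example (sketch): for a cs string ":30*ag" the substitution sits 30 matched
# bases in, i.e. codon index 10 with offset 0, so translate_codon returns
# (10, <translation of the reference codon with its first base changed to G>).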
def load_pickled_data(fn):
with open(fn, mode="rb") as infile:
analysis = dill.load(infile)
return analysis
def search_snp_paf(paf_record, target):
target_fields = deepak.utilities.chunk_paf(target)
assert len(target_fields) == 2 # Should be ":n*{ref}{var}"
target_loc = int(target_fields[0][1:])
location = 0
fields = deepak.utilities.chunk_paf(paf_record)
for i, item in enumerate(fields):
if location == target_loc and item == target_fields[1]:
return True
elif item[0] == ":":
location += int(item[1:])
elif item[0] == "*":
location += 1
else:
raise Exception("Disallowed character in CS string, could be indel")
return False
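# Example (sketch): search_snp_paf(":917*ag:30", ":917*ag") returns True, since
# the a->g substitution occurs exactly 917 matched bases into the cs string;
# search_snp_paf(":947", ":917*ag") returns False.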
def detect_edits(item, target):
"""
Count reads in item which contain target mutation.
Returns the number of reads containing the target mutation and the total number of reads
*item* is a length 2 tuple comprising a library entry in the form (*name*, *list of PafRecords or cs strings*)
*target* is a cs string specifying the target mutation to search for
"""
name = item[0]
edits = list(map(search_snp_paf, item[1], [target] * len(item[1])))
return np.sum(edits), len(edits)
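# Example (sketch): detect_edits(("wt", [":917*ag:30", ":947"]), target_T3)
# returns (1, 2): one of the two reads carries the target edit.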
def decode_paf(paf_str):
global reference_fn, pad
ref = SeqIO.read(reference_fn, "fasta")
fields = deepak.utilities.chunk_paf(paf_str)
dna_loc = int(fields[0][1:])
pos = (dna_loc + pad) // 3
result_dna = ref[:dna_loc]
for mut in fields[1:]:
if mut.startswith("*"):
result_dna += mut[2]
dna_loc += 1
else:
n = int(mut[1:])
result_dna += ref[dna_loc:dna_loc + n]
dna_loc += n
if dna_loc < len(ref):
result_dna += ref[dna_loc:]
aa = result_dna.translate()[pos - (pad // 3)]
return int(pos), aa
def add_seq_info(data_frame):
positions, amino_acids = list(zip(*map(decode_paf, data_frame["name"])))
data_frame["position"] = positions
data_frame["amino_acid"] = amino_acids
return data_frame
def read_analysis(analysis_obj, target_mutation):
data = {"name": [], "edited_counts": [], "counts": []}
for member in analysis_obj.library.items():
edited, counts = detect_edits(member, target_mutation)
data["name"].append(member[0])
data["edited_counts"].append(edited)
data["counts"].append(counts)
df = pd.DataFrame(data)
wt = df.loc[df.name == "wt"]
df = df.loc[df.name != "wt"]
return df, wt
def z(p, n, wt_rate, wt_n, pooled=True, size=1):
if n < size:
return np.nan
if pooled:
combined_p = (wt_rate * wt_n + n * p) / (n + wt_n)
return (p - wt_rate) / np.sqrt(combined_p * (1 - combined_p) * ((1 / n) + (1 / wt_n)))
return (p - wt_rate) / np.sqrt((wt_rate * (1 - wt_rate) / wt_n) + (p * (1 - p) / n))
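# Worked example (sketch): z(0.30, 100, wt_rate=0.10, wt_n=1000) pools the two
# proportions (combined_p ~= 0.118) and returns roughly +5.9, i.e. the variant's
# editing rate is far above the wild-type rate.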
def add_stats(df, wt_rate, wt_n):
n = 0
while True:
if "rep"+str(n)+"_counts" not in df.columns:
break
n += 1
x_bar = 1
for i in range(1, n):
rep = "rep"+str(i)+"_"
# Zero total counts results in NaN
p = df[rep+"counts"]/df["counts"]
# Members with zero counts in one replicate default to rate of other replicate, i.e. NaN ** 0 == 1
r = (df[rep+"edited_counts"]/df[rep+"counts"]).fillna(0)
x_bar *= np.power(r, p)
df["geom_editing_rate"] = x_bar
df["editing_rate"] = df["edited_counts"] / df["counts"]
df["z-score"] = list(map(z, df["editing_rate"], df["counts"], [wt_rate] * len(df.index), [wt_n] * len(df.index)))
df["p-value"] = st.norm.sf(np.abs(df["z-score"])) * 2 # two-tailed test
combined_p = (wt_rate * wt_n + df["editing_rate"] * df["counts"]) / (df["counts"] + wt_n)
df["std_error"] = np.sqrt(combined_p * (1 - combined_p) * ((1 / df["counts"]) + (1 / wt_n)))
return df
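# Note: add_stats augments df with a replicate-weighted geometric-mean editing
# rate, the pooled editing rate, a two-proportion z-score against wild type,
# its two-tailed p-value, and the pooled standard error of the difference.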
def reference_aa(df, reference):
start = df["position"].min()
end = df["position"].max()
ref = SeqIO.read(reference, "fasta")
wt_aa_seq = str(ref.translate()[int(start - pad // 3):int(end - pad // 3) + 1].seq)
return wt_aa_seq
def fill_aa_seq(df_seq, wt_aa_seq):
x = set(df_seq["position"])
least = min(x)
y = set(range(least, least+len(wt_aa_seq)))
# Positions expected from the reference but absent from the data get empty rows
# (the original used x.difference(y), which can never yield missing positions).
missing = y.difference(x)
while len(missing) > 0:
item = missing.pop()
new_row = pd.DataFrame({"position": [item]*20, "amino_acid": deepak.globals.AA_LIST})
df_seq = pd.concat([df_seq, new_row], sort=False, ignore_index=True)
return df_seq
def get_plotting_frame(df, values):
x = df.pivot(index="position", columns="amino_acid", values=values)
if values == "counts":
x.fillna(value=0)
return x[deepak.globals.AA_LIST]
def read_data_set(obj_fn, target):
analysis = load_pickled_data(obj_fn)
df, wt = read_analysis(analysis, target)
print("Loaded pickled data from {}".format(obj_fn))
return df, wt
def aggregate_data(base, sample, n_replicates, target, append=""):
obj_fns = [base.format(sample+str(i)+append) for i in range(1, n_replicates+1)]
data_sets = list(map(read_data_set, obj_fns, [target]*n_replicates))
return data_sets
def combine_replicates(data_sets):
df = data_sets[0][0].copy()
wt = data_sets[0][1].copy()
for i, d in enumerate(data_sets):
if i >= 1:
# Keeps columns that are in df but not d[0] unlike df += d[0]
df = df.combine(d[0], lambda x, y: x+y if np.issubdtype(x.dtype, np.number) else x, overwrite=False)
wt += d[1] # Does not have any disjoint columns
for new_col in ("edited_counts", "counts"):
name = "rep"+str(i)+"_"+new_col
df[name] = d[0][new_col]
wt[name] = d[1][new_col]
return df, wt
def load_replicate_data(sample, base_dir, n_reps, reference, append):
global reference_fn
reference_fn = reference
if not base_dir.endswith("/"):
base_dir += "/"
base = base_dir+"Pafparser-{}_aln/workspace.pyobj"
if "T" in sample:
if "3" in sample:
target = target_T3
else: # 5
target = target_T5
else: # G
if "3" in sample:
target = target_G3
else: # 5
target = target_G5
# Load data (2 replicates)
data_sets = aggregate_data(base, sample, n_reps, target, append=append)
df, wt = combine_replicates(data_sets)
return df, wt, data_sets
def calculate(df, wt, reference):
df = add_seq_info(df)
wt_n = int(wt["counts"])
wt_rate = float(wt["edited_counts"] / wt_n)
wt_aa_seq = reference_aa(df, reference)
df = add_stats(df, wt_rate, wt_n)
density = replace_wt(get_plotting_frame(df, "counts"), wt_aa_seq, wt_n)
log_pad = 0.000001
geom = get_plotting_frame(df, "geom_editing_rate")
geom_norm = replace_wt(geom / wt_rate, wt_aa_seq, 1)
geom_fold_change = np.log2(geom_norm + log_pad)
rates = get_plotting_frame(df, "editing_rate")
normalized_rates = replace_wt(rates / wt_rate, wt_aa_seq, 1)
log2_fold_change = np.log2(normalized_rates + log_pad)
z_scores = replace_wt(get_plotting_frame(df, "z-score"), wt_aa_seq, 0)
std_err = replace_wt(get_plotting_frame(df, "std_error"), wt_aa_seq, np.nan)
return wt_aa_seq, density, geom_fold_change, log2_fold_change, z_scores, std_err
def pafparser_to_csv(sample, base_dir, n_reps, reference, append):
df, wt, data_sets = load_replicate_data(sample, base_dir, n_reps, reference, append)
for i in range(2):
for item in ("counts", "edited_counts"):
wt["rep{}_{}".format(i, item)] = data_sets[i][1][item]
df_seq = add_seq_info(df)
wt_aa_seq = reference_aa(df, reference)
df_seq = fill_aa_seq(df_seq, wt_aa_seq)
full = pd.concat([df_seq, wt], sort=False, ignore_index=True)
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
zip_filename (str): Filename of the zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
f' without an associated beat found')
return aecgannset, valpd
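# Example call (sketch): extracting rhythm annotations from an aECG document.
# The xpath prefix below is an illustrative assumption; the real value depends
# on where the annotation set lives in the HL7 document.
#
#   aecg_doc = etree.parse("example_aecg.xml")
#   annset = AecgAnnotationSet()
#   annset, val_df = parse_annotations(
#       "example_aecg.xml", "", aecg_doc, annset,
#       path_prefix="./component/series/subjectOf/annotationSet",
#       annsset_xmlnode_path="./component/series/subjectOf/annotationSet",
#       valgroup="RHYTHM", log_validation=True)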
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
This function parses the `aecg_doc` xml document searching for general
information to include in the returned `Aecg`: unique identifier (UUID),
ECG date and time of collection (EGDTC), and device information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
import pandas as pd
import numpy as np
class Stats(object):
'''
Produces stats given a schedule
'''
def __init__(self, games, agg_method, date_col, h_col, a_col, outcome_col, seg_vars = []):
self.games = games
self.agg_method = agg_method
self.date_col = date_col
self.h_col = h_col
self.a_col = a_col
self.outcome_col = outcome_col
self.seg_vars = seg_vars
# Inputs: number of past games, team id, date of current game
# Output: list of most recent n games
def get_last_n_games(self, n, team_id, curr_dt):
#Filter to get past games
games = self.games[self.games[self.date_col]<curr_dt]
#Filters to get past home and away games
a_games = games[games[self.a_col]==team_id]
h_games = games[games[self.h_col] == team_id]
all_games = a_games.append(h_games)
# Days elapsed since each past game (reconstructed; the original line was truncated here).
all_games['temp_days'] = [(pd.to_datetime(curr_dt) - pd.to_datetime(d)).days for d in all_games[self.date_col]]
# @author <NAME> (<EMAIL>)
# @time 2021/5/31 9:40
# @desc [script description]
""" tmc2gmns
This script aims to transform tmc file into gmns format. Then after map-matching program
of MapMatching4GMNS, the link file generated from tmc file is matched to the underlying network (here is gmns format of osm map).
In the end, the link performance file of underlying network is generated with tmc file and the corresponding reading file.
"""
#!/usr/bin/python
# coding:utf-8
import os
import datetime
import numpy as np
import pandas as pd
import os.path
import MapMatching4GMNS
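# Typical workflow (sketch; the folder name is an illustrative assumption):
#   Convert_TMC('tmc_data')   # writes node_tmc.csv, link_tmc.csv and
#                             # link_performance_tmc.csv into the TMC data folder
# followed by map matching with MapMatching4GMNS on the generated GMNS files.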
'''step 1 Convert TMC Data into GMNS Format
Convert TMC Data into GMNS Format
'''
def create_folder(path):
if not os.path.exists(path):
os.makedirs(path)
def Convert_TMC(tmc_path):
'''build node_tmc.csv'''
print('reading tmc data...')
files= os.listdir(tmc_path)
for file in files:
if file[:18] == 'TMC_Identification':
tmc = pd.read_csv(tmc_path + os.sep + file)
break
'''build node.csv'''
print('converting tmc data into gmns format...')
node_tmc = pd.DataFrame()
node_tmc['name'] = None
node_tmc['x_coord'] = None
node_tmc['y_coord'] = None
node_tmc['z_coord'] = None
node_tmc['node_type'] = None
node_tmc['ctrl_type'] = None
node_tmc['zone_id'] = None
node_tmc['parent_node_id'] = None
node_tmc['geometry'] = None
for i in range(0,len(tmc)-1):
if tmc.loc[i+1,'road_order'] > tmc.loc[i,'road_order']:
node_tmc = node_tmc.append({'name': tmc.loc[i,'tmc'],\
'x_coord': tmc.loc[i,'start_longitude'], \
'y_coord': tmc.loc[i,'start_latitude'],\
'z_coord': None,\
'node_type': 'tmc_start',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i,'start_longitude'].astype(str) + " " + tmc.loc[i,'start_latitude'].astype(str) +")"}, ignore_index=True)
else:
node_tmc = node_tmc.append({'name': tmc.loc[i,'tmc'],\
'x_coord': tmc.loc[i,'start_longitude'], \
'y_coord': tmc.loc[i,'start_latitude'],\
'z_coord': None,\
'node_type': 'tmc_start',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i,'start_longitude'].astype(str) + " " + tmc.loc[i,'start_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc = node_tmc.append({'name': tmc.loc[i,'tmc']+'END',\
'x_coord': tmc.loc[i,'end_longitude'], \
'y_coord': tmc.loc[i,'end_latitude'],\
'z_coord': None,\
'node_type': 'tmc_end',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i,'end_longitude'].astype(str) + " " + tmc.loc[i,'end_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc = node_tmc.append({'name': tmc.loc[i+1,'tmc'],\
'x_coord': tmc.loc[i+1,'start_longitude'], \
'y_coord': tmc.loc[i+1,'start_latitude'],\
'z_coord': None,\
'node_type': 'tmc_start',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i+1,'start_longitude'].astype(str) + " " + tmc.loc[i+1,'start_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc = node_tmc.append({'name': tmc.loc[i+1,'tmc']+'END',\
'x_coord': tmc.loc[i+1,'end_longitude'], \
'y_coord': tmc.loc[i+1,'end_latitude'],\
'z_coord': None,\
'node_type': 'tmc_end',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i+1,'end_longitude'].astype(str) + " " + tmc.loc[i+1,'end_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc.index.name = 'node_id'
node_tmc.index += 100000001 #index from 0
node_tmc.to_csv(tmc_path + os.sep + '/node_tmc.csv')
print('node_tmc.csv generated!')
'''build link_tmc.csv'''
link_tmc = pd.DataFrame()
link_tmc['name'] = None
link_tmc['corridor_id'] = None
link_tmc['corridor_link_order'] = None
link_tmc['from_node_id'] = None
link_tmc['to_node_id'] = None
link_tmc['directed'] = None
link_tmc['geometry_id'] = None
link_tmc['geometry'] = None
link_tmc['dir_flag'] = None
link_tmc['parent_link_id'] = None
link_tmc['length'] = None
link_tmc['grade'] = None
link_tmc['facility_type'] = None
link_tmc['capacity'] = None
link_tmc['free_speed'] = None
link_tmc['lanes'] = None
for i in range(0,len(tmc)):
link_tmc = link_tmc.append({'name': tmc.loc[i,'tmc'],\
'corridor_id': tmc.loc[i,'road']+'_'+tmc.loc[i,'direction'],\
'corridor_link_order' : tmc.loc[i,'road_order'],\
'from_node_id': node_tmc[(node_tmc['x_coord']==tmc.loc[i,'start_longitude']) & (node_tmc['y_coord']==tmc.loc[i,'start_latitude'])].index.values[0], \
'to_node_id': node_tmc[(node_tmc['x_coord']==tmc.loc[i,'end_longitude']) & (node_tmc['y_coord']==tmc.loc[i,'end_latitude'])].index.values[0],\
'directed': 1,\
'geometry_id': None,\
'geometry': "LINESTRING (" + tmc.loc[i,'start_longitude'].astype(str) + " " + tmc.loc[i,'start_latitude'].astype(str) + "," +\
tmc.loc[i,'end_longitude'].astype(str) +" "+ tmc.loc[i,'end_latitude'].astype(str) + ")",\
'dir_flag': 1,\
'parent_link_id': None,\
'length': tmc.loc[i,'miles'],\
'grade': None,\
'facility_type': 'interstate' if tmc.loc[i,'road'][0] == 'I'else None ,\
'capacity':None,\
'free_speed':None,\
'lanes': None}, ignore_index=True)
link_tmc.index.name = 'link_id'
link_tmc.index += 100000001
link_tmc.to_csv(tmc_path + os.sep + '/link_tmc.csv')
print('link_tmc.csv generated!')
'''build link_performance_tmc.csv'''
reading = pd.read_csv(tmc_path + os.sep + 'Reading_VA.csv')
# reading = reading[pd.to_datetime(reading['measurement_tstamp'], format='%Y-%m-%d %H:%M:%S')<datetime.datetime.strptime('2015-04-01 02:00:00', '%Y-%m-%d %H:%M:%S')]
reading = reading.loc[0:2000]
link_performance_tmc = pd.DataFrame()
link_performance_tmc['name'] = None
link_performance_tmc['corridor_id'] = None
link_performance_tmc['corridor_link_order'] = None
link_performance_tmc['from_node_id'] = None
link_performance_tmc['to_node_id'] = None
link_performance_tmc['timestamp'] = None
link_performance_tmc['volume'] = None
link_performance_tmc['travel_time'] = None
link_performance_tmc['speed'] = None
link_performance_tmc['reference_speed'] = None
link_performance_tmc['density'] = None
link_performance_tmc['queue'] = None
link_performance_tmc['notes'] = None
gp = reading.groupby('measurement_tstamp')
for key, form in gp:
# print(key)
for i in link_tmc.index:
form_selected = form[form['_vatmc_code']==link_tmc['name'][i]]
if len(form_selected)>0:
# break
link_performance_tmc = link_performance_tmc.append({'name': link_tmc['name'][i],\
'corridor_id': link_tmc['corridor_id'][i],\
'corridor_link_order' : link_tmc['corridor_link_order'][i],\
'from_node_id': link_tmc.loc[i,'from_node_id'], \
'to_node_id': link_tmc.loc[i,'to_node_id'], \
'timestamp': form_selected['measurement_tstamp'].values[0][0:10]+'T'+form_selected['measurement_tstamp'].values[0][11:13]+':'+form_selected['measurement_tstamp'].values[0][14:16],\
'volume': None,\
'travel_time': link_tmc['length'][i]/form_selected['speed'].values[0],\
'speed': form_selected['speed'].values[0],\
'reference_speed': form_selected['reference_speed'].values[0],\
'density': None,\
'queue': None,\
'notes': None }, ignore_index=True)
else:
link_performance_tmc = link_performance_tmc.append({'name': link_tmc['name'][i],\
'corridor_id': link_tmc['corridor_id'][i],\
'corridor_link_order' : link_tmc['corridor_link_order'][i],\
'from_node_id': link_tmc.loc[i,'from_node_id'], \
'to_node_id': link_tmc.loc[i,'to_node_id'], \
'timestamp': None,\
'volume': None,\
'travel_time': None,\
'speed': None,\
'reference_speed': None,\
'density': None,\
'queue': None,\
'notes': None }, ignore_index=True)
link_performance_tmc.to_csv(tmc_path + os.sep +'/link_performance_tmc.csv',index = False)
print('link_performance_tmc.csv generated!')
'''build trace.csv'''
'''trace_id is numeric'''
trace = pd.DataFrame()
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, chisquare, f_oneway, contingency
from scipy import stats
from ..findings import TTestFindings, DependenceFindings, ChiSquaredFindings, TestResult, FindingsList, AnovaFindings
import math
import itertools
def _anova(data, num_col, group_col, groups):
group_samples = []
for i in groups:
group_samples.append(data[data[group_col] == i][num_col])
test_result = f_oneway(*group_samples)
effect_size = _compute_eta_squared(*group_samples)
return test_result, effect_size
def _compute_eta_squared(*args):
# args refer to the samples for each
all_data = np.asarray(list(itertools.chain(*args)))
group_mean = [i.mean() for i in args]
group_mean = np.array(group_mean)
return group_mean.var() / all_data.var()
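# Note: this estimates eta squared as var(group means) / var(all observations),
# i.e. the share of total variance explained by group membership; values around
# 0.06 are conventionally read as a medium effect (the default used below).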
def _t_test(data, num_col, group_col, group_1=None, group_2=None, **kwargs):
if group_1 is None and group_2 is None:
groups = data[group_col].value_counts()
if len(groups) != 2:
raise ValueError(f"Column {group_col} has more than 2 groups")
else:
group_1 = groups.index[0]
group_2 = groups.index[1]
elif not (group_1 is not None and group_2 is not None):
raise ValueError("Please specify both group_1 and group_2")
first_sample = data[data[group_col] == group_1][num_col]
second_sample = data[data[group_col] == group_2][num_col]
test_result = ttest_ind(a = first_sample,
b = second_sample,
**kwargs)
effect_size = _compute_cohen_es(first_sample, second_sample)
return test_result, effect_size
def _compute_cohen_es(sample_1, sample_2):
cohen_es = abs(sample_1.mean() - sample_2.mean()) / sample_1.std()
return cohen_es
def _compute_phi_es(chi2, n):
return math.sqrt(chi2 / n)
def _chi_squared(data, col_1, expected=None):
# If expected is None, assuming it is about testing for equality.
obs = data[col_1].value_counts().values
test_result = chisquare(obs, expected)
effect_size = _compute_phi_es(test_result.chisq, len(data[col_1]))
return test_result, effect_size
def _chi_squared_dependence(data, col_1, col_2, groups_1, groups_2, min_sample):
if groups_1 is None:
filtered, ignored = _filter_sparse_group(data, col_1, min_sample)
if len(filtered) < 2:
raise ValueError(f"Only one group for {col_1}")
groups_1 = filtered
if groups_2 is None:
filtered, ignored = _filter_sparse_group(data, col_2, min_sample)
if len(filtered) < 2:
raise ValueError(f"Only one group for {col_2}")
groups_2 = filtered
# Keep only rows whose values fall in the retained groups for both columns
# (the original isin([...]).index indexing was a no-op and kept every row).
mask = data[col_1].isin(groups_1) & data[col_2].isin(groups_2)
group_1 = data.loc[mask, col_1]
group_2 = data.loc[mask, col_2]
vals, count = contingency.crosstab(group_1.values, group_2.values)
test_result = contingency.chi2_contingency(count)
test_result = TestResult(name='chi2 contigency',
statistic=test_result[0],
pvalue=test_result[1],
dof=test_result[2],
expected=test_result[3],
)
effect_size = _compute_phi_es(test_result.statistic, len(data[col_1]))
return test_result, effect_size
def _compare_group(data, col_1, col_2, p_value=0.05, phi_es=0.2, min_sample=20):
groups_1, ignored_1 = _filter_sparse_group(data, col_1, min_sample)
groups_2, ignored_2 = _filter_sparse_group(data, col_2, min_sample)
if len(groups_1) <= 1 or len(groups_2) <= 1:
pass
else:
test_result, effect_size = _chi_squared_dependence(data, col_1, col_2, groups_1, groups_2, min_sample)
if test_result.pvalue <= p_value and effect_size >= phi_es:
return DependenceFindings(data=data,
col_1=col_1,
col_2=col_2,
groups_1=groups_1,
groups_2=groups_2,
test_result=test_result
)
return None
def _compare_mean(data, num_col, group_col, *, cohen_es=0.2, eta=0.06, p_value=0.05, min_sample=20):
groups, ignored = _filter_sparse_group(data, group_col, min_sample)
if not ignored.empty:
print(f"Ignoring groups {list(ignored)} when comparing {num_col} and {group_col}")
if len(groups) == 1:
print(f"Skipping comparing {num_col} and {group_col}, only one group available")
elif len(groups) == 2:
group_1 = groups[0]
group_2 = groups[1]
test_result, effect_size = _t_test(data, num_col, group_col, group_1, group_2)
if test_result.pvalue <= p_value and effect_size >= cohen_es:
return TTestFindings(data=data,
group_col=group_col,
num_col=num_col,
group_1=group_1,
group_2=group_2,
test_result=test_result)
else:
test_result, effect_size = _anova(data, num_col, group_col, groups)
if test_result.pvalue <= p_value and effect_size >= eta:
return AnovaFindings(data=data,
group_col=group_col,
groups=groups,
num_col=num_col,
test_result=test_result
)
return None
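# Example (sketch; column names are illustrative): _compare_mean(df, "income",
# "region") runs a t-test when "region" has exactly two sufficiently large
# groups and a one-way ANOVA when it has more, returning a findings object only
# when both the p-value and the effect-size thresholds are met.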
def _filter_sparse_group(data, group_col, min_sample):
group_count = data[group_col].value_counts()
ignored = group_count[(group_count < min_sample)]
result = group_count.drop(ignored.index)
return result.index, ignored.index
def _auto_detect(data,
num_col,
cat_col,
cohen_es=0.2,
eta=0.06,
phi_es=0.2,
p_value=0.05,
min_sample=20,
ignore_list=None):
findings_list = []
ignore_list = [] if ignore_list is None else ignore_list
# Compare mean
for n_col, c_col in itertools.product(num_col, cat_col):
# TODO: Check if this is inefficient.
if ((n_col, c_col) in ignore_list) or ((c_col, n_col) in ignore_list):
continue
else:
findings = _compare_mean(data, n_col, c_col, cohen_es=cohen_es, eta=eta, p_value=p_value, min_sample=min_sample)
if findings is not None:
findings_list.append(findings)
# Compare dependency of two cat_col
for col_1, col_2 in itertools.combinations(cat_col, r=2):
# TODO: Check if this is inefficient.
if ((col_1, col_2) in ignore_list) or ((col_2, col_1) in ignore_list):
continue
else:
findings = _compare_group(data, col_1, col_2, p_value=p_value, phi_es=phi_es, min_sample=min_sample)
if findings is not None:
findings_list.append(findings)
return FindingsList(findings_list)
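# Example usage (sketch; column names are illustrative assumptions):
#
#   findings = _auto_detect(df, num_col=["age", "income"],
#                           cat_col=["gender", "region"],
#                           cohen_es=0.2, eta=0.06, phi_es=0.2,
#                           p_value=0.05, min_sample=20)
#   for finding in findings:
#       print(finding)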
def _diff_group(data, group_col, num_col):
df_group = data.groupby(group_col)[num_col].mean().T
result = pd.DataFrame(index=df_group.index)
for i in itertools.combinations(df_group.columns, 2):
result[f"{i[0]} - {i[1]}"] = df_group[i[0]] - df_group[i[1]]
return result
def _diff(data, *args):
# TODO: Check the len of each args sample.
# Reconstructed: the original body referenced an undefined `df_group`. This
# sketch assumes *args are equal-length, array-like samples and returns their
# pairwise differences.
result = pd.DataFrame(index=pd.RangeIndex(len(args[0])))
for a, b in itertools.combinations(range(len(args)), 2):
result[f"sample_{a} - sample_{b}"] = np.asarray(args[a]) - np.asarray(args[b])
return result
def _t_test_group(data, group_col, num_col, **kwargs):
test_result = dict()
for i in itertools.combinations(data[group_col].value_counts().index, r=2):
test_result[f"{i[0]} vs {i[1]}"] = ttest_ind(a = df[df[group_col] == i[0]][num_col],
b = df[df[group_col] == i[1]][num_col],
**kwargs)
return test_result
def _locate_outlier_zscore(data, columns, zscore_threshold, any=True, exclude=False):
'''
Locate outliers from numerical columns.
Arguments:
data: pandas DataFrame
columns: A list of column names to check for outliers. Must be numerical columns.
zscore_threshold: Threshold for classifying outliers.
any: If True, classify a data point as an outlier if its value in any one of the columns is an outlier.
exclude: If True, return non-outliers. If False, return outliers.
Returns a pandas DataFrame of non-outliers when exclude=True, otherwise a tuple of
(the outlier DataFrame with an 'Outlier_field' column, outlier_range).
'''
mean = data[columns].mean(axis=0)
std = data[columns].std(axis=0)
lower_bound = (mean - std * zscore_threshold).rename("Lower_bound")
upper_bound = (std * zscore_threshold + mean).rename("Upper_bound")
outlier_range = pd.concat([lower_bound, upper_bound], axis=1)
# TODO: Make this more efficient
# The above workflow is equivalent to below, at 3 decimal points
mask_include = np.abs(stats.zscore(data[columns])) > zscore_threshold
mask_exclude = np.abs(stats.zscore(data[columns])) < zscore_threshold
if any:
if exclude:
return data[mask_exclude.any(axis=1)]
else:
data = data[mask_include.any(axis=1)]
outlier_field = pd.DataFrame(mask_include, columns=columns)
outlier_field = outlier_field.apply(lambda x: x.replace(True, x.name).replace(False, ""))
outlier_field = outlier_field.apply(lambda x: x.str.cat(sep=''), axis=1)
outlier_field = outlier_field.replace("", np.nan).dropna()
outlier_field.rename("Outlier_field", inplace=True)
assert data.index.equals(outlier_field.index)
return (pd.concat([data, outlier_field], axis=1), outlier_range)
else:
if exclude:
return data[mask_exclude.all(axis=1)]
else:
data = data[mask_include.all(axis=1)]
outlier_field = pd.DataFrame(mask_include, columns=columns)
outlier_field = outlier_field.apply(lambda x: x.replace(True, x.name).replace(False, ""))
outlier_field = outlier_field.apply(lambda x: x.str.cat(sep=''), axis=1)
outlier_field = outlier_field.replace("", np.nan).dropna()
outlier_field.rename("Outlier_field", inplace=True)
assert data.index.equals(outlier_field.index)
return (pd.concat([data, outlier_field], axis=1), outlier_range)
# #-- -- -- -- Merging DataFrames with pandas
# # Used for Data Scientist Training Path
# #FYI it's a compilation of how to work
# #with different commands.
# ### --------------------------------------------------------
# # # # ------>>>> Reading DataFrames from multiple files
# Import pandas
import pandas as pd
# Read 'Bronze.csv' into a DataFrame: bronze
bronze = pd.read_csv('Bronze.csv')
# Read 'Silver.csv' into a DataFrame: silver
silver = pd.read_csv('Silver.csv')
# Read 'Gold.csv' into a DataFrame: gold
gold = pd.read_csv('Gold.csv')
# Print the first five rows of gold
print(gold.head())
# ### --------------------------------------------------------
# # # # ------>>>> Reading DataFrames from multiple files in a loop
# Import pandas
import pandas as pd
# Create the list of file names: filenames
filenames = ['Gold.csv', 'Silver.csv', 'Bronze.csv']
# Create the list of three DataFrames: dataframes
dataframes = []
for filename in filenames:
dataframes.append(pd.read_csv(filename))
# Print top 5 rows of 1st DataFrame in dataframes
print(dataframes[0].head())
# ### --------------------------------------------------------
# # # # ------>>>> Combining DataFrames from multiple data files
# Import pandas
import pandas as pd
# Make a copy of gold: medals
medals = gold.copy()
# Create list of new column labels: new_labels
new_labels = ['NOC', 'Country', 'Gold']
# Rename the columns of medals using new_labels
medals.columns = new_labels
# Add columns 'Silver' & 'Bronze' to medals
medals['Silver'] = silver['Total']
medals['Bronze'] = bronze['Total']
# Print the head of medals
print(medals.head())
# ### --------------------------------------------------------
# # # # ------>>>> Sorting DataFrame with the Index & columns
# Import pandas
import pandas as pd
# Read 'monthly_max_temp.csv' into a DataFrame: weather1
weather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month')
# Print the head of weather1
print(weather1.head())
# Sort the index of weather1 in alphabetical order: weather2
weather2 = weather1.sort_index()
# Print the head of weather2
print(weather2.head())
# Sort the index of weather1 in reverse alphabetical order: weather3
weather3 = weather1.sort_index(ascending=False)
# Print the head of weather3
print(weather3.head())
# Sort weather1 numerically using the values of 'Max TemperatureF': weather4
weather4 = weather1.sort_values('Max TemperatureF')
# Print the head of weather4
print(weather4.head())
# ### --------------------------------------------------------
# # # # ------>>>> Reindexing DataFrame from a list
# Import pandas
import pandas as pd
# Reindex weather1 using the list year: weather2
weather2 = weather1.reindex(year)
# Print weather2
print(weather2)
# Reindex weather1 using the list year with forward-fill: weather3
weather3 = weather1.reindex(year).ffill()
# Print weather3
print(weather3)
# ### --------------------------------------------------------
# # # # ------>>>> Reindexing using another DataFrame Index
# Import pandas
import pandas as pd
# Reindex names_1981 with index of names_1881: common_names
common_names = names_1981.reindex(names_1881.index)
# Print shape of common_names
print(common_names.shape)
# Drop rows with null counts: common_names
common_names = common_names.dropna()
# Print shape of new common_names
print(common_names.shape)
# ### --------------------------------------------------------
# # # # ------>>>>Adding unaligned DataFrames
# The DataFrames january and february, which have
# been printed in the IPython Shell, represent the
# sales a company made in the corresponding months.
# The Indexes in both DataFrames are called Company,
# identifying which company bought that quantity of
# units. The column Units is the number of units sold.
# If you were to add these two DataFrames by
# executing the command total = january + february,
# how many rows would the resulting DataFrame have?
# Try this in the IPython Shell and find out for yourself.
total = january + february
total
# R/ 6 rows.
# ### --------------------------------------------------------
# # # # ------>>>> Broadcasting in arithmetic formulas
# Extract selected columns from weather as new DataFrame: temps_f
temps_f = weather[['Min TemperatureF', 'Mean TemperatureF', 'Max TemperatureF']]
# Convert temps_f to celsius: temps_c
temps_c = (temps_f - 32) * 5/9
# Rename 'F' in column names with 'C': temps_c.columns
temps_c.columns = temps_c.columns.str.replace('F', 'C')
# Print first 5 rows of temps_c
print(temps_c.head())
# ### --------------------------------------------------------
# # # # ------>>>> Computing percentage growth of GDP
import pandas as pd
# Read 'GDP.csv' into a DataFrame: gdp
gdp = pd.read_csv('GDP.csv', parse_dates=True, index_col='DATE')
# Slice all the gdp data from 2008 onward: post2008
post2008 = gdp.loc['2008':]
# Print the last 8 rows of post2008
print(post2008.tail(8))
# Resample post2008 by year, keeping last(): yearly
yearly = post2008.resample('A').last()
# Print yearly
print(yearly)
# Compute percentage growth of yearly: yearly['growth']
yearly['growth'] = yearly.pct_change() * 100
# Print yearly again
print(yearly)
# ### --------------------------------------------------------
# # # # ------>>>> Converting currency of stocks
# Import pandas
import pandas as pd
# Read 'sp500.csv' into a DataFrame: sp500
sp500 = pd.read_csv('sp500.csv', parse_dates=True, index_col='Date')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2020 Authors of CryptoMiniSat, see AUTHORS file
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# pylint: disable=invalid-name,line-too-long,too-many-locals,consider-using-sys-exit
import operator
import re
import time
import argparse
import sys
import os
import itertools
import pandas as pd
import pickle
import sklearn
import sklearn.svm
import sklearn.tree
from sklearn.preprocessing import StandardScaler
import numpy as np
import sklearn.metrics
import matplotlib.pyplot as plt
import sklearn.ensemble
import sklearn.linear_model
import helper
import xgboost as xgb
import ast
import functools
import crystalcodegen as ccg
try:
import mlflow
except ImportError:
mlflow_avail = False
else:
mlflow_avail = True
ver = sklearn.__version__.split(".")
if int(ver[1]) < 20:
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
MISSING=np.nan
def check_long_short():
if options.tier is None:
print("ERROR: You must give option '--tier' as short/long/forever")
assert False
exit(-1)
class Learner:
def __init__(self, df, tier):
self.df = df
self.tier = tier
def count_bad_ok(self, df):
files = df[["x.class", "rdb0.dump_no"]].groupby("x.class").count()
if files["rdb0.dump_no"].index[0] == 0:
bad = files["rdb0.dump_no"][0]
ok = files["rdb0.dump_no"][1]
else:
bad = files["rdb0.dump_no"][1]
ok = files["rdb0.dump_no"][0]
assert bad > 0, "No need to train, data only contains BAD(0)"
assert ok > 0, "No need to train, data only contains OK(1)"
return bad, ok
def filter_percentile(self, df, features, perc):
low = df.quantile(perc, axis=0)
high = df.quantile(1.0-perc, axis=0)
df2 = df.copy()
for i in features:
df2 = df2[(df2[i] >= low[i]) & (df2[i] <= high[i])]
print("Filtered to %f on %-30s, shape now: %s" %
(perc, i, df2.shape))
print("Original size:", df.shape)
print("New size:", df2.shape)
return df2
def filtered_conf_matrixes(self, dump_no, data, features, to_predict, clf,
toprint, highlight=False):
# filter test data
if dump_no is not None:
print("\nCalculating confusion matrix -- dump_no == %s" % dump_no)
toprint += " dump no %d" % dump_no
data2 = data[data["rdb0.dump_no"] == dump_no]
else:
print("\nCalculating confusion matrix -- ALL dump_no")
data2 = data
if False:
return helper.conf_matrixes(data2, features, to_predict, clf, toprint,
highlight=highlight)
else:
return helper.calc_regression_error(data2, features, to_predict, clf, toprint,
highlight=highlight)
@staticmethod
def fix_feat_name(x):
x = re.sub(r"dump_no", r"dump_no", x)
x = re.sub(r"^cl_", r"", x)
x = re.sub(r"^rdb0_", r"", x)
if x == "dump_no":
pass
elif x == "last_touched_diff":
pass
elif x == "act_ranking_top_10":
pass
elif x == "act_ranking_rel":
pass
elif x == "time_inside_solver":
pass
elif x == "size":
x = "cl->" + x + "()"
else:
x = "cl->stats." + x
return x
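    # Illustrative mappings produced by fix_feat_name, derived from the branches
    # above (the feature names are examples only):
    #   "cl_size"      -> "cl->size()"
    #   "rdb0_dump_no" -> "dump_no"
    #   "cl_glue"      -> "cl->stats.glue"   (default cl->stats.* branch)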
def importance_XGB(self, clf, features):
impdf = []
#print("clf:", clf)
#print("clf-booster:", clf.feature_importances_)
for i in range(len(clf.feature_importances_)):
score = clf.feature_importances_[i]
ft = features[i]
impdf.append({'feature': ft, 'importance': score})
impdf = pd.DataFrame(impdf)
impdf = impdf.sort_values(by='importance', ascending=False).reset_index(drop=True)
impdf['importance'] /= impdf['importance'].sum()
pd.set_option('display.max_rows', None)
        pd.set_option('display.max_columns', None)
import numpy as np
import requests
import json
import pandas as pd
import warnings
from ._utils import convert_to_entrez, get_top_genes, check_data, check_params
class ToppFunAnalysis(object):
""" Provide tools for running Toppfun enrichment analysis for different metagenes.
Parameters
----------
data :
If ``pre_selected = False`` : pandas.DataFrame , shape (n_metagenes , n_genes) or pandas.Series, shape (n_genes)
The column names (or the index keys for a serie) should be valid gene IDs.
If ``pre_selected = True`` : pandas.Series , shape (n_metagenes)
For each metagene the serie contains a list of the IDs of the extreme expressed
genes.
input_type : string, optional.
Type of input gene IDs. Common types are 'entrezgene' , 'symbol' , 'uniprot' , 'ensembl.gene' , 'refseq'...
For the complete list of available types, see https://docs.mygene.info/en/latest/doc/query_service.html#available_fields .
If ``input_type is None``, conversion will not be possible and input IDs will be assumed to be Entrez IDs.
The default is None.
pre_selected : boolean , optional.
Indicate whether the extreme genes have already been selected (see above).
The default is False.
threshold : numeric or array-like of two numerics , optional
See sica.annotate._utils.get_top_genes. The default is 3.
method : {'quantile' , 'std'} , optional
See sica.annotate._utils.get_top_genes. The default is 'std'.
tail : {'left' , 'right' , 'both' , 'heaviest'} , optional
See sica.annotate._utils.get_top_genes. The default is 'heaviest'.
Attributes
----------
top_genes_ : pandas.DataFrame, shape (n_metagenes , 3)
For each metagene the 'inputs' column contains a list of the IDs of the extreme expressed
genes.
References
----------
For more details, please refer to the ToppGene API (see https://toppgene.cchmc.org/API/enrich. ).
Examples
--------
>>> from sica.annotate import toppfun
>>> annotations = toppfun.ToppFunAnalysis(data = Metagenes)
>>> metagene7_annot = annotations.get_analysis(metagene = 'metagene 7')
>>> metagene7_annot.head()
"""
def __init__(
self,
data,
input_type=None,
pre_selected=False,
threshold=3,
method="std",
tail="heaviest",
):
# Check data
check_data(data, pre_selected)
self.input_type = input_type
if self.input_type is None:
warnings.warn(
"If input_type is None the conversion of input IDs to Entrez ids will not be possible. ToppFunAnalysis will assume that the inputs are already Entrez IDs."
)
        # Initialization of self.top_genes_ attribute
self.top_genes_ = pd.DataFrame(
{"inputs": None, "entrezgene": None, "notfound": None}, index=data.index
)
if pre_selected:
self.top_genes_["inputs"] = data.copy()
else:
threshold = check_params(threshold, method, tail)
self.top_genes_["inputs"] = data.apply(
get_top_genes, threshold=threshold, method=method, tail=tail, axis=1
)
def convert_metagenes(self, idx):
""" Convert the IDs of the most expressed genes contained in ``top_genes_``.
Parameters
----------
idx : {"all" , string , list of strings}
If ``idx = "all"`` all the metagenes will be converted.
Otherwise, only the metagenes associated with ``idx`` will be converted. In that case, ``idx`` must correspond to valid
indexes of the input data.
Returns
-------
None
"""
if self.input_type is None:
raise ValueError("Conversion is not possible with self.input_type = None.")
# Define the function that will be applied to the rows of self.top_genes_ dataframe
def fun(row):
if row["entrezgene"] is None:
return convert_to_entrez(row["inputs"], self.input_type)[:2]
else:
return row["entrezgene"], row["notfound"]
# Apply func to the rows of self.top_genes_ associated with the metagenes parameter
if idx == "all":
warnings.warn(
"idx = 'all' : this operation can take quite some time depending on the number of metagenes and the number of most expressed genes."
)
self.top_genes_[["entrezgene", "notfound"]] = self.top_genes_.apply(
fun, axis=1, result_type="expand"
)
elif isinstance(idx, list):
warnings.warn(
"metagenes is a list : this operation can take quite some time depending on the number of metagenes and the number of most expressed genes."
)
self.top_genes_.loc[idx, ["entrezgene", "notfound"]] = (
self.top_genes_.loc[idx].apply(fun, axis=1, result_type="expand")
).values
else:
self.top_genes_.loc[idx, ["entrezgene", "notfound"]] = np.array(
fun(self.top_genes_.loc[idx]), dtype="object"
)
return
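    # Example call (the metagene label is hypothetical, mirroring the class docstring):
    #   >>> annotations.convert_metagenes(idx='metagene 7')
    # afterwards self.top_genes_.loc['metagene 7', 'entrezgene'] holds the converted
    # Entrez IDs and 'notfound' the input IDs that could not be mapped.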
def get_analysis(
self,
metagene,
type_list=None,
p_value=0.05,
min_entities=10,
max_entities=500,
maxres=10,
correct="FDR",
):
""" Return the ToppFun enrichment analysis of a given metagene.
Parameters
----------
metagene : object
It must correspond to a valid index of the input data.
type_list: list of strings, optional
List of features to perform enrichment tests. If None, all the available features
will be used (see sica.annotate.toppfun._get_analysis). The default is None.
p_value: float in (0 , 1), optional
P-value maximal threshold to accept enrichments. The default is 0.05
min_entities: int, optional
Minimal number of gene hits to accept in feature categories. The default is 10.
max_entities: int, optional
Maximal number of gene hits to accept in feature categories. The default is 500.
maxres: int, optional
Number of top enrichments to show for each feature. The default is 10.
correct: str {'none', 'FDR', 'Bonferroni'}, optional
P-value correction methods. FDR refers to the Benjamini and Hochberg method.
The default is 'FDR'.
Returns
-------
pandas.DataFrame
Results of the Toppfun enrichment analysis for the given metagene.
"""
if self.input_type is None:
entrez_dict = {
"Genes": [int(id) for id in self.top_genes_.loc[metagene, "inputs"]]
}
else:
self.convert_metagenes(idx=metagene)
entrez_dict = {
"Genes": [int(id) for id in self.top_genes_.loc[metagene, "entrezgene"]]
}
results = []
annotations = _get_analysis(
entrez_dict, type_list, p_value, min_entities, max_entities, maxres, correct
).json()["Annotations"]
for element in annotations:
gene_symbol_list = [gene["Symbol"] for gene in element["Genes"]]
element["Gene_Symbol"] = ",".join(gene_symbol_list)
element.pop("Genes", None)
results.append(element)
        return pd.DataFrame(results)
# -*- coding: utf-8 -*-
"""Structures data in ML-friendly ways."""
import re
import copy
import datetime as dt
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV
from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS
from avaml.aggregatedata.time_parameters import to_time_parameters
from varsomdata import getforecastapi as gf
from varsomdata import getmisc as gm
__author__ = 'arwi'
LABEL_PROBLEM_PRIMARY = {
"ext_attr": [
"avalanche_problem_type_id",
"avalanche_problem_type_name",
"avalanche_type_id",
"avalanche_type_name",
"avalanche_ext_id",
"avalanche_ext_name"
],
"values": {
_NONE: [0, "", 0, "", 0, ""],
"new-loose": [3, "Nysnø (løssnøskred)", 20, "Løssnøskred", 10, "Tørre løssnøskred"],
"wet-loose": [5, "Våt snø (løssnøskred)", 20, "Løssnøskred", 15, "Våte løssnøskred"],
"new-slab": [7, "Nysnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"drift-slab": [10, "Fokksnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"pwl-slab": [30, "Vedvarende svakt lag (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"wet-slab": [45, "Våt snø (flakskred)", 10, "Flakskred", 25, "Våte flakskred"],
"glide": [50, "Glideskred", 10, "Flakskred", 25, "Våte flakskred"]
}
}
LABEL_PROBLEM = {
"cause": {
"ext_attr": ["aval_cause_id", "aval_cause_name"],
"values": {
"0": [0, ""],
"new-snow": [10, "Nedføyket svakt lag med nysnø"],
"hoar": [11, "Nedsnødd eller nedføyket overflaterim"],
"facet": [13, "Nedsnødd eller nedføyket kantkornet snø"],
"crust": [14, "Dårlig binding mellom glatt skare og overliggende snø"],
"snowdrift": [15, "Dårlig binding mellom lag i fokksnøen"],
"ground-facet": [16, "Kantkornet snø ved bakken"],
"crust-above-facet": [18, "Kantkornet snø over skarelag"],
"crust-below-facet": [19, "Kantkornet snø under skarelag"],
"ground-water": [20, "Vann ved bakken/smelting fra bakken"],
"water-layers": [22, "Opphopning av vann i/over lag i snødekket"],
"loose": [24, "Ubunden snø"]
}
},
"dsize": {
"ext_attr": ["destructive_size_ext_id", "destructive_size_ext_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "1 - Små"],
'2': [2, "2 - Middels"],
'3': [3, "3 - Store"],
'4': [4, "4 - Svært store"],
'5': [5, "5 - Ekstremt store"]
}
},
"prob": {
"ext_attr": ["aval_probability_id", "aval_probability_name"],
"values": {
'0': [0, "Ikke gitt"],
'2': [2, "Lite sannsynlig"],
'3': [3, "Mulig"],
'5': [5, "Sannsynlig"],
}
},
"trig": {
"ext_attr": ["aval_trigger_simple_id", "aval_trigger_simple_name"],
"values": {
'0': [0, "Ikke gitt"],
'10': [10, "Stor tilleggsbelastning"],
'21': [21, "Liten tilleggsbelastning"],
'22': [22, "Naturlig utløst"]
}
},
"dist": {
"ext_attr": ["aval_distribution_id", "aval_distribution_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "Få bratte heng"],
'2': [2, "Noen bratte heng"],
'3': [3, "Mange bratte heng"],
'4': [4, "De fleste bratte heng"]
}
},
"lev_fill": {
"ext_attr": ["exposed_height_fill"],
"values": {
'0': [0],
'1': [1],
'2': [2],
'3': [3],
'4': [4],
}
}
}
LABEL_PROBLEM_MULTI = {
"aspect": {
"ext_attr": "valid_expositions",
}
}
LABEL_PROBLEM_REAL = {
"lev_max": {
"ext_attr": "exposed_height_1",
},
"lev_min": {
"ext_attr": "exposed_height_2",
}
}
LABEL_GLOBAL = {
"danger_level": {
"ext_attr": ["danger_level", "danger_level_name"],
"values": {
'1': [1, "1 liten"],
'2': [2, "2 Moderat"],
'3': [3, "3 Betydelig"],
'4': [4, "4 Stor"],
'5': [5, "5 Meget stor"]
}
},
"emergency_warning": {
"ext_attr": ["emergency_warning"],
"values": {
"Ikke gitt": ["Ikke gitt"],
"Naturlig utløste skred": ["Naturlig utløste skred"],
}
}
}
COMPETENCE = [0, 110, 115, 120, 130, 150]
class ForecastDataset:
def __init__(self, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), max_file_age=23):
"""
Object contains aggregated data used to generate labeled datasets.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param seasons: Tuple/list of string representations of avalanche seasons to fetch.
"""
self.seasons = sorted(list(set(seasons)))
self.date = None
self.regobs_types = regobs_types
self.weather = {}
self.regobs = {}
self.varsom = {}
self.labels = {}
self.use_label = True
for season in seasons:
varsom, labels = _get_varsom_obs(year=season, max_file_age=max_file_age)
self.varsom = merge(self.varsom, varsom)
self.labels = merge(self.labels, labels)
regobs = _get_regobs_obs(season, regobs_types, max_file_age=max_file_age)
self.regobs = merge(self.regobs, regobs)
weather = _get_weather_obs(season, max_file_age=max_file_age)
self.weather = merge(self.weather, weather)
@staticmethod
def date(regobs_types, date: dt.date, days, use_label=True):
"""
Create a dataset containing just a given day's data.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param date: Date to fetch and create dataset for.
:param days: How many days to fetch before date. This will be max for .label()'s days parameter.
"""
self = ForecastDataset(regobs_types, [])
self.date = date
self.use_label = use_label
self.regobs = _get_regobs_obs(None, regobs_types, date=date, days=days)
self.varsom, labels = _get_varsom_obs(None, date=date, days=days-1 if days > 0 else 1)
self.weather = _get_weather_obs(None, date=date, days=days-2 if days > 2 else 1)
self.labels = {}
for label_keys, label in labels.items():
if label_keys not in self.labels:
self.labels[label_keys] = {}
for (label_date, label_region), label_data in label.items():
if label_date == date.isoformat():
subkey = (label_date, label_region)
self.labels[label_keys][subkey] = label_data
return self
def label(self, days, with_varsom=True):
"""Creates a LabeledData containing relevant label and features formatted either in a flat structure or as
a time series.
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
:return: LabeledData
"""
table = {}
row_weight = {}
df = None
df_weight = None
df_label = pd.DataFrame(self.labels, dtype="U")
days_w = {0: 1, 1: 1, 2: 1}.get(days, days - 1)
days_v = {0: 1, 1: 2, 2: 2}.get(days, days)
days_r = days + 1
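        # Window sizes per data source (cf. the docstring): weather covers days
        # 0..days_w-1, Varsom days 1..days_v-1 and RegObs days 2..days_r-1,
        # so e.g. days=5 gives weather 0-3, Varsom 1-4 and RegObs 2-5.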
varsom_index = pd.DataFrame(self.varsom).index
weather_index = pd.DataFrame(self.weather).index
if len(df_label.index) == 0 and self.use_label:
raise NoBulletinWithinRangeError()
if self.date and not self.use_label:
season = gm.get_season_from_date(self.date)
regions = gm.get_forecast_regions(year=season, get_b_regions=True)
date_region = [(self.date.isoformat(), region) for region in regions]
else:
date_region = df_label.index
for monotonic_idx, entry_idx in enumerate(date_region):
date, region_id = dt.date.fromisoformat(entry_idx[0]), entry_idx[1]
def prev_key(day_dist):
return (date - dt.timedelta(days=day_dist)).isoformat(), region_id
# Just check that we can use this entry.
try:
if with_varsom:
for n in range(1, days_v):
if prev_key(n) not in varsom_index:
raise KeyError()
for n in range(0, days_w):
if prev_key(n) not in weather_index:
raise KeyError()
add_row = True
# We don't check for RegObs as it is more of the good to have type of data
except KeyError:
add_row = False
if add_row:
row = {}
for region in REGIONS:
row[(f"region_id_{region}", "0")] = float(region == region_id)
if with_varsom:
for column in self.varsom.keys():
for n in range(1, days_v):
# We try/except an extra time since single dates may run without a forecast.
row[(column, str(n))] = self.varsom[column][prev_key(n)]
for column in self.weather.keys():
for n in range(0, days_w):
try:
row[(column, str(n))] = self.weather[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
for column in self.regobs.keys():
for n in range(2, days_r):
try:
row[(column, str(n))] = self.regobs[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
try:
weight_sum = self.regobs['accuracy'][prev_key(0)]
if weight_sum < 0:
row_weight[entry_idx] = 1 / 2
elif weight_sum == 0:
row_weight[entry_idx] = 1
elif weight_sum > 0:
row_weight[entry_idx] = 2
except KeyError:
row_weight[entry_idx] = 1
# Some restructuring to make DataFrame parse the dict correctly
for key in row.keys():
if key not in table:
table[key] = {}
table[key][entry_idx] = row[key]
# Build DataFrame iteratively to preserve system memory (floats in dicts are apparently expensive).
if (monotonic_idx > 0 and monotonic_idx % 1000 == 0) or monotonic_idx == len(date_region) - 1:
df_new = pd.DataFrame(table, dtype=np.float32).fillna(0)
df_weight_new = pd.Series(row_weight)
df = df_new if df is None else pd.concat([df, df_new])
df_weight = df_weight_new if df is None else pd.concat([df_weight, df_weight_new])
table = {}
row_weight = {}
if df is None or len(df.index) == 0:
raise NoDataFoundError()
if self.use_label:
df_label = df_label.loc[df.index]
df_label.sort_index(axis=0, inplace=True)
df_label.sort_index(axis=1, inplace=True)
df.sort_index(axis=0, inplace=True)
df_weight.sort_index(axis=0, inplace=True)
else:
df_label = None
return LabeledData(df, df_label, df_weight, days, self.regobs_types, with_varsom, self.seasons)
class LabeledData:
is_normalized = False
with_regions = True
elevation_class = (False, False)
scaler = StandardScaler()
def __init__(self, data, label, row_weight, days, regobs_types, with_varsom, seasons=False):
"""Holds labels and features.
:param data: A DataFrame containing the features of the dataset.
:param label: DataFrame of labels.
:param row_weight: Series containing row weights
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param regobs_types: A tuple/list of strings of types of observations to fetch from RegObs.,
e.g., `("Faretegn")`.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
"""
self.data = data
self.row_weight = row_weight
if label is not None:
self.label = label
self.label = self.label.replace(_NONE, 0)
self.label = self.label.replace(np.nan, 0)
try: self.label['CLASS', _NONE] = self.label['CLASS', _NONE].replace(0, _NONE).values
except KeyError: pass
try: self.label['MULTI'] = self.label['MULTI'].replace(0, "0").values
except KeyError: pass
try: self.label['REAL'] = self.label['REAL'].astype(np.float)
except KeyError: pass
self.pred = label.copy()
for col in self.pred.columns:
self.pred[col].values[:] = 0
try: self.pred['CLASS', _NONE] = _NONE
except KeyError: pass
try: self.pred['MULTI'] = "0"
except KeyError: pass
else:
self.label = None
self.pred = None
self.days = days
self.with_varsom = with_varsom
self.regobs_types = regobs_types
if self.data is not None:
self.scaler.fit(self.data.values)
self.single = not seasons
self.seasons = sorted(list(set(seasons if seasons else [])))
def normalize(self, by=None):
"""Normalize the data feature-wise using MinMax.
:return: Normalized copy of LabeledData
"""
by = by if by is not None else self
if not self.is_normalized:
ld = self.copy()
data = by.scaler.transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = by
return ld
elif self.is_normalized != by:
return self.denormalize().normalize(by=by)
else:
return self.copy()
def denormalize(self):
"""Denormalize the data feature-wise using MinMax.
:return: Denormalized copy of LabeledData
"""
if self.is_normalized:
ld = self.copy()
data = self.is_normalized.scaler.inverse_transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = False
return ld
else:
return self.copy()
def drop_regions(self):
"""Remove regions from input data"""
if self.with_regions:
ld = self.copy()
region_columns = list(filter(lambda x: re.match(r'^region_id', x[0]), ld.data.columns))
ld.data.drop(region_columns, axis=1, inplace=True)
ld.with_regions = False
ld.scaler.fit(ld.data.values)
return ld
else:
return self.copy()
def stretch_temperatures(self):
"""Stretch out temperatures near zero"""
ld = self.copy()
if self.data is not None:
temp_cols = [bool(re.match(r"^temp_(max|min)$", title)) for title in ld.data.columns.get_level_values(0)]
ld.data.loc[:, temp_cols] = np.sign(ld.data.loc[:, temp_cols]) * np.sqrt(np.abs(ld.data.loc[:, temp_cols]))
ld.scaler.fit(ld.data.values)
return ld
def problem_graph(self):
label = pd.Series(self.label["CLASS", _NONE, "problem_1"], name="label")
pred1 = pd.Series(self.pred["CLASS", _NONE, "problem_1"], name="problem_1")
pred2 = pd.Series(self.pred["CLASS", _NONE, "problem_2"], name="problem_2")
groups = pd.concat([label, pred1, pred2], axis=1).groupby(["label", "problem_1"], dropna=False)
count = groups.count()["problem_2"].rename("count")
p2 = groups["problem_2"].apply(lambda x: pd.Series.mode(x)[0]).replace(0, np.nan)
return pd.concat([count, p2], axis=1)
def statham(self):
"""Make a danger level in the same manner as Statham et al., 2018."""
if self.pred is None:
raise NotPredictedError
label = self.label[("CLASS", _NONE, "danger_level")].apply(np.int)
pred = self.pred[("CLASS", _NONE, "danger_level")].apply(np.int)
ones = pd.Series(np.ones(pred.shape), index=pred.index)
cols = ["label", "diff", "n"]
df = pd.DataFrame(pd.concat([label, label - pred, ones], axis=1).values, columns=cols)
bias = df.groupby(cols[:-1]).count().unstack().droplevel(0, axis=1)
n = df.groupby(cols[0]).count()["n"]
share = bias.divide(n, axis=0)
return pd.concat([n, share], axis=1)
def adam(self):
if self.pred is None:
raise NotPredictedError
touch = pd.DataFrame({
1: {(2, 10): "A", (3, 10): "A", (3, 21): "B", (5, 21): "B", (3, 22): "B", (5, 22): "B"},
2: {(2, 10): "A", (3, 10): "B", (3, 21): "C", (5, 21): "D", (3, 22): "C", (5, 22): "D"},
3: {(2, 10): "B", (3, 10): "C", (3, 21): "D", (5, 21): "E", (3, 22): "D", (5, 22): "E"},
4: {(2, 10): "B", (3, 10): "C", (3, 21): "D", (5, 21): "E", (3, 22): "D", (5, 22): "E"}
})
danger = pd.DataFrame({
1: {"A": 1, "B": 1, "C": 1, "D": 2, "E": 3},
2: {"A": 1, "B": 2, "C": 2, "D": 3, "E": 4},
3: {"A": 2, "B": 2, "C": 3, "D": 3, "E": 4},
4: {"A": 2, "B": 3, "C": 4, "D": 4, "E": 5},
5: {"A": 2, "B": 3, "C": 4, "D": 4, "E": 5}
})
def get_danger(series):
p1 = series["CLASS", _NONE, "problem_1"]
p2 = series["CLASS", _NONE, "problem_2"]
p3 = series["CLASS", _NONE, "problem_2"]
dl = ("CLASS", _NONE, "danger_level")
ew = ("CLASS", _NONE, "emergency_warning")
if p1 == _NONE:
series[dl] = "1"
series[ew] = "Ikke gitt"
else:
p1 = series["CLASS", p1][["prob", "trig", "dist", "dsize"]].apply(np.int)
try:
dl1 = str(danger.loc[touch.loc[(p1["prob"], p1["trig"]), p1["dist"]], p1["dsize"]])
except KeyError:
dl1 = 0
if p2 != _NONE:
p2 = series["CLASS", p2][["prob", "trig", "dist", "dsize"]].apply(np.int)
try:
dl1 = str(danger.loc[touch.loc[(p1["prob"], p1["trig"]), p1["dist"]], p1["dsize"]])
except KeyError:
series[dl] = "2"
series[ew] = "Ikke gitt"
try:
if p1["trig"] == 22 and p1["dsize"] >= 3:
series[ew] = "Naturlig utløste skred"
except KeyError:
pass
return series
ld = self.copy()
ld.pred = ld.pred.apply(get_danger, axis=1)
return ld
def to_elev_class(self, exclude_label=False):
"""Convert all elevations to classes"""
if self.elevation_class == (True, exclude_label):
return self.copy()
elif self.elevation_class == (True, not exclude_label):
return self.from_elev_class().to_elev_class(exclude_label)
MAX_ELEV = 2500
def round_min(series):
region = int(series.name[1])
elev = float(series.values[0])
tl = REGION_ELEV[region][0]
return 0 if abs(elev - 0) <= abs(elev - tl) else 1
def round_max(series):
region = int(series.name[1])
elev = float(series.values[0])
tl = REGION_ELEV[region][1]
return 0 if abs(elev - MAX_ELEV) <= abs(elev - tl) else 1
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
df["CLASS", problem, "lev_min"] = df[[("REAL", problem, "lev_min")]].apply(round_min, axis=1).apply(str)
df["CLASS", problem, "lev_max"] = df[[("REAL", problem, "lev_max")]].apply(round_max, axis=1).apply(str)
df.drop([
("CLASS", problem, "lev_fill"),
("REAL", problem, "lev_min"),
("REAL", problem, "lev_max")
], axis=1, inplace=True)
df.sort_index(inplace=True, axis=1)
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
df[f"{prefix[0]}_min", prefix[1]] = df[[(f"{prefix[0]}_min", prefix[1])]].apply(round_min, axis=1)
df[f"{prefix[0]}_max", prefix[1]] = df[[(f"{prefix[0]}_max", prefix[1])]].apply(round_max, axis=1)
df.drop([
(f"{prefix[0]}_fill_1", prefix[1]),
(f"{prefix[0]}_fill_2", prefix[1]),
(f"{prefix[0]}_fill_3", prefix[1]),
(f"{prefix[0]}_fill_4", prefix[1]),
], axis=1, inplace=True)
range_ld = self.copy().denormalize()
range_ld = range_ld.to_elevation_fmt_4(exclude_label)
if self.label is not None and not exclude_label:
convert_label(range_ld.label)
if self.pred is not None:
convert_label(range_ld.pred)
if self.data is not None:
convert_data(range_ld.data)
range_ld.scaler.fit(range_ld.data)
range_ld.elevation_class = (True, exclude_label)
if self.is_normalized:
return range_ld.normalize()
else:
return range_ld
def from_elev_class(self):
"""Convert all elevation classes to elevations"""
if not self.elevation_class[0]:
return self.copy()
exclude_label = self.elevation_class[1]
MAX_ELEV = 2500
def find_min(series):
region = int(series.name[1])
is_middle = bool(float(series.values[0]))
tl = REGION_ELEV[region][0]
return tl if is_middle else 0
def find_max(series):
region = int(series.name[1])
is_middle = bool(float(series.values[0]))
tl = REGION_ELEV[region][1]
return tl if is_middle else MAX_ELEV
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
df["REAL", problem, "lev_min"] = df[[("CLASS", problem, "lev_min")]].apply(find_min, axis=1).apply(str)
df["REAL", problem, "lev_max"] = df[[("CLASS", problem, "lev_max")]].apply(find_max, axis=1).apply(str)
df["CLASS", problem, "lev_fill"] = "4"
df.drop([
("CLASS", problem, "lev_min"),
("CLASS", problem, "lev_max"),
], axis=1, inplace=True)
df.sort_index(inplace=True, axis=1)
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
df[f"{prefix[0]}_min", prefix[1]] = df[[(f"{prefix[0]}_min", prefix[1])]].apply(find_min, axis=1)
df[f"{prefix[0]}_max", prefix[1]] = df[[(f"{prefix[0]}_max", prefix[1])]].apply(find_max, axis=1)
df[f"{prefix[0]}_fill_1", prefix[1]] = 0
df[f"{prefix[0]}_fill_2", prefix[1]] = 0
df[f"{prefix[0]}_fill_3", prefix[1]] = 0
df[f"{prefix[0]}_fill_4", prefix[1]] = 1
df.sort_index(inplace=True, axis=1)
range_ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(range_ld.label)
if self.pred is not None:
convert_label(range_ld.pred)
if self.data is not None:
convert_data(range_ld.data)
range_ld.scaler.fit(range_ld.data)
range_ld.elevation_class = (False, False)
if self.is_normalized:
return range_ld.normalize()
else:
return range_ld
def to_elevation_fmt_1(self, exclude_label=False):
"""Convert all elevations to format 1"""
MAX_ELEV = 2500
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
fill = df["CLASS", problem, "lev_fill"].apply(str)
twos = fill == "2"
threes = fill == "3"
fours = fill == "4"
df.loc[np.logical_or(twos, threes), ("REAL", problem, "lev_max")] = 0
df.loc[np.logical_or(twos, threes), ("REAL", problem, "lev_min")] = 0
df.loc[np.logical_or(twos, threes), ("CLASS", problem, "lev_fill")] = "1"
df.loc[fours, ("REAL", problem, "lev_max")] = df.loc[fours, ("REAL", problem, "lev_min")]
df.loc[fours, ("REAL", problem, "lev_min")] = 0
df.loc[fours, ("CLASS", problem, "lev_fill")] = "1"
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
ones = df[(f"{prefix[0]}_fill_1", prefix[1])].apply(np.bool)
twos = df[(f"{prefix[0]}_fill_2", prefix[1])].apply(np.bool)
threes = df[(f"{prefix[0]}_fill_3", prefix[1])].apply(np.bool)
fours = df[(f"{prefix[0]}_fill_4", prefix[1])].apply(np.bool)
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_max", prefix[1])] = 0
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_fill_1", prefix[1])] = 1
df[(f"{prefix[0]}_fill_2", prefix[1])] = np.zeros(twos.shape)
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
df.loc[fours, (f"{prefix[0]}_max", prefix[1])] = df.loc[fours, (f"{prefix[0]}_min", prefix[1])]
df.loc[fours, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[threes == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(ld.label)
if self.pred is not None:
convert_label(ld.pred)
if self.data is not None:
convert_data(ld.data)
ld.scaler.fit(ld.data)
if self.is_normalized:
return ld.normalize()
else:
return ld
def to_elevation_fmt_4(self, exclude_label=False):
"""Convert all elevations to ranges"""
MAX_ELEV = 2500
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
fill = df["CLASS", problem, "lev_fill"].apply(str)
ones = fill == "1"
twos = fill == "2"
threes = fill == "3"
df.loc[ones, ("REAL", problem, "lev_min")] = df.loc[ones, ("REAL", problem, "lev_max")]
df.loc[ones, ("REAL", problem, "lev_max")] = MAX_ELEV
df.loc[ones, ("CLASS", problem, "lev_fill")] = "4"
df.loc[twos, ("REAL", problem, "lev_min")] = 0
df.loc[twos, ("CLASS", problem, "lev_fill")] = "4"
df.loc[threes, ("REAL", problem, "lev_min")] = 0
df.loc[threes, ("REAL", problem, "lev_max")] = MAX_ELEV
df.loc[threes, ("CLASS", problem, "lev_fill")] = "4"
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
ones = df[(f"{prefix[0]}_fill_1", prefix[1])].apply(np.bool)
twos = df[(f"{prefix[0]}_fill_2", prefix[1])].apply(np.bool)
threes = df[(f"{prefix[0]}_fill_3", prefix[1])].apply(np.bool)
fours = df[(f"{prefix[0]}_fill_4", prefix[1])].apply(np.bool)
df.loc[ones, (f"{prefix[0]}_min", prefix[1])] = df.loc[ones, (f"{prefix[0]}_max", prefix[1])]
df.loc[ones, (f"{prefix[0]}_max", prefix[1])] = MAX_ELEV
df.loc[ones == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_1", prefix[1])] = np.zeros(ones.shape)
df.loc[twos, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[twos == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_2", prefix[1])] = np.zeros(twos.shape)
df.loc[threes, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[threes, (f"{prefix[0]}_max", prefix[1])] = MAX_ELEV
df.loc[threes == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(ld.label)
if self.pred is not None:
convert_label(ld.pred)
if self.data is not None:
convert_data(ld.data)
ld.scaler.fit(ld.data)
if self.is_normalized:
return ld.normalize()
else:
return ld
def valid_pred(self):
"""Makes the bulletins internally coherent. E.g., removes problem 3 if problem 2 is blank."""
if self.pred is None:
raise NotPredictedError
ld = self.copy()
if self.elevation_class:
ld = ld.from_elev_class()
# Handle Problem 1-3
prob_cols = []
for n in range(1, 4):
if f"problem_{n}" in list(ld.pred["CLASS", _NONE].columns):
prob_cols.append(("CLASS", _NONE, f"problem_{n}"))
prev_eq = np.zeros((ld.pred.shape[0], len(prob_cols)), dtype=bool)
for n, col in enumerate(prob_cols):
for mcol in prob_cols[0:n]:
# If equal to problem_n-1/2, set to _NONE.
prev_eq[:, n] = np.logical_or(
prev_eq[:, n],
np.equal(ld.pred[mcol], ld.pred[col])
)
# Set to None if problem_n-1/2 was _NONE.
prev_eq[:, n] = np.logical_or(
prev_eq[:, n],
ld.pred[mcol] == _NONE
)
ld.pred.loc[prev_eq[:, n], col] = _NONE
# Delete subproblem solutions that are irrelevant
for subprob in PROBLEMS.values():
rows = np.any(np.char.equal(ld.pred.loc[:, prob_cols].values.astype("U"), subprob), axis=1) == False
columns = [name == subprob for name in ld.pred.columns.get_level_values(1)]
ld.pred.loc[rows, columns] = _NONE
# Set problem_amount to the right number
ld.pred['CLASS', _NONE, 'problem_amount'] = np.sum(ld.pred.loc[:, prob_cols] != _NONE, axis=1).astype(str)
# If lev_fill is "3" or "4", lev_min is always "0"
for subprob in PROBLEMS.values():
if "lev_fill" in ld.pred["CLASS", subprob].columns:
fill = ld.pred.astype(str)["CLASS", subprob, "lev_fill"]
if "lev_min" in ld.pred["REAL", subprob]:
ld.pred.loc[np.logical_or(fill == "1", fill == "2"), ("REAL", subprob, "lev_min")] = "0"
if "lev_min" in ld.pred["REAL", subprob] and "lev_max" in ld.pred["REAL", subprob]:
real = ld.pred["REAL", subprob].replace("", np.nan).astype(np.float)
reversed_idx = real["lev_min"] > real["lev_max"]
                    # parentheses ensure this is the midpoint of the reversed range,
                    # not lev_min + (lev_max / 2)
                    average = (real.loc[reversed_idx, "lev_min"] + real.loc[reversed_idx, "lev_max"]) / 2
ld.pred.loc[reversed_idx, ("REAL", subprob, "lev_min")] = average
ld.pred.loc[reversed_idx, ("REAL", subprob, "lev_max")] = average
ld.pred.loc[:, ["CLASS", "MULTI"]] = ld.pred.loc[:, ["CLASS", "MULTI"]].astype(str)
ld.pred["REAL"] = ld.pred["REAL"].replace("", np.nan).astype(np.float)
return ld
def split(self, rounds=3, seed="<PASSWORD>"):
"""Returns a split of the object into a training set, a test set and a validation set.
Parameters rounds and seed are not used any more.
Use as:
for test, train, eval in ld.split():
model.fit(test)
model.predict(train)
model.predict(eval)
"""
train_regions = [3007, 3012, 3010, 3009, 3013, 3017, 3014, 3032, 3027, 3029, 3022, 3031, 3023, 3037, 3024, 3028]
test_regions = [3011, 3016, 3035]
eval_regions = [3006, 3015, 3034]
split = []
for regions in [train_regions, test_regions, eval_regions]:
ld = self.copy()
ld.data = ld.data.iloc[[region in regions for region in ld.data.index.get_level_values(1)]]
ld.label = ld.label.iloc[[region in regions for region in ld.label.index.get_level_values(1)]]
ld.pred = ld.pred.iloc[[region in regions for region in ld.pred.index.get_level_values(1)]]
ld.row_weight = ld.row_weight.iloc[[region in regions for region in ld.row_weight.index.get_level_values(1)]]
split.append(ld)
return [tuple(split)]
def f1(self):
"""Get F1, precision, recall and RMSE of all labels.
:return: Series with scores of all possible labels and values.
"""
if self.label is None or self.pred is None:
raise DatasetMissingLabel()
dummies = self.to_dummies()
old_settings = np.seterr(divide='ignore', invalid='ignore')
        df_idx = pd.MultiIndex.from_arrays([[], [], [], []])
# -------------------------------------------------------------------------------------------------------------------- #
# standard distribution imports
# -----------------------------
import importlib
import logging
import random
import time
import datetime
import os    # used below for path handling and environment checks
import json  # used below in save_scenario_inputs()
# import traceback
from abc import abstractmethod
from tqdm import tqdm
import typing as tp
from pathlib import Path
from multiprocessing import Manager
from src.python_plots.plot_classes import PyPlot
# additional module imports (> requirements)
# ------------------------------------------
import pandas as pd
import numpy as np
# from IPython import embed
# src imports
# -----------
from src.misc.init_modules import load_fleet_control_module, load_routing_engine
from src.demand.demand import Demand, SlaveDemand
from src.routing.NetworkBase import return_position_str
from src.simulation.Vehicles import SimulationVehicle
if tp.TYPE_CHECKING:
from src.fleetctrl.FleetControlBase import FleetControlBase
from src.routing.NetworkBase import NetworkBase
# -------------------------------------------------------------------------------------------------------------------- #
# global variables
# ----------------
# set log level to logging.DEBUG or logging.INFO for single simulations
from src.misc.globals import *
DEFAULT_LOG_LEVEL = logging.INFO
LOG = logging.getLogger(__name__)
BUFFER_SIZE = 10
PROGRESS_LOOP = "demand"
PROGRESS_LOOP_VEHICLE_STATUS = [VRL_STATES.IDLE,VRL_STATES.CHARGING,VRL_STATES.REPOSITION]
# check for computation on LRZ cluster
if os.environ.get('SLURM_PROCID'):
PROGRESS_LOOP = "off"
# -------------------------------------------------------------------------------------------------------------------- #
# functions
# ---------
def create_or_empty_dir(dirname):
if os.path.isdir(dirname):
"Removes all files from top"
if(dirname == '/' or dirname == "\\"): return
else:
for root, dirs, files in os.walk(dirname, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except Exception as err:
print(err)
for name in dirs:
try:
os.rmdir(os.path.join(root, name))
except Exception as err:
print(err)
else:
os.makedirs(dirname)
def build_operator_attribute_dicts(parameters, n_op, prefix="op_"):
"""
Extracts elements of parameters dict whose keys begin with prefix and generates a list of dicts.
The values of the relevant elements of parameters must be either single values or a list of length n_op, or else
an exception will be raised.
:param parameters: dict (or dict-like config object) containing a superset of operator parameters
:type parameters: dict
:param n_op: number of operators expected
:type n_op: int
:param prefix: prefix by which to filter out operator parameters
:type prefix: str
"""
list_op_dicts = [dict() for i in range(n_op)] # initialize list of empty dicts
for k in [x for x in parameters if x.startswith(prefix)]:
# if only a single value is given, use it for all operators
if type(parameters[k]) in [str, int, float, bool, type(None), dict]:
for di in list_op_dicts:
di[k] = parameters[k]
# if a list of values is given and the length matches the number of operators, use them respectively
elif len(parameters[k]) == n_op:
for i, op in enumerate(list_op_dicts):
op[k] = parameters[k][i]
elif k == G_OP_REPO_TH_DEF: # TODO # lists as inputs for op
for di in list_op_dicts:
di[k] = parameters[k]
# if parameter has invalid number of values, raise exception
else:
raise ValueError("Number of values for parameter", k, "equals neither n_op nor 1.", type(parameters[k]))
return list_op_dicts
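# Illustrative example (parameter names and values below are hypothetical, not
# taken from a real scenario file):
#   build_operator_attribute_dicts({"op_module": ["FleetCtrlA", "FleetCtrlB"],
#                                   "op_fleet": {"veh_type_x": 10},
#                                   "network_type": "NetworkBasic"}, 2)
# returns
#   [{"op_module": "FleetCtrlA", "op_fleet": {"veh_type_x": 10}},
#    {"op_module": "FleetCtrlB", "op_fleet": {"veh_type_x": 10}}]
# i.e. list values of length n_op are split per operator, scalar/dict values are
# shared by all operators, and keys without the "op_" prefix are ignored.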
# -------------------------------------------------------------------------------------------------------------------- #
# main
# ----
class FleetSimulationBase:
def __init__(self, scenario_parameters: dict):
self.t_init_start = time.perf_counter()
# config
self.scenario_name = scenario_parameters[G_SCENARIO_NAME]
print("-"*80 + f"\nSimulation of scenario {self.scenario_name}")
LOG.info(f"General initialization of scenario {self.scenario_name}...")
self.dir_names = self.get_directory_dict(scenario_parameters)
self.scenario_parameters: dict = scenario_parameters
# check whether simulation already has been conducted -> use final_state.csv to check
final_state_f = os.path.join(self.dir_names[G_DIR_OUTPUT], "final_state.csv")
if self.scenario_parameters.get("keep_old", False) and os.path.isfile(final_state_f):
prt_str = f"Simulation {self.scenario_name} results available and keep_old flag is True!" \
f" Not starting the simulation!"
print(prt_str)
LOG.info(prt_str)
self._started = True
return
else:
self._started = False
# general parameters
self.start_time = self.scenario_parameters[G_SIM_START_TIME]
self.end_time = self.scenario_parameters[G_SIM_END_TIME]
self.time_step = self.scenario_parameters.get(G_SIM_TIME_STEP, 1)
self.check_sim_env_spec_inputs(self.scenario_parameters)
self.n_op = self.scenario_parameters[G_NR_OPERATORS]
self._manager: tp.Optional[Manager] = None
self._shared_dict: dict = {}
self._plot_class_instance: tp.Optional[PyPlot] = None
self.realtime_plot_flag = self.scenario_parameters.get(G_SIM_REALTIME_PLOT_FLAG, 0)
# build list of operator dictionaries # TODO: this could be eliminated with a new YAML-based config system
self.list_op_dicts: tp.Dict[str,str] = build_operator_attribute_dicts(scenario_parameters, self.n_op,
prefix="op_")
# take care of random seeds at beginning of simulations
random.seed(self.scenario_parameters[G_RANDOM_SEED])
np.random.seed(self.scenario_parameters[G_RANDOM_SEED])
# empty output directory
create_or_empty_dir(self.dir_names[G_DIR_OUTPUT])
# write scenario config file in output directory
self.save_scenario_inputs()
# remove old log handlers (otherwise sequential simulations only log to first simulation)
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# start new log file
logging.VERBOSE = 5
logging.addLevelName(logging.VERBOSE, "VERBOSE")
logging.Logger.verbose = lambda inst, msg, *args, **kwargs: inst.log(logging.VERBOSE, msg, *args, **kwargs)
logging.LoggerAdapter.verbose = lambda inst, msg, *args, **kwargs: inst.log(logging.VERBOSE, msg, *args, **kwargs)
logging.verbose = lambda msg, *args, **kwargs: logging.log(logging.VERBOSE, msg, *args, **kwargs)
if self.scenario_parameters.get("log_level", "info"):
level_str = self.scenario_parameters["log_level"]
if level_str == "verbose":
log_level = logging.VERBOSE
elif level_str == "debug":
log_level = logging.DEBUG
elif level_str == "info":
log_level = logging.INFO
elif level_str == "warning":
log_level = logging.WARNING
else:
log_level = DEFAULT_LOG_LEVEL
else:
log_level = DEFAULT_LOG_LEVEL
pd.set_option("mode.chained_assignment", None)
self.log_file = os.path.join(self.dir_names[G_DIR_OUTPUT], f"00_simulation.log")
if log_level < logging.INFO:
streams = [logging.FileHandler(self.log_file), logging.StreamHandler()]
else:
print("Only minimum output to console -> see log-file")
streams = [logging.FileHandler(self.log_file)]
# TODO # log of subsequent simulations is saved in first simulation log
logging.basicConfig(handlers=streams,
level=log_level, format='%(process)d-%(name)s-%(levelname)s-%(message)s')
# set up output files
self.user_stat_f = os.path.join(self.dir_names[G_DIR_OUTPUT], f"1_user-stats.csv")
self.network_stat_f = os.path.join(self.dir_names[G_DIR_OUTPUT], f"3_network-stats.csv")
self.pt_stat_f = os.path.join(self.dir_names[G_DIR_OUTPUT], "4_pt_stats.csv")
# init modules
# ------------
# zone system
# TODO # after ISTTT: enable multiple zone systems
# TODO # after ISTTT: bring init of modules in extra function (-> parallel processing)
self.zones = None
if self.dir_names.get(G_DIR_ZONES, None) is not None:
if self.scenario_parameters.get(G_FC_TYPE) and self.scenario_parameters[G_FC_TYPE] == "perfect":
from src.infra.PerfectForecastZoning import PerfectForecastZoneSystem
self.zones = PerfectForecastZoneSystem(self.dir_names[G_DIR_ZONES], self.scenario_parameters, self.dir_names)
else:
from src.infra.Zoning import ZoneSystem
self.zones = ZoneSystem(self.dir_names[G_DIR_ZONES], self.scenario_parameters, self.dir_names)
# routing engine
LOG.info("Initialization of network and routing engine...")
network_type = self.scenario_parameters[G_NETWORK_TYPE]
network_dynamics_file = self.scenario_parameters.get(G_NW_DYNAMIC_F, None)
# TODO # check consistency of scenario inputs / another way to refactor add_init_data ?
self.routing_engine: NetworkBase = load_routing_engine(network_type, self.dir_names[G_DIR_NETWORK],
network_dynamics_file_name=network_dynamics_file)
if network_type == "NetworkDynamicNFDClusters":
self.routing_engine.add_init_data(self.start_time, self.time_step,
self.scenario_parameters[G_NW_DENSITY_T_BIN_SIZE],
self.scenario_parameters[G_NW_DENSITY_AVG_DURATION], self.zones,
self.network_stat_f)
# public transportation module
LOG.info("Initialization of line-based public transportation...")
pt_type = self.scenario_parameters.get(G_PT_TYPE)
self.gtfs_data_dir = self.dir_names.get(G_DIR_PT)
if pt_type is None or self.gtfs_data_dir is None:
self.pt = None
elif pt_type == "PTMatrixCrowding":
pt_module = importlib.import_module("src.pubtrans.PtTTMatrixCrowding")
self.pt = pt_module.PublicTransportTravelTimeMatrixWithCrowding(self.gtfs_data_dir, self.pt_stat_f,
self.scenario_parameters,
self.routing_engine, self.zones)
elif pt_type == "PtCrowding":
pt_module = importlib.import_module("src.pubtrans.PtCrowding")
self.pt = pt_module.PublicTransportWithCrowding(self.gtfs_data_dir, self.pt_stat_f, self.scenario_parameters,
self.routing_engine, self.zones)
else:
raise IOError(f"Public transport module {pt_type} not defined for current simulation environment.")
# attribute for demand, charging and zone module
self.demand = None
self.cdp = None
self._load_demand_module()
self._load_charging_modules()
# take care of charging stations, depots and initially inactive vehicles
if self.dir_names.get(G_DIR_INFRA):
depot_fname = self.scenario_parameters.get(G_INFRA_DEP)
if depot_fname is not None:
depot_f = os.path.join(self.dir_names[G_DIR_INFRA], depot_fname)
else:
depot_f = os.path.join(self.dir_names[G_DIR_INFRA], "depots.csv")
pub_cs_fname = self.scenario_parameters.get(G_INFRA_PBCS)
if pub_cs_fname is not None:
pub_cs_f = os.path.join(self.dir_names[G_DIR_INFRA], pub_cs_fname)
else:
pub_cs_f = os.path.join(self.dir_names[G_DIR_INFRA], "public_charging_stations.csv")
from src.infra.ChargingStation import ChargingAndDepotManagement
self.cdp = ChargingAndDepotManagement(depot_f, pub_cs_f, self.routing_engine, self.scenario_parameters,
self.list_op_dicts)
LOG.info("charging stations and depots initialzied!")
else:
self.cdp = None
# attributes for fleet controller and vehicles
self.sim_vehicles: tp.Dict[tp.Tuple[int, int], SimulationVehicle] = {}
self.sorted_sim_vehicle_keys: tp.List[tp.Tuple[int, int]] = sorted(self.sim_vehicles.keys())
self.operators: tp.List[FleetControlBase] = []
self.op_output = {}
self._load_fleetctr_vehicles()
# call additional simulation environment specific init
LOG.info("Simulation environment specific initializations...")
self.init_blocking = True
self.add_init(self.scenario_parameters)
# load initial state depending on init_blocking attribute
# HINT: it is important that this is done at the end of initialization!
LOG.info("Creating or loading initial vehicle states...")
np.random.seed(self.scenario_parameters[G_RANDOM_SEED])
self.load_initial_state()
LOG.info(f"Initialization of scenario {self.scenario_name} successful.")
# self.routing_engine.checkNetwork()
def _load_demand_module(self):
""" Loads some demand modules """
# demand module
LOG.info("Initialization of travelers...")
if self.scenario_parameters[G_SIM_ENV] != "MobiTopp":
self.demand = Demand(self.scenario_parameters, self.user_stat_f, self.routing_engine, self.zones)
self.demand.load_demand_file(self.scenario_parameters[G_SIM_START_TIME],
self.scenario_parameters[G_SIM_END_TIME], self.dir_names[G_DIR_DEMAND],
self.scenario_parameters[G_RQ_FILE], self.scenario_parameters[G_RANDOM_SEED],
self.scenario_parameters.get(G_RQ_TYP1, None),
self.scenario_parameters.get(G_RQ_TYP2, {}),
self.scenario_parameters.get(G_RQ_TYP3, {}),
simulation_time_step=self.time_step)
if self.scenario_parameters.get(G_PA_RQ_FILE) is not None:
self.demand.load_parcel_demand_file(self.scenario_parameters[G_SIM_START_TIME],
self.scenario_parameters[G_SIM_END_TIME], self.dir_names[G_DIR_DEMAND],
self.scenario_parameters[G_PA_RQ_FILE], self.scenario_parameters[G_RANDOM_SEED],
self.scenario_parameters.get(G_PA_RQ_TYP1, None),
self.scenario_parameters.get(G_PA_RQ_TYP2, {}),
self.scenario_parameters.get(G_PA_RQ_TYP3, {}),
simulation_time_step=self.time_step)
else:
self.demand = SlaveDemand(self.scenario_parameters, self.user_stat_f)
if self.zones is not None:
self.zones.register_demand_ref(self.demand)
def _load_charging_modules(self):
""" Loads necessary modules for charging """
# take care of charging stations, depots and initially inactive vehicles
if self.dir_names.get(G_DIR_INFRA):
depot_fname = self.scenario_parameters.get(G_INFRA_DEP)
if depot_fname is not None:
depot_f = os.path.join(self.dir_names[G_DIR_INFRA], depot_fname)
else:
depot_f = os.path.join(self.dir_names[G_DIR_INFRA], "depots.csv")
pub_cs_fname = self.scenario_parameters.get(G_INFRA_PBCS)
if pub_cs_fname is not None:
pub_cs_f = os.path.join(self.dir_names[G_DIR_INFRA], pub_cs_fname)
else:
pub_cs_f = os.path.join(self.dir_names[G_DIR_INFRA], "public_charging_stations.csv")
from src.infra.ChargingStation import ChargingAndDepotManagement
self.cdp = ChargingAndDepotManagement(depot_f, pub_cs_f, self.routing_engine, self.scenario_parameters,
self.list_op_dicts)
LOG.info("charging stations and depots initialzied!")
else:
self.cdp = None
def _load_fleetctr_vehicles(self):
""" Loads the fleet controller and vehicles """
# simulation vehicles and fleet control modules
LOG.info("Initialization of MoD fleets...")
route_output_flag = self.scenario_parameters.get(G_SIM_ROUTE_OUT_FLAG, True)
replay_flag = self.scenario_parameters.get(G_SIM_REPLAY_FLAG, False)
veh_type_list = []
for op_id in range(self.n_op):
operator_attributes = self.list_op_dicts[op_id]
operator_module_name = operator_attributes[G_OP_MODULE]
self.op_output[op_id] = [] # shared list among vehicles
if not operator_module_name == "PtFleetControl":
fleet_composition_dict = operator_attributes[G_OP_FLEET]
list_vehicles = []
vid = 0
for veh_type, nr_veh in fleet_composition_dict.items():
for _ in range(nr_veh):
veh_type_list.append([op_id, vid, veh_type])
tmp_veh_obj = SimulationVehicle(op_id, vid, self.dir_names[G_DIR_VEH], veh_type,
self.routing_engine, self.demand.rq_db,
self.op_output[op_id], route_output_flag,
replay_flag)
list_vehicles.append(tmp_veh_obj)
self.sim_vehicles[(op_id, vid)] = tmp_veh_obj
vid += 1
OpClass = load_fleet_control_module(operator_module_name)
self.operators.append(OpClass(op_id, operator_attributes, list_vehicles, self.routing_engine, self.zones,
self.scenario_parameters, self.dir_names, self.cdp))
else:
from src.pubtrans.PtFleetControl import PtFleetControl
OpClass = PtFleetControl(op_id, self.gtfs_data_dir, self.routing_engine, self.zones, self.scenario_parameters, self.dir_names, charging_management=self.cdp)
init_vids = OpClass.return_vehicles_to_initialize()
list_vehicles = []
for vid, veh_type in init_vids.items():
tmp_veh_obj = SimulationVehicle(op_id, vid, self.dir_names[G_DIR_VEH], veh_type,
self.routing_engine, self.demand.rq_db,
self.op_output[op_id], route_output_flag,
replay_flag)
list_vehicles.append(tmp_veh_obj)
self.sim_vehicles[(op_id, vid)] = tmp_veh_obj
OpClass.continue_init(list_vehicles, self.start_time)
self.operators.append(OpClass)
veh_type_f = os.path.join(self.dir_names[G_DIR_OUTPUT], "2_vehicle_types.csv")
veh_type_df = pd.DataFrame(veh_type_list, columns=[G_V_OP_ID, G_V_VID, G_V_TYPE])
veh_type_df.to_csv(veh_type_f, index=False)
@staticmethod
def get_directory_dict(scenario_parameters):
"""
This function provides the correct paths to certain data according to the specified data directory structure.
:param scenario_parameters: simulation input (pandas series)
:return: dictionary with paths to the respective data directories
"""
return get_directory_dict(scenario_parameters)
def save_scenario_inputs(self):
config_f = os.path.join(self.dir_names[G_DIR_OUTPUT], G_SC_INP_F)
config = {"scenario_parameters": self.scenario_parameters, "list_operator_attributes": self.list_op_dicts,
"directories": self.dir_names}
with open(config_f, "w") as fh_config:
json.dump(config, fh_config, indent=4)
def evaluate(self):
"""Runs standard and simulation environment specific evaluations over simulation results."""
output_dir = self.dir_names[G_DIR_OUTPUT]
# standard evaluation
from src.evaluation.standard import standard_evaluation
standard_evaluation(output_dir)
self.add_evaluate()
def initialize_operators_and_vehicles(self):
""" this function loads and initialzie all operator classes and its vehicle objects
and sets corresponding outputs"""
veh_type_list = []
route_output_flag = self.scenario_parameters.get(G_SIM_ROUTE_OUT_FLAG, True)
replay_flag = self.scenario_parameters.get(G_SIM_REPLAY_FLAG, False)
for op_id in range(self.n_op):
self.op_output[op_id] = [] # shared list among vehicles
operator_attributes = self.list_op_dicts[op_id]
operator_module_name = operator_attributes[G_OP_MODULE]
fleet_composition_dict = operator_attributes[G_OP_FLEET]
list_vehicles = []
vid = 0
for veh_type, nr_veh in fleet_composition_dict.items():
for _ in range(nr_veh):
veh_type_list.append([op_id, vid, veh_type])
tmp_veh_obj = SimulationVehicle(op_id, vid, self.dir_names[G_DIR_VEH], veh_type,
self.routing_engine, self.demand.rq_db,
self.op_output[op_id], route_output_flag,
replay_flag)
list_vehicles.append(tmp_veh_obj)
self.sim_vehicles[(op_id, vid)] = tmp_veh_obj
vid += 1
OpClass = load_fleet_control_module(operator_module_name)
self.operators.append(OpClass(op_id, operator_attributes, list_vehicles, self.routing_engine, self.zones,
self.scenario_parameters, self.dir_names, self.cdp))
veh_type_f = os.path.join(self.dir_names[G_DIR_OUTPUT], "2_vehicle_types.csv")
        veh_type_df = pd.DataFrame(veh_type_list, columns=[G_V_OP_ID, G_V_VID, G_V_TYPE])
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 23:34:57 2019
@author: reynaldo.espana.rey
Web scrapping algorithm to build data set for text generator
source: https://towardsdatascience.com/how-to-web-scrape-with-python-in-4-minutes-bc49186a8460
"""
# =============================================================================
# Libraries
# =============================================================================
import numpy as np
import pandas as pd
import requests
import re
import time
import os
from bs4 import BeautifulSoup
import string
# =============================================================================
# Functions
# =============================================================================
# request a page and parse it into a BeautifulSoup object
def get_page(url, verbose=0):
# get page
response = requests.get(url)
if verbose:
print('Successful:', str(response) =='<Response [200]>')
if str(response) =='<Response [200]>':
# BeautifulSoup data structure
soup = BeautifulSoup(response.text, 'html.parser')
return soup
return str(response)
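# Example (using the root URL defined further down): get_page(url_root + 'A.html')
# returns a BeautifulSoup object on HTTP 200, and otherwise the response string,
# e.g. '<Response [404]>'.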
# function to retrieve links matching a CSS selector (picked with the Inspector Gadget plugin)
def get_href(url, attr):
# get page
soup = get_page(url)
# get data links
data = soup.select(attr)
links = np.unique([x['href'] for x in data])
return links
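# Example, matching the poet loop further down:
#   get_href('https://www.poemas-del-alma.com/A.html', attr='#content li a')
# returns the unique href attributes of the <a> tags matched by that CSS selector.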
def get_text(url, attr):
# get page
soup = get_page(url)
# get data links
data = soup.select(attr)
return data
# valid file name
def valid_name(value):
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
return value
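# Example: valid_name('Pablo Neruda') -> 'pablo-neruda'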
# function to remove non-UTF-8 characters from a document
def remove_chars(doc, chars_2remove=None):
if chars_2remove is None:
# list of character not UTF-8 to be remove from doc
chars_2remove = ['\x85', '\x91', '\x92', '\x93', '\x94', '\x96',
'\x97', '\xa0']
# as reggex expression
chars_2remove = '[' + ''.join(chars_2remove) + ']'
# erase
doc = re.sub(chars_2remove, ' ', doc)
doc = re.sub(' +', ' ', doc).strip()
return doc
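# --- Hedged usage sketch (illustrative only; not part of the original script) ---
# The URL and CSS selector below are placeholders/assumptions, not taken from the source site.
if False:  # set to True to actually issue the requests
    example_url = 'https://example.com/poems/A.html'        # hypothetical URL
    soup = get_page(example_url, verbose=1)                 # BeautifulSoup object, or response string on failure
    links = get_href(example_url, attr='#content li a')     # unique href values matched by the selector
    fname = valid_name('Canción del Pirata!')               # lowercased slug, punctuation stripped, spaces -> '-'
    clean = remove_chars('text \x85 with  odd   spacing')   # non-UTF-8 chars removed, whitespace collapsed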
# =============================================================================
# Spanish poems
# =============================================================================
#### Spanish poems web page source
# root source
url_root = 'https://www.poemas-del-alma.com/'
## Path to use
## Retrieve poems and save it in .txt
path_poems = '../data/DB/spanish poems/'
# save list of poems links
path_poems_links = '../data/DB/poems_list.csv'
# =============================================================================
# Poems
# =============================================================================
##### POETS #####
# poems by author in alphabetical order
alphabet = [x for x in string.ascii_uppercase]
# get list of poets
poets = pd.DataFrame()
for letter in alphabet:
print(letter)
links = get_href(url_root + letter + '.html', attr='#content li a')
authors = pd.DataFrame({'author': [x.split('/')[-1].split('.')[0] for x in links],
'link': links})
poets = poets.append(authors)
time.sleep(.5)
poets = poets.reset_index(drop=True)
print('Poets found:', len(poets))
##### POEMS #####
# go through all the poems in poets
# run only for poems not already in folder
poems = pd.read_csv(path_poems_links)
# filter poets to scrap
poets['in_disk'] = poets['author'].isin(poems['author'])
# filter songs df
print ('Files in disk already:', poets.groupby(['in_disk']).size())
# loop to remaining poets
poets_2scrap = poets[poets['in_disk']==False]
# shuffle, else all errors will be first
poets_2scrap = poets_2scrap.sample(frac=1).reset_index(drop=True)
# loop for each poet link
for index, row in poets_2scrap.iterrows():
if (index % 25 == 0):
print('\n\n- Progress %:', index/len(poets_2scrap), '- Total poems:', len(poems))
time.sleep(5)
try:
# get page with poems links
links = get_href(row['link'], attr='#block-poems a')
time.sleep(.5)
links = pd.DataFrame({'poem': links})
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import gc
import os
warnings.filterwarnings('ignore')
from datetime import datetime
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.utils import class_weight, resample
from keras.models import Sequential
from keras.layers import Dense, Dropout, GaussianNoise, Conv1D
from keras.layers import LSTM
from keras.layers import Conv1D, MaxPooling1D, TimeDistributed
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, BatchNormalization
from keras.layers import Flatten, Reshape
from keras.layers import Embedding, Input
from keras.models import Sequential
from keras.models import load_model
from keras import optimizers
from keras.regularizers import L1L2
from keras.callbacks import ModelCheckpoint
from keras.utils import to_categorical, np_utils
from keras.callbacks import Callback, EarlyStopping
from keras.initializers import RandomUniform
import keras.backend as K
import tensorflow as tf
from xgboost import XGBClassifier
scaler = StandardScaler()
def plot_new_feature_distribution(df1, df2, label1, label2, features):
i = 0
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(2,4,figsize=(18,8))
for feature in features:
i += 1
plt.subplot(2,4,i)
sns.kdeplot(df1[feature], bw=0.5,label=label1)
sns.kdeplot(df2[feature], bw=0.5,label=label2)
plt.xlabel(feature, fontsize=11)
locs, labels = plt.xticks()
plt.tick_params(axis='x', which='major', labelsize=8)
plt.tick_params(axis='y', which='major', labelsize=8)
plt.show();
# define roc_callback, inspired by https://github.com/keras-team/keras/issues/6050#issuecomment-329996505
def auc_roc(y_true, y_pred):
# any tensorflow metric
value, update_op = tf.contrib.metrics.streaming_auc(y_pred, y_true)
# find all variables created for this metric
metric_vars = [i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]]
# Add metric variables to GLOBAL_VARIABLES collection.
# They will be initialized for new session.
for v in metric_vars:
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
# force to update metric values
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
def resampling(df_train, ratio):
train_freq = df_train['target'].value_counts()
print(train_freq)
train_freq_mean = train_freq[1]
# Under & Over Sampling store_nbr
df_list = []
target_max = 2
multiple = ratio
for i in range(0, target_max):
df_list.append(df_train[df_train['target']==i])
for i in range(0, target_max):
if i==0:
df_list[i] = df_list[i].sample(n=int(train_freq_mean*multiple), random_state=123, replace=True)
else:
df_list[i] = df_list[i].sample(n=train_freq_mean, random_state=123, replace=True)
df_sampling_train = pd.concat(df_list)
train_freq = df_sampling_train['target'].value_counts()
return pd.DataFrame(df_sampling_train)
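# --- Hedged usage sketch (illustrative only, not from the original notebook) ---
# resampling() resamples class 0 to `ratio` times the minority-class count and class 1 to its own count.
if False:  # illustrative only
    toy = pd.DataFrame({'target': [0] * 90 + [1] * 10,
                        'var_0': np.random.randn(100)})
    balanced = resampling(toy, ratio=1)   # 10 rows per class afterwards
    print(balanced['target'].value_counts())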
def CNN1D(train, test):
X_train = train.drop(['ID_code', 'target'], axis=1)
gc.collect()
X_columns = X_train.columns
Y_columns = ['target']
Y_train = train['target']
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train,
test_size=0.10, stratify=Y_train,
random_state=123, shuffle=True)
X_train = pd.DataFrame(X_train, columns=X_columns)
Y_train = pd.DataFrame(Y_train, columns=Y_columns)
print(X_train.describe())
X_train = pd.concat([X_train, Y_train], axis=1)
print(X_train.describe())
#X_train = resampling(X_train, 1)
Y_train = X_train['target']
X_train = X_train.drop('target', axis=1)
print(Y_train.value_counts())
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight(None, np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
d_class_weights = {0:1.0, 1:1.0}
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_valid = X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
#hyperparameters
input_dimension = 226
learning_rate = 0.0025
momentum = 0.85
hidden_initializer = RandomUniform(seed=123)
dropout_rate = 0.3
optimizer=optimizers.Adam()
# create model
model = Sequential()
model.add(Conv1D(filters=32, kernel_size=3, input_shape=X_train.shape[1:3], activation='relu'))
model.add(Conv1D(filters=16, kernel_size=1, activation='relu'))
model.add(Flatten())
model.add(Dropout(dropout_rate))
model.add(Dense(128, input_dim=input_dimension, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dense(64, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(1, kernel_initializer=hidden_initializer, activation='sigmoid'))
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', auc_roc])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=10,
verbose=1, class_weight = d_class_weights,
validation_data=(X_valid, Y_valid), shuffle=True)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
x_epochs = range(1, len(train_loss) + 1)
plt.plot(x_epochs, train_loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_valid, Y_valid, batch_size=100)
print(score)
Y_pred = model.predict(X_valid)
Y_pred = np.where(Y_pred > 0.5, 1, 0)
#Y_pred = np.argmax(Y_pred, axis=1).reshape(-1,1)
#Y_test = np.argmax(Y_test, axis=1).reshape(-1,1)
print(confusion_matrix(Y_valid, Y_pred))
print(classification_report(Y_valid, Y_pred, labels=[0, 1]))
ID_test = test['ID_code'].values
test = test.drop('ID_code', axis=1)
test = scaler.transform(test)
test = test.reshape(test.shape[0], test.shape[1], 1)
pred = model.predict(test)
result = pd.DataFrame({"ID_code": ID_test})
result["target"] = pred
result.to_csv("submission.csv", index=False)
return result['target']
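# --- Hedged note (illustrative only) ---
# CNN1D above feeds each row as a length-n_features sequence with one channel, while CNN2D
# below folds the feature vector into a 104x6 grid (so it implicitly assumes 104*6 columns).
# Minimal sketch of the 1-D reshape on dummy data:
if False:  # illustrative only
    dummy = np.random.randn(5, 200)                              # 5 rows, 200 features
    dummy_1d = dummy.reshape(dummy.shape[0], dummy.shape[1], 1)  # -> (5, 200, 1) for Conv1D
    print(dummy_1d.shape)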
def CNN2D(train, test):
X_train = train.drop(['ID_code', 'target'], axis=1)
gc.collect()
X_columns = X_train.columns
Y_columns = ['target']
Y_train = train['target']
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train,
test_size=0.10, stratify=Y_train,
random_state=123, shuffle=True)
X_train = pd.DataFrame(X_train, columns=X_columns)
Y_train = pd.DataFrame(Y_train, columns=Y_columns)
print(X_train.describe())
X_train = pd.concat([X_train, Y_train], axis=1)
print(X_train.describe())
#X_train = resampling(X_train, 1)
Y_train = X_train['target']
X_train = X_train.drop('target', axis=1)
print(Y_train.value_counts())
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight(None, np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
d_class_weights = {0:1.0, 1:1.0}
X_train = X_train.reshape(X_train.shape[0], 104, 6, 1)
X_valid = X_valid.reshape(X_valid.shape[0], 104, 6, 1)
#hyperparameters
input_dimension = 226
learning_rate = 0.0025
momentum = 0.85
hidden_initializer = RandomUniform(seed=123)
dropout_rate = 0.3
kernel_size = (3, 3)
strides = (1, 1)
optimizer=optimizers.Adam()
# create model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size = kernel_size, strides = strides,
input_shape=X_train.shape[1:4], activation='relu'))
model.add(Conv2D(filters=16, kernel_size = kernel_size, strides = strides,
activation='relu'))
model.add(Flatten())
model.add(Dropout(dropout_rate))
model.add(Dense(128, input_dim=input_dimension, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dense(64, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(1, kernel_initializer=hidden_initializer, activation='sigmoid'))
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', auc_roc])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=10,
verbose=1, class_weight = d_class_weights,
validation_data=(X_valid, Y_valid), shuffle=True)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
x_epochs = range(1, len(train_loss) + 1)
plt.plot(x_epochs, train_loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_valid, Y_valid, batch_size=100)
print(score)
Y_pred = model.predict(X_valid)
Y_pred = np.where(Y_pred > 0.5, 1, 0)
#Y_pred = np.argmax(Y_pred, axis=1).reshape(-1,1)
#Y_test = np.argmax(Y_test, axis=1).reshape(-1,1)
print(confusion_matrix(Y_valid, Y_pred))
print(classification_report(Y_valid, Y_pred, labels=[0, 1]))
ID_test = test['ID_code'].values
test = test.drop('ID_code', axis=1)
test = scaler.transform(test)
test = test.reshape(test.shape[0], 104, 6, 1)
pred = model.predict(test)
result = pd.DataFrame({"ID_code": ID_test})
result["target"] = pred
result.to_csv("submission.csv", index=False)
return result['target']
def XGBBest(train, test):
X = train.drop(['ID_code', 'target'], axis=1)
y = train.target.values
test_df = test
test = test.drop('ID_code', axis=1)
xgb = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.6,
early_stopping_rounds=70, gamma=2, learning_rate=0.03,
max_delta_step=0, max_depth=7, min_child_weight=10, missing=None,
n_estimators=100, n_jobs=1, nthread=1,
num_boost_round=500, objective='binary:logistic',
predictor='gpu_predictor', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, seed=None, silent=True,
subsample=0.8, tree_method='gpu_hist', verbosity=1)
xgb.fit(X, y)
y_test = xgb.predict_proba(test)
results_df = pd.DataFrame(data={'ID_code':test_df['ID_code'], 'target':y_test[:,1]})
results_df.to_csv('submission.csv', index=False)
return results_df['target']
if __name__ == '__main__':
df_train = pd.read_csv("data/train.csv", engine='c')
df_test = pd.read_csv("data/test.csv", engine='c')
print("train shape: ", df_train.shape)
print("test shape: ", df_test.shape)
print("df_train is null: ", df_train.isnull().sum().sum())
print("df_test is null: ", df_test.isnull().sum().sum())
# Feature Engineering
# Correlation
features = df_train.columns.values[2:202]
correlations = df_train[features].corr().abs().unstack().sort_values(kind="quicksort").reset_index()
correlations = correlations[correlations['level_0'] != correlations['level_1']]
print(correlations.head(10))
print(correlations.tail(10))
# Duplicate check
features = df_train.columns.values[2:202]
unique_max_train = []
unique_max_test = []
for feature in features:
values = df_train[feature].value_counts()
unique_max_train.append([feature, values.max(), values.idxmax()])
values = df_test[feature].value_counts()
unique_max_test.append([feature, values.max(), values.idxmax()])
dup_train = np.transpose((pd.DataFrame(unique_max_train, columns=['Feature', 'Max duplicates', 'Value'])).
sort_values(by = 'Max duplicates', ascending=False).head(15))
dup_test = np.transpose((pd.DataFrame(unique_max_test, columns=['Feature', 'Max duplicates', 'Value'])).
sort_values(by = 'Max duplicates', ascending=False).head(15))
import pandas as pd
import logging
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from sklearn.cluster import dbscan
from requests.exceptions import HTTPError
from .modules.constants import FASTKRAKEN2_NAMES
from .modules.parse_utils import (
proportions,
run_pca,
parse_taxa_report,
group_taxa_report,
)
from .data_utils import sample_module_field
logger = logging.getLogger(__name__)
def sample_has_modules(sample):
has_all = True
for module_name, field, _, _, _ in [FASTKRAKEN2_NAMES]:
try:
sample_module_field(sample, module_name, field)
except KeyError:
has_all = False
return has_all
def pc1_median(samples, taxa_matrix):
pc1 = run_pca(taxa_matrix, n_comp=1)['C0']
for sample in samples:
pcval = 'Not Found in PC1'
if pc1[sample.name] >= pc1.median():
pcval = 'Above PC1 Median'
elif pc1[sample.name] < pc1.median():
pcval = 'Below PC1 Median'
sample.mgs_metadata['MGS - PC1'] = pcval
def pca_dbscan(samples, taxa_matrix):
pca = run_pca(taxa_matrix, n_comp=min(10, taxa_matrix.shape[1]))
_, cluster_labels = dbscan(pca, eps=0.1, min_samples=3)
for i, sample in enumerate(samples):
label_val = cluster_labels[i]
label = f'Cluster {label_val}'
if label_val < 0:
label = 'Noise'
sample.mgs_metadata['MGS - PCA-DBSCAN'] = label
def add_taxa_auto_metadata(samples, grp):
samples = [sample for sample in samples if sample_has_modules(sample)]
try:
taxa_matrix = group_taxa_report(
grp,
module_name='cap2::capalyzer::kraken2_taxa',
field_name='read_counts',
)(samples)
except HTTPError:
taxa_matrix = group_taxa_report(
grp,
module_name='cap2::capalyzer::fast_kraken2_taxa',
field_name='report',
)(samples)
parsed_sample_names = set(taxa_matrix.index.to_list())
samples = [sample for sample in samples if sample.name in parsed_sample_names]
logger.info('Adding PCA median variable...')
pc1_median(samples, taxa_matrix)
logger.info('done.')
logger.info('Adding PCA DBSCAN variable...')
pca_dbscan(samples, taxa_matrix)
logger.info('done.')
def regularize_metadata(samples):
logger.info('Regularizing metadata...')
meta = pd.DataFrame.from_dict(
{sample.name: sample.metadata for sample in samples},
orient='index'
)
def regularize_numeric(col):
try:
col = pd.qcut(col, 3, labels=["low", "medium", "high"], duplicates='drop')
except ValueError:
pass
col = col.astype(str)
col = col.map(lambda x: 'Unknown' if x.lower() == 'nan' else x)
col = col.fillna('Unknown')
return col
def regularize_categorical(col):
col = col.fillna('Unknown')
min_size = max(2, col.shape[0] // 100)
counts = col.value_counts()
others = list(counts[counts < min_size].index)
col = col.map(lambda el: 'Other' if el in others else el)
return col
def regularize_col(col):
if is_numeric_dtype(col):
return regularize_numeric(col)
if is_string_dtype(col):
return regularize_categorical(col)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path to the input CSV file containing the time series data values
--outFile: Path to the output INI configuration file for the time series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():  # iterate (column name, Series) pairs
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a weighted-multiplication method, since a plain division can produce infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplication-based method, since a plain division can produce infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": | pandas.StringDtype() | pandas.StringDtype |
# -*- coding:utf-8 -*-
"""
Macroeconomic data class
Created on 2019/01/09
@author: TabQ
@group : gugu
@contact: <EMAIL>
"""
import pandas as pd
import numpy as np
import re
import json
import time
from gugu.utility import Utility
from gugu.base import Base, cf
import sys
class Macro(Base):
def gdpYear(self, retry=3, pause=0.001):
"""
Get annual gross domestic product (GDP) data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'year':, 'gdp':, ...}, ...]
year :statistical year
gdp :gross domestic product (100 million CNY)
pc_gdp :GDP per capita (CNY)
gnp :gross national product (100 million CNY)
pi :primary industry (100 million CNY)
si :secondary industry (100 million CNY)
industry :industry (100 million CNY)
cons_industry :construction (100 million CNY)
ti :tertiary industry (100 million CNY)
trans_industry :transport, storage, post and telecommunications (100 million CNY)
lbdy :wholesale and retail trade and catering (100 million CNY)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK4224641560861/MacPage_Service.get_pagedata?cate=nation&event=0&from=0&num=70&condition=&_=4224641560861
datastr = self.__parsePage('nation', 0, 70, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_YEAR_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def gdpQuarter(self, retry=3, pause=0.001):
"""
Get quarterly gross domestic product (GDP) data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'quarter':, 'gdp':, ...}, ...]
quarter :quarter
gdp :gross domestic product (100 million CNY)
gdp_yoy :GDP year-on-year growth (%)
pi :value added of the primary industry (100 million CNY)
pi_yoy:value added of the primary industry, year-on-year growth (%)
si :value added of the secondary industry (100 million CNY)
si_yoy :value added of the secondary industry, year-on-year growth (%)
ti :value added of the tertiary industry (100 million CNY)
ti_yoy :value added of the tertiary industry, year-on-year growth (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3935140379887/MacPage_Service.get_pagedata?cate=nation&event=1&from=0&num=250&condition=&_=3935140379887
datastr = self.__parsePage('nation', 1, 250, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_QUARTER_COLS)
self._data['quarter'] = self._data['quarter'].astype(object)
self._data[self._data==0] = np.NaN
return self._result()
def demandsToGdp(self, retry=3, pause=0.001):
"""
Get data on the contribution of the three major demand components to GDP
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'year':, 'cons_to':, ...}, ...]
year :statistical year
cons_to :contribution rate of final consumption expenditure (%)
cons_rate :pull of final consumption expenditure (percentage points)
asset_to :contribution rate of gross capital formation (%)
asset_rate:pull of gross capital formation (percentage points)
goods_to :contribution rate of net exports of goods and services (%)
goods_rate :pull of net exports of goods and services (percentage points)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3153587567694/MacPage_Service.get_pagedata?cate=nation&event=4&from=0&num=80&condition=&_=3153587567694
datastr = self.__parsePage('nation', 4, 80, retry, pause)
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
self._data = pd.DataFrame(js,columns=cf.GDP_FOR_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def idsPullToGdp(self, retry=3, pause=0.001):
"""
Get data on how much the three major industries pull GDP growth
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'year':, 'gdp_yoy':, ...}, ...]
year :statistical year
gdp_yoy :GDP year-on-year growth (%)
pi :pull rate of the primary industry (%)
si :pull rate of the secondary industry (%)
industry:of which: pull of industry (%)
ti :pull rate of the tertiary industry (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK1083239038283/MacPage_Service.get_pagedata?cate=nation&event=5&from=0&num=60&condition=&_=1083239038283
datastr = self.__parsePage('nation', 5, 60, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_PULL_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def idsCtbToGdp(self, retry=3, pause=0.001):
"""
Get contribution-rate data for the three major industries
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'year':, 'gdp_yoy':, ...}, ...]
year :statistical year
gdp_yoy :gross domestic product
pi :contribution rate of the primary industry (%)
si :contribution rate of the secondary industry (%)
industry:of which: contribution rate of industry (%)
ti :contribution rate of the tertiary industry (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK4658347026358/MacPage_Service.get_pagedata?cate=nation&event=6&from=0&num=60&condition=&_=4658347026358
datastr = self.__parsePage('nation', 6, 60, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_CONTRIB_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def cpi(self, retry=3, pause=0.001):
"""
Get consumer price index (CPI) data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'month':, 'cpi':,}, ...]
month :statistical month
cpi :price index
"""
self._data = pd.DataFrame()
datastr = self.__parsePage('price', 0, 600, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.CPI_COLS)
self._data['cpi'] = self._data['cpi'].astype(float)
return self._result()
def ppi(self, retry=3, pause=0.001):
"""
Get producer price index (PPI) data for industrial products
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'month':, 'ppiip':, ...}, ...]
month :statistical month
ppiip :ex-factory price index of industrial products
ppi :price index of means of production
qm:price index of mining and quarrying
rmi:price index of raw materials
pi:price index of processing industries
cg:price index of consumer goods
food:price index of food
clothing:price index of clothing
roeu:price index of general daily necessities
dcg:price index of durable consumer goods
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK6734345383111/MacPage_Service.get_pagedata?cate=price&event=3&from=0&num=600&condition=&_=6734345383111
datastr = self.__parsePage('price', 3, 600, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.PPI_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, np.NaN, x))
if i != 'month':
self._data[i] = self._data[i].astype(float)
return self._result()
def depositRate(self, retry=3, pause=0.001):
"""
Get deposit interest rate data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'date':, 'deposit_type':, ...}, ...]
date :date of change
deposit_type :type of deposit
rate:interest rate (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK1250640915421/MacPage_Service.get_pagedata?cate=fininfo&event=2&from=0&num=600&condition=&_=1250640915421
datastr = self.__parsePage('fininfo', 2, 600, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.DEPOSIT_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def loanRate(self, retry=3, pause=0.001):
"""
Get loan interest rate data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'date':, 'loan_type':, ...}, ...]
date :effective date
loan_type :type of loan
rate:interest rate (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK7542659823280/MacPage_Service.get_pagedata?cate=fininfo&event=3&from=0&num=800&condition=&_=7542659823280
datastr = self.__parsePage('fininfo', 3, 800, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.LOAN_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def rrr(self, retry=3, pause=0.001):
"""
Get required reserve ratio data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'date':, 'before':, ...}, ...]
date :date of change
before :required reserve ratio before the adjustment (%)
now:required reserve ratio after the adjustment (%)
changed:size of the adjustment (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK8028217046046/MacPage_Service.get_pagedata?cate=fininfo&event=4&from=0&num=100&condition=&_=8028217046046
datastr = self.__parsePage('fininfo', 4, 100, retry, pause)
datastr = datastr if self._PY3 else datastr.decode('gbk')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.RRR_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def moneySupply(self, retry=3, pause=0.001):
"""
Get money supply data
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'month':, 'm2':, ...}, ...]
month :statistical period
m2 :money and quasi-money (broad money M2) (100 million CNY)
m2_yoy:money and quasi-money (broad money M2), year-on-year growth (%)
m1:money (narrow money M1) (100 million CNY)
m1_yoy:money (narrow money M1), year-on-year growth (%)
m0:cash in circulation (M0) (100 million CNY)
m0_yoy:cash in circulation (M0), year-on-year growth (%)
cd:demand deposits (100 million CNY)
cd_yoy:demand deposits, year-on-year growth (%)
qm:quasi-money (100 million CNY)
qm_yoy:quasi-money, year-on-year growth (%)
ftd:time deposits (100 million CNY)
ftd_yoy:time deposits, year-on-year growth (%)
sd:savings deposits (100 million CNY)
sd_yoy:savings deposits, year-on-year growth (%)
rests:other deposits (100 million CNY)
rests_yoy:other deposits, year-on-year growth (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK9019314616219/MacPage_Service.get_pagedata?cate=fininfo&event=1&from=0&num=600&condition=&_=9019314616219
datastr = self.__parsePage('fininfo', 1, 600, retry, pause)
datastr = datastr if self._PY3 else datastr.decode('gbk')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.MONEY_SUPPLY_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def moneySupplyBal(self, retry=3, pause=0.001):
"""
Get money supply data (year-end balances)
Parameters
--------
retry : int, default 3
Number of times to retry if network or similar problems occur
pause : int, default 0.001
Seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
Return
--------
DataFrame or List: [{'year':, 'm2':, ...}, ...]
year :statistical year
m2 :money and quasi-money (100 million CNY)
m1:money (100 million CNY)
m0:cash in circulation (100 million CNY)
cd:demand deposits (100 million CNY)
qm:quasi-money (100 million CNY)
ftd:time deposits (100 million CNY)
sd:savings deposits (100 million CNY)
rests:other deposits (100 million CNY)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3430820865181/MacPage_Service.get_pagedata?cate=fininfo&event=0&from=0&num=200&condition=&_=3430820865181
datastr = self.__parsePage('fininfo', 0, 200, retry, pause)
datastr = datastr if self._PY3 else datastr.decode('gbk')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.MONEY_SUPPLY_BLA_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def __parsePage(self, cate='', event=0, num=0, retry=3, pause=0.001):
for _ in range(retry):
time.sleep(pause)
try:
rdInt = Utility.random()
request = self._session.get( cf.MACRO_URL % (rdInt, cate, event, num, rdInt), timeout=10 )
if self._PY3:
request.encoding = 'gbk'
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(request.text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
except Exception as e:
print(e)
else:
return datastr
raise IOError(cf.NETWORK_URL_ERROR_MSG)
def shibor(self, year=None):
"""
Get the Shanghai Interbank Offered Rate (Shibor)
Parameters
------
year:year (int)
Return
------
DataFrame or List: [{'date':, 'ON':, ...}, ...]
date:date
ON:overnight rate
1W:1-week rate
2W:2-week rate
1M:1-month rate
3M:3-month rate
6M:6-month rate
9M:9-month rate
1Y:1-year rate
"""
self._data = pd.DataFrame()
lab = cf.SHIBOR_TYPE['Shibor']
# http://www.shibor.org/shibor/web/html/downLoad.html?nameNew=Historical_Shibor_Data_2018.xls&downLoadPath=data&nameOld=Shibor数据2018.xls&shiborSrc=http://www.shibor.org/shibor/
self._data = self.__parseExcel(year, 'Shibor', lab, cf.SHIBOR_COLS)
return self._result()
def shiborQuote(self, year=None):
"""
Get Shibor bank quote data
Parameters
------
year:year (int)
Return
------
DataFrame or List: [{'date':, 'bank':, ...}, ...]
date:date
bank:name of the quoting bank
ON:overnight rate
1W:1-week rate
2W:2-week rate
1M:1-month rate
3M:3-month rate
6M:6-month rate
9M:9-month rate
1Y:1-year rate
"""
self._data = pd.DataFrame()
lab = cf.SHIBOR_TYPE['Quote']
# http://www.shibor.org/shibor/web/html/downLoad.html?nameNew=Historical_Quote_Data_2018.xls&downLoadPath=data&nameOld=报价数据2018.xls&shiborSrc=http://www.shibor.org/shibor/
self._data = self.__parseExcel(year, 'Quote', lab, cf.QUOTE_COLS)
return self._result()
def shiborMa(self, year=None):
"""
Get Shibor mean (moving-average) data
Parameters
------
year:year (int)
Return
------
DataFrame or List: [{'date':, 'ON_5':, ...}, ...]
date:date
the remaining columns are the 5-, 10- and 20-day averages for each tenor
"""
self._data = pd.DataFrame()
import numpy as np
import pandas as pd
from decisionengine.framework.modules import Source
PRODUCES = ["provisioner_resource_spot_prices"]
class AWSSpotPrice(Source.Source):
def __init__(self, *args, **kwargs):
pass
def produces(self, schema_id_list):
return PRODUCES
# The DataBlock given to the source is t=0
def acquire(self):
resource_list = [
{"ResourceName": "AWS1", "SpotPrice": 1.},
{"ResourceName": "AWS2", "SpotPrice": 2.},
{"ResourceName": "AWS3", "SpotPrice": 2.},
{"ResourceName": "AWS4", "SpotPrice": 1.},
{"ResourceName": "AWS5", "SpotPrice": 2.}
]
resource_keys = resource_list[0].keys()
pandas_data = {}
for key in resource_keys:
pandas_data[key] = pd.Series([d[key] for d in resource_list])
return {"provisioner_resource_spot_prices": | pd.DataFrame(pandas_data) | pandas.DataFrame |
# Loading Python libraries
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
import statsmodels.stats.multicomp as multi
from statsmodels.formula.api import ols
from IPython.display import Markdown
#%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
#sns.set()
#pd.options.display.float_format = '{:.3f}'.format
#np.set_printoptions(precision=3, suppress=True)
# Statistics functions
def parammct(data=None, independent=None, dependent=None):
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
parammct_df = pd.DataFrame()
for value in pd.unique(data[independent]):
mean = data[dependent][data[independent]==value].mean()
stdev = data[dependent][data[independent]==value].std()
n = data[dependent][data[independent]==value].count()
sdemean = stdev/np.sqrt(n)
ci = 1.96*sdemean
lowerboundci = mean-ci
upperboundci = mean+ci
parammct_df[value] = pd.Series([mean, stdev, n, sdemean, lowerboundci, upperboundci],
index = ['Mean','SD','n','SEM','Lower bound CI', 'Upper bound CI'])
return parammct_df
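# Hedged usage sketch (illustrative only): parammct() expects a categorical independent column and a
# numeric dependent column; it relies on the module's input_check_numerical_categorical() helper.
if False:  # illustrative only
    toy = pd.DataFrame({'group': ['a'] * 5 + ['b'] * 5,
                        'score': [1.0, 2, 3, 4, 5, 2, 3, 4, 5, 6]})
    print(parammct(data=toy, independent='group', dependent='score'))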
def non_parammct(data=None, independent=None, dependent=None):
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
non_parammct_df = pd.DataFrame()
for value in pd.unique(data[independent]):
median = data[dependent][data[independent]==value].median()
minimum = data[dependent][data[independent]==value].quantile(0)
q25 = data[dependent][data[independent]==value].quantile(0.25)
q75 = data[dependent][data[independent]==value].quantile(0.75)
maximum = data[dependent][data[independent]==value].quantile(1)
n = data[dependent][data[independent]==value].count()
non_parammct_df[value] = pd.Series([median, minimum, q25,q75, maximum, n],
index = ['Median', 'Minimum', 'Lower bound IQR', 'Upper bound IQR',
'Maximum', 'n'])
return non_parammct_df
def histograms(data=None, independent=None, dependent=None):
independent = str(independent)
dependent = str(dependent)
if input_check_numerical_categorical(data, independent, dependent):
return
for value in pd.unique(data[independent]):
sns.distplot(data[dependent][data[independent]==value], fit=stats.norm, kde=False)
plt.title(dependent + ' by ' + independent + '(' + str(value).lower() + ')',
fontweight='bold', fontsize=16)
plt.ylabel('Frequency', fontsize=14)
plt.xlabel(dependent, fontsize=14)
plt.show()
return
def t_test(data=None, independent=None, dependent=None):
pd.set_eng_float_format(accuracy=3, use_eng_prefix=False)
independent_groups = pd.unique(data[independent])
if len(independent_groups)>2:
print('There are more than 2 groups in the independent variable')
print('t-test is not the correct statistical test to run in that circumstance,')
print('consider running an ANOVA')
return
mct = parammct(data=data, independent=independent, dependent=dependent)
t_test_value, p_value = stats.ttest_ind(data[dependent][data[independent] == independent_groups[0]],
data[dependent][data[independent] == independent_groups[1]])
difference_mean = np.abs(mct.loc['Mean'][0] - mct.loc['Mean'][1])
pooled_sd = np.sqrt( ( ((mct.loc['n'][0]-1)*mct.loc['SD'][0]**2) + ((mct.loc['n'][1]-1)*mct.loc['SD'][1]**2) ) /
(mct.loc['n'][0] + mct.loc['n'][1] - 2) )
sedifference = pooled_sd * np.sqrt( (1/mct.loc['n'][0]) + (1/mct.loc['n'][1]) )
difference_mean_ci1 = difference_mean + (t_test_value * sedifference)
difference_mean_ci2 = difference_mean - (t_test_value * sedifference)
if difference_mean_ci1>difference_mean_ci2:
difference_mean_cilower = difference_mean_ci2
difference_mean_ciupper = difference_mean_ci1
else:
difference_mean_cilower = difference_mean_ci1
difference_mean_ciupper = difference_mean_ci2
cohend = difference_mean / pooled_sd
t_test_result= pd.DataFrame ([difference_mean, sedifference, t_test_value, p_value,
difference_mean_cilower, difference_mean_ciupper, cohend],
index = ['Difference between means', 'SE difference', 't-test', 'p-value',
'Lower bound difference CI', 'Upper bound difference CI', 'Cohen\'s d'],
columns=['Value'])
return t_test_result
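# Hedged usage sketch (illustrative only): t_test() returns the difference between means, its SE and CI,
# the t statistic, the p-value and Cohen's d for a two-group design (same helper assumption as above).
if False:  # illustrative only
    toy = pd.DataFrame({'group': ['a'] * 6 + ['b'] * 6,
                        'score': [1.0, 2, 2, 3, 3, 4, 3, 4, 4, 5, 5, 6]})
    print(t_test(data=toy, independent='group', dependent='score'))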
def anova(data=None, independent=None, dependent=None):
pd.set_eng_float_format(accuracy=3, use_eng_prefix=False)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# In[ ]:
### Install Requirement
get_ipython().system("pip install -r requirements.txt")
# In[98]:
get_ipython().run_line_magic("load_ext", "autoreload")
get_ipython().run_line_magic("autoreload", "")
get_ipython().run_line_magic("matplotlib", "inline")
get_ipython().run_line_magic("config", "IPCompleter.greedy=True")
import gc
import logging
import os
import warnings
from datetime import datetime
import numpy as np
import pandas as pd
#### CatBoost
import catboost as cb
import lightgbm as lgb
import matplotlib.pyplot as plt
### Pandas Profiling for features
# !pip install https://github.com/pandas-profiling/pandas-profiling/archive/master.zip
import pandas_profiling as pp
import seaborn as sns
import shap
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error, roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
### MLP Classifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from tqdm import tqdm_notebook
from util_feature import *
from util_model import *
warnings.filterwarnings("ignore")
# In[64]:
print("ok")
# # Data Loading, basic profiling
# In[3]:
folder = os.getcwd() + "/"
# In[4]:
df = pd.read_csv(folder + "/data/address_matching_data.csv")
df.head(5)
# In[5]:
df.describe()
# In[6]:
df.columns, df.dtypes
# In[7]:
profile = df.profile_report(title="Pandas Profiling Report")
profile.to_file(output_file="output.html")
colexclude = profile.get_rejected_variables(threshold=0.98)
colexclude
# In[8]:
# In[ ]:
# # Column selection by type
# In[11]:
colid = "id"
colnum = [
"name_levenshtein_simple",
"name_trigram_simple",
"name_levenshtein_term",
"name_trigram_term",
"city_levenshtein_simple",
"city_trigram_simple",
"city_levenshtein_term",
"city_trigram_term",
"zip_levenshtein_simple",
"zip_trigram_simple",
"zip_levenshtein_term",
"zip_trigram_term",
"street_levenshtein_simple",
"street_trigram_simple",
"street_levenshtein_term",
"street_trigram_term",
"website_levenshtein_simple",
"website_trigram_simple",
"website_levenshtein_term",
"website_trigram_term",
"phone_levenshtein",
"phone_trigram",
"fax_levenshtein",
"fax_trigram",
"street_number_levenshtein",
"street_number_trigram",
]
colcat = ["phone_equality", "fax_equality", "street_number_equality"]
coltext = []
coldate = []
coly = "is_match"
colall = colnum + colcat + coltext
"""
dfnum, dfcat, dfnum_bin,
dfnum_binhot, dfcat_hot
colnum, colcat, coltext,
colnum_bin, colnum_binhot,
"""
print(colall)
# In[ ]:
# In[ ]:
# # Data type normalization, Encoding process (numerics, category)
# In[28]:
# Normalize to NA, NA Handling
df = df.replace("?", np.nan)
# In[29]:
### colnum procesing
for x in colnum:
df[x] = df[x].astype("float32")
print(df.dtypes)
# In[30]:
##### Colcat processing
colcat_map = pd_colcat_mapping(df, colcat)
for col in colcat:
df[col] = df[col].apply(lambda x: colcat_map["cat_map"][col].get(x))
print(df[colcat].dtypes, colcat_map)
# In[74]:
# # Data Distribution after encoding/ data type normalization
# In[31]:
#### ColTarget Distribution
coly_stat = pd_stat_distribution(df[["id", coly]], subsample_ratio=1.0)
coly_stat
# In[ ]:
# In[ ]:
# In[32]:
#### Col numerics distribution
colnum_stat = pd_stat_distribution(df[colnum], subsample_ratio=0.6)
colnum_stat
# In[ ]:
# In[33]:
#### Col stats distribution
colcat_stat = pd_stat_distribution(df[colcat], subsample_ratio=0.3)
colcat_stat
# In[ ]:
# In[ ]:
# # Feature processing (strategy 1)
# In[16]:
### Backup data before pre-processing
dfref = copy.deepcopy(df)
print(dfref.shape)
# In[27]:
df = copy.deepcopy(dfref)
# In[22]:
## Map numerics to Category bin
dfnum, colnum_map = pd_colnum_tocat(
df, colname=colnum, colexclude=None, colbinmap=None, bins=5, suffix="_bin", method=""
)
print(colnum_map)
# In[37]:
colnum_bin = [x + "_bin" for x in list(colnum_map.keys())]
print(colnum_bin)
# In[38]:
dfnum[colnum_bin].head(7)
# In[39]:
### numerics bin to One Hot
dfnum_hot = pd_col_to_onehot(dfnum[colnum_bin], colname=colnum_bin, returncol=0)
colnum_hot = list(dfnum_hot.columns)
dfnum_hot.head(10)
# In[202]:
# In[40]:
dfcat_hot = pd_col_to_onehot(df[colcat], colname=colcat, returncol=0)
colcat_hot = list(dfcat_hot.columns)
dfcat_hot.head(5)
# In[ ]:
# In[ ]:
# In[ ]:
#
# # Train data preparation
# In[67]:
#### Train data preparation
dfX = pd.concat((dfnum_hot, dfcat_hot), axis=1)
colX = list(dfX.columns)
X = dfX.values
yy = df[coly].values
Xtrain, Xtest, ytrain, ytest = train_test_split(X, yy, random_state=42, test_size=0.5, shuffle=True)
print(Xtrain.shape, Xtest.shape, colX)
# In[ ]:
# In[203]:
0
# # Model evaluation
# In[ ]:
# In[42]:
### Baseline : L2 penalty to reduce overfitting
clf_log = sk.linear_model.LogisticRegression(penalty="l2", class_weight="balanced")
# In[43]:
clf_log, dd = sk_model_eval_classification(clf_log, 1, Xtrain, ytrain, Xtest, ytest)
# In[92]:
sk_model_eval_classification_cv(clf_log, X, yy, test_size=0.5, ncv=3)
# In[44]:
clf_log_feat = sk_feature_impt_logis(clf_log, colX)
clf_log_feat
# In[208]:
# In[44]:
1
# In[96]:
### Light GBM
clf_lgb = lgb.LGBMClassifier(
learning_rate=0.125,
metric="l2",
max_depth=15,
n_estimators=50,
objective="binary",
num_leaves=38,
njobs=-1,
)
# In[97]:
clf_lgb, dd_lgb = sk_model_eval_classification(clf_lgb, 1, Xtrain, ytrain, Xtest, ytest)
# In[98]:
shap.initjs()
dftest = pd.DataFrame(columns=colall, data=Xtest)
explainer = shap.TreeExplainer(clf_lgb)
shap_values = explainer.shap_values(dftest)
# visualize the first prediction's explanation (use matplotlib=True to avoid Javascript)
shap.force_plot(explainer.expected_value, shap_values[0, :], dftest.iloc[0, :])
# In[ ]:
# visualize the training set predictions
# shap.force_plot(explainer.expected_value, shap_values, dftest)
# Plot summary_plot as barplot:
# shap.summary_plot(shap_values, Xtest, plot_type='bar')
# In[112]:
lgb_feature_imp = pd.DataFrame(
sorted(zip(clf_lgb.feature_importances_, colall)), columns=["value", "feature"]
)
lgb_feature_imp = lgb_feature_imp.sort_values("value", ascending=0)
print(lgb_feature_imp)
plotbar(
lgb_feature_imp.iloc[:10, :],
colname=["value", "feature"],
title="feature importance",
savefile="lgb_feature_imp.png",
)
# In[118]:
kf = StratifiedKFold(n_splits=3, shuffle=True)
# partially based on https://www.kaggle.com/c0conuts/xgb-k-folds-fastai-pca
clf_list = []
for itrain, itest in kf.split(X, yy):
print("###")
Xtrain, Xval = X[itrain], X[itest]
ytrain, yval = yy[itrain], yy[itest]
clf_lgb.fit(Xtrain, ytrain, eval_set=[(Xval, yval)], early_stopping_rounds=20)
clf_list.append(clf_lgb)
# In[122]:
for i, clfi in enumerate(clf_list):
print(i)
clf_lgbi, dd_lgbi = sk_model_eval_classification(clfi, 0, Xtrain, ytrain, Xtest, ytest)
# In[4]:
def np_find_indice(v, x):
for i, j in enumerate(v):
if j == x:
return i
return -1
def col_getnumpy_indice(colall, colcat):
return [np_find_indice(colall, x) for x in colcat]
# In[7]:
colcat_idx = col_getnumpy_indice(colall, colcat)
clf_cb = cb.CatBoostClassifier(
iterations=1000,
depth=8,
learning_rate=0.02,
loss_function="Logloss",
eval_metric="AUC",
random_seed=42,
rsm=0.2, # features subsample
od_type="Iter", # early stopping odwait = 100, # early stopping
verbose=100,
l2_leaf_reg=20, # regularisation
)
# In[8]:
# clf_cb, dd_cb = sk_model_eval_classification(clf_cb, 1,
# Xtrain, ytrain, Xtest, ytest)
clf_cb.fit(
Xtrain,
ytrain,
eval_set=(Xtest, ytest),
cat_features=np.arange(0, Xtrain.shape[1]),
use_best_model=True,
)
# In[ ]:
# In[ ]:
# In[123]:
# Fitting a SVM
clf_svc = SVC(C=1.0, probability=True) # since we need probabilities
clf_svc, dd_svc = sk_model_eval_classification(clf_svc, 1, Xtrain, ytrain, Xtest, ytest)
# In[228]:
# In[231]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[54]:
clf_nn = MLPClassifier(
hidden_layer_sizes=(50,),
max_iter=80,
alpha=1e-4,
activation="relu",
solver="adam",
verbose=10,
tol=1e-4,
random_state=1,
learning_rate_init=0.1,
early_stopping=True,
validation_fraction=0.2,
)
# In[55]:
clf_nn, dd_nn = sk_model_eval_classification(clf_nn, 1, Xtrain, ytrain, Xtest, ytest)
# # Feature selection
#
# In[ ]:
### Feature Selection (reduce over-fitting)
# Pre model feature selection (sometimes some features are useful even with low variance....)
# Post model feature selection
# In[59]:
### Model independant Selection
colX_kbest = sk_model_eval_feature(
clf_nn, method="f_classif", colname=colX, kbest=50, Xtrain=Xtrain, ytrain=ytrain
)
print(colX_kbest)
# In[99]:
clf_log_feat
# In[80]:
clf_log.fit(dfX[colX].values, df[coly].values)
# In[100]:
feat_eval = sk_feature_evaluation(
clf_log, dfX, 30, colname_best=clf_log_feat.feature.values, dfy=df[coly]
)
# In[95]:
feat_eval
# # Ensembling
# In[ ]:
# In[54]:
clf_list = []
clf_list.append(("clf_log", clf_log))
clf_list.append(("clf_lgb", clf_lgb))
clf_list.append(("clf_svc", clf_svc))
clf_ens1 = VotingClassifier(clf_list, voting="soft") # Soft is required
print(clf_ens1)
# In[55]:
sk_model_eval_classification(clf_ens1, 1, Xtrain, ytrain, Xtest, ytest)
# In[ ]:
# In[ ]:
# In[ ]:
# # Predict values
# In[129]:
dft = pd.read_csv(folder + "/data/address_matching_data.csv")
'''
Open Power System Data
Time series Datapackage
read.py : read time series files
'''
import pytz
import yaml
import os
import sys
import numpy as np
import pandas as pd
import logging
from datetime import datetime, date, time, timedelta
import xlrd
from xml.sax import ContentHandler, parse
from .excel_parser import ExcelHandler
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
def read_entso_e_transparency(
areas,
filepath,
dataset_name,
headers,
cols,
stacked,
unstacked,
append_headers,
**kwargs):
'''
Read a .csv file from ENTSO-E TRansparency into a DataFrame.
Parameters
----------
filepath : str
Path of the file to be read
dataset_name : str
Name of variable, e.g. ``solar``
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
cols : dict
A mapping of of columnnames to use from input file and a new name to
rename them to. The new name is the header level whose corresponding
values are specified in that column
stacked : list
List of strings indicating the header levels that are reported
column-wise in the input files
unstacked
One string indicating the header level that is reported row-wise in the
input files
append_headers: dict
Map of header levels and values to append to Multiindex
kwargs: dict
placeholder for further named function arguments
Returns
----------
df: pandas.DataFrame
The content of one file from ENTSO-E Transparency
'''
df_raw = pd.read_csv(
filepath,
sep='\t',
encoding='utf-16',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['DateTime']},
date_parser=None,
dayfirst=False,
decimal='.',
thousands=None,
usecols=cols.keys(),
)
# rename columns to comply with other data
df_raw.rename(columns=cols, inplace=True)
if dataset_name == 'Actual Generation per Production Type':
# keep only renewables columns
renewables = {
'Solar': 'solar',
'Wind Onshore': 'wind_onshore',
'Wind Offshore': 'wind_offshore'
}
df_raw = df_raw[df_raw['variable'].isin(renewables.keys())]
df_raw.replace({'variable': renewables}, inplace=True)
if dataset_name == 'Day-ahead Prices':
# Omit polish price data reported in EUR (keeping PLN prices)
# (Before 2017-03-02, the data is very messy)
no_polish_euro = ~(
(df_raw['region'] == 'PSE SA BZ') &
(df_raw.index < pd.to_datetime('2017-03-02 00:00:00')))
df_raw = df_raw.loc[no_polish_euro]
if dataset_name in ['Actual Total Load', 'Day-ahead Total Load Forecast']:
# Zero load is highly unlikely. Such occurences are actually NaNs
df_raw['load'].replace(0, np.nan, inplace=True)
# keep only entries for selected geographic entities as specified in
# areas.csv
area_filter = areas['primary AreaName ENTSO-E'].dropna()
df_raw = df_raw.loc[df_raw['region'].isin(area_filter)]
# based on the AreaName column, map the area names used throughout OPSD
lookup = areas.set_index('primary AreaName ENTSO-E')['area ID'].dropna()
lookup = lookup[~lookup.index.duplicated()]
df_raw['region'] = df_raw['region'].map(lookup)
dfs = {}
for res in ['15', '30', '60']:
df = (df_raw.loc[df_raw['resolution'] == 'PT' + res + 'M', :]
.copy().sort_index(axis='columns'))
df = df.drop(columns=['resolution'])
# DST-handling
# Hours 2-3 of the DST-day in March are both labelled 3:00, with no possibility
# to distinguish them. We have to delete both
dst_transitions_spring = [d.replace(hour=3, minute=m)
for d in pytz.timezone('Europe/Paris')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3
for m in [0, 15, 30, 45]]
df = df.loc[~df.index.isin(dst_transitions_spring)]
# juggle the index and columns
df.set_index(stacked, append=True, inplace=True)
        # at this point, only the values we are interested in are left as columns
df.columns.rename(unstacked, inplace=True)
df = df.unstack(stacked)
# keep only columns that have at least some nonzero values
df = df.loc[:, (df > 0).any(axis=0)]
# add source, url and unit to the column names.
        # Note: pd.concat inserts new MultiIndex values in front of the old ones
df = pd.concat([df],
keys=[tuple(append_headers.values())],
names=append_headers.keys(),
axis='columns')
# reorder and sort columns
df = df.reorder_levels(headers, axis=1)
dfs[res + 'min'] = df
    return dfs
def read_pse(filepath):
'''
Read a .csv file from PSE into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
Returns
----------
df: pandas.DataFrame
The content of one file from PSE
'''
df = pd.read_csv(
filepath,
sep=';',
encoding='cp1250',
header=0,
index_col=None,
parse_dates=None,
date_parser=None,
dayfirst=False,
decimal=',',
thousands=None,
# hours are indicated by their ending time. During fall DST,
# UTC 23:00-00:00 = CEST 1:00-2:00 is indicated by '02',
# UTC 00:00-01:00 = CEST 2:00-3:00 is indicated by '02A',
# UTC 01:00-02:00 = CET 2:00-3:00 is indicated by '03'.
# regular hours require backshifting by 1 period
converters={
'Time': lambda x: '2:00' if x == '2A' else str(int(x) - 1) + ':00'
}
)
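    # Worked example of the 'Time' converter above (illustrative): '1' -> '0:00',
    # '2' -> '1:00', '2A' -> '2:00' (the repeated hour at the fall DST transition),
    # '3' -> '2:00', ..., '24' -> '23:00', i.e. hour-ending labels are shifted back
    # to hour-beginning labels.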
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Warsaw')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3]
# Account for an error where an hour is jumped in the data, incrementing
# the hour by one
#time_int = df['Time'].str[:-3].astype(int)
# if (time_int time_int.shift(1) - 1).
# if (time_int == 24).any():
# logger.info(filepath)
# df = df[time_int != 24]
if df['Date'][0] == 20130324:
df['Time'] = [str(num) + ':00' for num in range(24)]
# The hour from 01:00 - 02:00 (CET) should by PSE's logic be indexed
# by "02:00" (the endpoint), but at DST day in spring they use "03:00" in
# the files. Our routine requires it to be "01:00" (the start point).
df['proto_timestamp'] = pd.to_datetime(
df['Date'].astype(str) + ' ' + df['Time'])
slicer = df['proto_timestamp'].isin(dst_transitions_spring)
df.loc[slicer, 'Time'] = '1:00'
# create the actual timestamp from the corrected "Date"-column
df.index = pd.to_datetime(df['Date'].astype(str) + ' ' + df['Time'])
# DST-handling
# 'ambiguous' refers to how the October dst-transition hour is handled.
# 'infer' will attempt to infer dst-transition hours based on order.
df.index = df.index.tz_localize('Europe/Warsaw', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_ceps(filepath):
'''Read a file from CEPS into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=2,
parse_dates=True,
dayfirst=True,
skiprows=None,
index_col=0,
usecols=[0, 1, 2]
)
# DST-handling
df.index = df.index.tz_localize('Europe/Prague', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_elia(filepath):
'''Read a file from Elia into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=3,
parse_dates={'timestamp': ['DateTime']},
dayfirst=True,
index_col='timestamp',
usecols=None
)
# DST handling
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_energinet_dk(filepath):
'''Read a file from energinet.dk into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=2, # the column headers are taken from 3rd row.
# 2nd row also contains header info like in a multiindex,
        # i.e. whether the columns are price or generation data.
        # However, we will make our own column names below.
# Row 3 is enough to unambiguously identify the columns
skiprows=None,
index_col=None,
parse_dates=True,
dayfirst=False,
usecols=None, # None means: parse all columns
thousands=',',
# hours in 2nd column run from 1-24, we need 0-23:
# (converters seem not to work in combination with parse_dates)
converters={1: lambda x: x - 1}
)
# Create the timestamp column and set as index
df.index = df.iloc[:, 0] + pd.to_timedelta(df.iloc[:, 1], unit='h')
# DST-handling
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Copenhagen')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3]
# Drop 3rd hour for (spring) DST-transition from df.
df = df[~df.index.isin(dst_transitions_spring)]
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.ones(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Copenhagen', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
return df
def read_entso_e_statistics(filepath,):
'''Read a file from ENTSO-E into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=18,
usecols='A, B, G, K, L, N, P:AU'
)
# rename columns
# According to the specific national considerations, GB data reflects the
# whole UK including Northern Ireland since 2016
renamer = {df.columns[0]: 'date', df.columns[1]: 'time', 'GB': 'GB_UKM'}
df.rename(columns=renamer, inplace=True)
# Zero load is highly unlikely. Such occurences are actually NaNs
df.replace(0, np.nan, inplace=True)
# Construct the index and set timezone
# for some reason, the 'date' column has already been parsed to datetime
df['date'] = df['date'].fillna(method='ffill').dt.strftime('%Y-%m-%d')
df.index = pd.to_datetime(df.pop('date') + ' ' + df.pop('time').str[:5])
# DST-handling
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_entso_e_portal(filepath):
'''Read a file from the old ENTSO-E Data Portal into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=3, # 0 indexed, so the column names are actually in the 4th row
skiprows=None,
# create MultiIndex from first 2 columns ['date', 'Country']
index_col=[0, 1],
parse_dates={'date': ['Year', 'Month', 'Day']},
dayfirst=False,
usecols=None, # None means: parse all columns
)
# The "Coverage ratio"-column specifies for some countries scaling factor
# with which we should upscale the reported values
df = df.divide(df.pop('Coverage ratio'), axis='index') * 100
# The original data has days and countries in the rows and hours in the
# columns. This rearranges the table, mapping hours on the rows and
# countries on the columns.
df.columns.names = ['hour']
df = df.stack(level='hour').unstack(level='Country').reset_index()
# Create the timestamp column and set as index
df.index = df.pop('date') + pd.to_timedelta(df.pop('hour'), unit='h')
# DST-handling
# Delete values in DK and FR that should not exist
df = df.loc[df.index != '2015-03-29 02:00', :]
# Delete values in DK that are obviously twice as high as they should be
df.loc[df.index.isin(['2014-10-26 02:00:00', '2015-10-25 02:00:00']),
'DK'] = np.nan
# Delete values in UK that are all zero except for one day
df.loc[(df.index.year == 2010) & (df.index.month == 1), 'GB'] = np.nan
# Delete values in CY that are mostly zero but not always
df.loc[(df.index.year < 2013), 'CY'] = np.nan
# Zero load is highly unlikely. Such occurences are actually NaNs
df.replace(0, np.nan, inplace=True)
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.ones(len(df.index), dtype=bool)
df.index = df.index.tz_localize('CET', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
# Rename regions to comply with naming conventions
renamer = {'DK_W': 'DK_1', 'UA_W': 'UA_west', 'NI': 'GB_NIR', 'GB': 'GB_GBN'}
df.rename(columns=renamer, inplace=True)
# Calculate load for whole UK from Great Britain and Northern Ireland data
df['GB_UKM'] = df['GB_GBN'].add(df['GB_NIR'])
return df
def read_hertz(filepath, dataset_name):
'''Read a file from 50Hertz into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=3,
index_col='timestamp',
parse_dates={'timestamp': ['Datum', 'Von']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands='.',
# truncate values in 'time' column after 5th character
converters={'Von': lambda x: x[:5]},
)
# Wind onshore
if dataset_name == 'wind generation_actual pre-offshore':
df['wind_onshore'] = df['MW']
# Until 2006, and in 2015 (except for wind_generation_pre-offshore),
    # during the fall dst-transition, only the
# wintertime hour (marked by a B in the data) is reported, the summertime
# hour, (marked by an A) is missing in the data.
# dst_arr is a boolean array consisting only of "False" entries, telling
# python to treat the hour from 2:00 to 2:59 as wintertime.
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=2)
# Conform index to UTC
if (pd.to_datetime(df.index.values[0]).year not in [2005, 2006, 2015] or
(dataset_name == 'wind generation_actual pre-offshore' and
pd.to_datetime(df.index.values[0]).year == 2015)):
check_dst(df.index, autumn_expect=2)
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
else:
dst_arr = np.zeros(len(df.index), dtype=bool)
check_dst(df.index, autumn_expect=1)
df.index = df.index.tz_localize('Europe/Berlin', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
variable, attribute = dataset_name.split(' ')[:2]
    # Since 2016, wind data has an additional column for offshore.
# Baltic 1 has been producing since 2011-05-02 and Baltic2 since
# early 2015 (source: Wikipedia) so it is probably not correct that
# 50Hertz-Wind data pre-2016 is only onshore. Maybe we can ask at
# 50Hertz directly.
return df
def read_amprion(filepath, dataset_name):
'''Read a file from Amprion into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['Datum', 'Uhrzeit']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands=None,
# Truncate values in 'time' column after 5th character.
converters={'Uhrzeit': lambda x: x[:5]},
)
# Wind onshore
if dataset_name == 'wind':
df['wind_onshore'] = df['Online Hochrechnung [MW]']
# DST-Handling:
    # In the years after 2009, during the fall dst-transition, only the
# summertime hour is reported, the wintertime hour is missing in the data.
# dst_arr is a boolean array consisting only of "True" entries, telling
# python to treat the hour from 2:00 to 2:59 as summertime.
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=0)
index1 = df.index[df.index.year >= 2018]
index1 = index1.tz_localize('Europe/Berlin', ambiguous='infer')
index2 = df.index[df.index.year < 2018]
dst_arr = np.ones(len(index2), dtype=bool)
index2 = index2.tz_localize('Europe/Berlin', ambiguous=dst_arr)
df.index = index2.append(index1)
df.index = df.index.tz_convert(None)
return df
def read_tennet(filepath, dataset_name):
'''Read a file from TenneT into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
encoding='latin_1',
header=3,
index_col=False,
parse_dates=False,
date_parser=None,
dayfirst=True,
thousands=None,
converters=None,
)
# Wind onshore
if dataset_name == 'wind':
# Calculate onshore wind-generation
df['wind_onshore'] = df['tatsächlich [MW]'].sub(
df['Anteil Offshore [MW]'])
    # Construct the datetime index
renamer = {'Datum': 'date', 'Position': 'pos'}
df.rename(columns=renamer, inplace=True)
df['date'].fillna(method='ffill', limit=100, inplace=True)
# Check the rows for irregularities
for i, row in df.iterrows():
# there must not be more than 100 quarter-hours in a day
if row['pos'] > 100:
logger.warning('%s th quarter-hour at %s, position %s',
row['pos'], row['date'], i)
# On the day in March when summertime begins, shift the data forward by
# 1 hour, beginning with the 9th quarter-hour, so the index runs again
# up to 96
elif (row['pos'] == 92 and (
(i == len(df.index) - 1) or (df['pos'][i + 1] == 1))):
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 9)].index
df.loc[slicer, 'pos'] = df['pos'] + 4
# Instead of having the quarter-hours' index run up to 100, we want
# to have it set back by 1 hour beginning from the 13th
# quarter-hour, ending at 96
elif row['pos'] == 100: # True when summertime ends in October
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 13)].index
df.loc[slicer, 'pos'] = df['pos'] - 4
# Compute timestamp from position and generate datetime-index
df['hour'] = (np.trunc((df['pos'] - 1) / 4)).astype(int).astype(str)
df['minute'] = (((df['pos'] - 1) % 4) * 15).astype(int).astype(str)
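    # Worked example of the position arithmetic above (illustrative): pos=1 -> 00:00,
    # pos=5 -> 01:00, pos=92 -> 22:45 and pos=96 -> 23:45, so after the corrections in
    # the loop every day maps onto exactly 96 quarter-hours.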
df.index = pd.to_datetime(
df['date'] + ' ' + df['hour'] + ':' + df['minute'], dayfirst=True)
# DST-handling
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_transnetbw(filepath, dataset_name):
'''Read a file from TransnetBW into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=0,
index_col=None,
parse_dates=None, # {'timestamp': ['Datum von', 'Uhrzeit von']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands=None,
converters=None,
)
# Wind onshore
if dataset_name == 'wind':
df['wind_onshore'] = df['Ist-Wert (MW)']
# rename columns
renamer = {'Datum von': 'date', 'Uhrzeit von': 'time'}
df.rename(columns=renamer, inplace=True)
# DST-handling
    # timestamp 01:45 just before spring DST transition has been falsely set to
# 3:45, which we correct here
slicer = (df['time'] == '03:45') & (df['time'].shift(periods=1) == '01:30')
df.loc[slicer, 'time'] = '01:45'
df.index = pd.to_datetime(df['date'] + ' ' + df['time'], dayfirst=True)
dst_arr = np.zeros(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_opsd(filepath, param_dict, headers):
'''Read a file from OPSD into a DataFrame'''
df = pd.read_csv(
filepath,
sep=',',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['day']},
date_parser=None,
dayfirst=False,
decimal='.',
thousands=None,
converters=None,
)
# Split the colname after the first "_"
cols = [(col_name.split('_')[0], '_'.join(col_name.split('_')[1:-1]))
for col_name in df.columns]
df.columns = pd.MultiIndex.from_tuples(cols)
# Keep only wind and solar
keep = ['wind', 'wind_onshore', 'wind_offshore', 'solar']
df = df.loc[:, (slice(None), keep)]
# The capacities data only has one entry per day, which pandas
# interprets as 00:00h. We will broadcast the dayly data for
# all quarter-hours of the day until the next given data point.
# For this, we we expand the index so it reaches to 23:59 of
# the last day, not only 00:00.
last = pd.to_datetime([df.index[-1]]) + timedelta(days=1, minutes=59)
until_last = df.index.append(last).rename('timestamp')
df = df.reindex(index=until_last, method='ffill')
df = df.loc[(2005 <= df.index.year) & (df.index.year <= 2019)]
dfs = {}
for timezone, res, ddf in [
('CET', '15min', df.loc[:, ['DE']]),
('WET', '30min', df.loc[:, ['GB-UKM', 'GB-GBN', 'GB-NIR']]),
('CET', '60min', df.loc[:, ['CH', 'DK', 'SE']])]:
# DST-handling
ddf.index = ddf.index.tz_localize(timezone).tz_convert(None)
ddf = ddf.resample(res).ffill().round(0)
# Create the MultiIndex
cols = [tuple(param_dict['colmap'][col_name[0]][level]
.format(variable=col_name[1].lower())
for level in headers) for col_name in ddf.columns]
ddf.columns = pd.MultiIndex.from_tuples(cols, names=headers)
dfs[res] = ddf
return dfs
def read_svenska_kraftnaet(filepath, dataset_name):
'''Read a file from Svenska Kraftnät into a DataFrame'''
if dataset_name in ['wind_solar_1', 'wind_solar_2']:
skip = 4
cols = {0: 'date', 1: 'hour', 2: 'load', 3: 'wind'}
else:
if dataset_name == 'wind_solar_4':
skip = 5
else:
skip = 7
cols = {0: 'timestamp', 1: 'load', 2: 'wind', 8: 'solar'}
df = pd.read_excel(
io=filepath,
# read the last sheet (in some years,
# there are hidden sheets that would cause errors)
sheet_name=-1,
header=None,
skiprows=skip,
index_col=None,
usecols=cols.keys(),
names=cols.values()
)
if dataset_name in ['wind_solar_1', 'wind_solar_2']:
# in 2009 there is a row below the table for the sums that we don't
# want to read in
df = df[df['date'].notnull()]
df.index = pd.to_datetime(
df['date'].astype(int).astype(str) + ' ' +
df['hour'].astype(int).astype(str).str.replace('00', '') + ':00',
dayfirst=False,
infer_datetime_format=True)
else:
# in 2011 there is a row below the table for the sums that we don't
# want to read in
df = df[((df['timestamp'].notnull()) &
(df['timestamp'].astype(str) != 'Tot summa GWh'))]
df.index = pd.to_datetime(df['timestamp'], dayfirst=True)
# The timestamp ("Tid" in the original) gives the time without
# daylight savings time adjustments (normaltid). To convert to UTC,
# one hour has to be deducted
df.index = df.index - timedelta(hours=1) # + pd.offsets.Hour(-1)
return df
def read_apg(filepath):
'''Read a file from APG into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
encoding='latin_1',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['Von']},
dayfirst=True,
decimal=',',
thousands='.',
        # Format of the raw_hour-column is normally 01:00:00, 02:00:00 etc.
# throughout the year, but 3A:00:00, 3B:00:00 for the (possibly
# DST-transgressing) 3rd hour of every day in October, we truncate the
# hours column after 2 characters and replace letters which are there to
# indicate the order during fall DST-transition.
converters={'Von': lambda x: str(x).replace('A', '').replace('B', '')}
)
# Correct column names
df.rename(columns=lambda x: x.replace(' ', ' '), inplace=True)
# DST-handling
df.index = df.index.tz_localize('Europe/Vienna', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_rte(filepath):
'''Read a file from RTE into a DataFrame'''
cols = ['Date', 'Heure', 'Consommation (MW)', 'Prévision J-1 (MW)',
'Eolien (MW)', 'Solaire (MW)']
df = pd.read_csv(
filepath,
sep=';',
encoding='utf-8',
header=0,
index_col='timestamp',
        # there is also a column with UTC but it is incorrect
parse_dates={'timestamp': ['Date', 'Heure']},
dayfirst=True,
usecols=cols
)
    # filter out quarter-hourly observations and sort the index
df = df.loc[df.index.minute.isin([0, 30]), :]
df.sort_index(axis='index', inplace=True)
# DST handling
# drop 1 hour after spring dst as it contains inconsistent data (copy of
# hour before). The 1 hour will later be interpolated
dst_transitions_spring = [
dd for d in pytz.timezone('Europe/Paris')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3
for dd in (d.replace(hour=2, minute=0), d.replace(hour=2, minute=30))]
df = df.loc[~df.index.isin(dst_transitions_spring)]
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.zeros(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Paris', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
return df
def read_GB(filepath):
'''Read a file from National Grid or Elexon into a DataFrame'''
time_cols = {
'#Settlement Date': 'date', # Elexon
'Settlement Period': 'pos', # Elexon
'SETTLEMENT_DATE': 'date', # National Grid
'SETTLEMENT_PERIOD': 'pos' # National Grid
}
df = pd.read_csv(
filepath,
header=0,
usecols=None,
dayfirst=True
)
df.rename(columns=time_cols, inplace=True)
for i, row in df.iterrows():
# there must not be more than 50 half-hours in a day
if row['pos'] > 50:
logger.warning('%s th half-hour at %s, position %s',
row['pos'], row['date'], i)
# On the day in March when summertime begins, shift the data forward by
# 1 hour, beginning with the 5th half-hour, so the index runs again
# up to 48
elif (row['pos'] == 46 and (
(i == len(df.index) - 1) or (df['pos'][i + 1] == 1))):
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 3)].index
df.loc[slicer, 'pos'] = df['pos'] + 2
# Instead of having the half-hours' index run up to 50, we want
# to have it set back by 1 hour beginning from the 5th
# half-hour, ending at 48
elif row['pos'] == 50: # True when summertime ends in October
slicer = df[(df['date'] == row['date']) & (df['pos'] >= 5)].index
df.loc[slicer, 'pos'] = df['pos'] - 2
# Compute timestamp from position and generate datetime-index
df['hour'] = (np.trunc((df['pos'] - 1) / 2)).astype(int).astype(str)
df['minute'] = (((df['pos'] - 1) % 2) * 30).astype(int).astype(str)
df.index = pd.to_datetime(
df['date'] + ' ' + df['hour'] + ':' + df['minute'], dayfirst=True)
# DST-handling
df.index = df.index.tz_localize('Europe/London', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def terna_file_to_initial_dataframe(filepath):
'''
Parse the xml or read excel directly,
returning the data from the file in a simple-index dataframe.
    Some files are formatted as xml, some are pure excel files.
This function handles both cases.
Parameters:
----------
filepath: str
The path of the file to process
Returns:
----------
df: pandas.DataFrame
A pandas dataframe containing the data from the specified file.
'''
# First, we'll try to parse the file as if it is xml.
try:
excelHandler = ExcelHandler()
parse(filepath, excelHandler)
# Create the dataframe from the parsed data
df = pd.DataFrame(excelHandler.tables[0][2:],
columns=excelHandler.tables[0][1])
# Convert the "Generation [MWh]"-column to numeric
df['Generation [MWh]'] = pd.to_numeric(df['Generation [MWh]'])
except:
# In the case of an exception, treat the file as excel.
try:
df = pd.read_excel(filepath, header=1)
except xlrd.XLRDError:
df = pd.DataFrame()
return df
def read_terna(filepath, filedate, param_dict, headers):
'''
Read a file from Terna into a dataframe
Parameters:
----------
filepath: str
The path of the file to read.
url:
The url of the Terna page.
headers:
Levels for the MultiIndex.
Returns:
----------
df: pandas.DataFrame
A pandas multi-index dataframe containing the data from the specified
file.
'''
# Reading the file into a pandas dataframe
df = terna_file_to_initial_dataframe(filepath)
if df.empty:
return df
# Rename columns to match conventions
renamer = {
'Date/Hour': 'timestamp',
'Bidding Area': 'region',
'Type': 'variable',
'Generation [MWh]': 'values'
}
df.rename(columns=renamer, inplace=True)
# Casting the timestamp column to datetime and set it as index
df.index = pd.to_datetime(df['timestamp'])
# Some files contain data for different date than they should, in which
# case the link to the file had a different date than what we see after
# opening the file. So for the day they are supposed to represent there is
# no data and for the day they contain there is duplicate data.
    # We skip these files altogether.
if not (df.index.date == filedate).all():
return pd.DataFrame()
# Renaming the bidding area names to conform to the codes from areas.csv
df['region'] = 'IT_' + df['region']
# Renaming and filtering out wind and solar
# "PV Estimated" are solar panels connected to the distribution grid
# "PV Measured" are those connected to transmission grid
renewables = {
'Wind': ('wind_onshore', 'generation_actual'),
'Photovoltaic Estimated': ('solar', 'generation_actual_tso'),
'Photovoltaic Measured': ('solar', 'generation_actual_dso')
}
df = df.loc[df['variable'].isin(renewables.keys()), :]
for k, v in renewables.items():
df.loc[df['variable'] == k, 'attribute'] = v[1]
df.loc[df['variable'] == k, 'variable'] = v[0]
# Reshaping the data so that each combination of a bidding area and type
# is represented as a column of its own.
stacked = ['region', 'variable', 'attribute']
df.set_index(stacked, append=True, inplace=True)
df = df['values'].unstack(stacked)
# DST-handling
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=2)
# drop autumn dst hours as they contain inconsistent data
# (apparently 2:00 and 3:00 are added up and reported as value for 2:00).
# The 2 hours will later be interpolated
dst_transitions_autumn = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Rome')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 10]
df = df.loc[~df.index.isin(dst_transitions_autumn)]
    # Convert to UTC
df.index = df.index.tz_localize('Europe/Rome')
df.index = df.index.tz_convert(None)
# Create the MultiIndex
cols = [tuple(param_dict['colmap'][level]
.format(region=col_name[0],
variable=col_name[1],
attribute=col_name[2])
for level in headers) for col_name in df.columns]
df.columns = pd.MultiIndex.from_tuples(cols, names=headers)
# # add source and url to the columns.
# append_headers = {'source': 'Terna', 'unit': 'MW'}
# # Note: pd.concat inserts new MultiIndex values infront of the old ones
# df = pd.concat([df],
# keys=[tuple([*append_headers.values(), url])],
# names=[*append_headers.keys(), 'web'],
# axis='columns')
# # reorder and sort columns
# df = df.reorder_levels(headers, axis=1)
# df.sort_index(axis='columns', inplace=True)
# # Common headers for calculated colmns
# c_h = ('own calculation from Terna', url, 'MW')
# # Italian bidding zones
# bz_it = ['IT_CNOR', 'IT_CSUD', 'IT_NORD', 'IT_SARD', 'IT_SICI', 'IT_SUD']
# # Aggregate DSO and TSO level measurements for solar
# for region in bz_it:
# #import pdb; pdb.set_trace()
# df[(region, 'solar', 'generation_actual', *c_h)] = (
# df[(region, 'solar', 'generation_actual_dso')] +
# df[(region, 'solar', 'generation_actual_tso')])
# # Aggregate all regions
# for variable in ['solar', 'wind_onshore']:
# #for attribute in
# df[('IT', variable, 'generation_actual', *c_h)] = (
# df.loc[:, (bz_it, variable, 'generation_actual')]
# .sum(axis='columns', skipna=True))
return df
def read(
sources,
data_path,
parsed_path,
areas,
headers,
start_from_user,
end_from_user,
testmode=False):
# For each source in the source dictionary
for source_name, source_dict in sources.items():
# For each dataset from source_name
for dataset_name, param_dict in source_dict.items():
if source_name == '<NAME>':
param_dict['colmap'] = source_dict['wind_solar_1']['colmap']
read_dataset(
source_name,
dataset_name,
param_dict,
data_path,
parsed_path,
areas,
headers,
start_from_user=start_from_user,
end_from_user=end_from_user,
testmode=testmode)
return
def read_dataset(
source_name,
dataset_name,
param_dict,
data_path,
parsed_path,
areas,
headers,
start_from_user=None,
end_from_user=None,
testmode=False):
'''
For the sources specified in the sources.yml file, pass each downloaded
file to the correct read function.
Parameters
----------
source_name : str
Name of source to read files from
dataset_name : str
Indicator for subset of data available together in the same files
param_dict : dict
Dictionary of further parameters, i.e. the URL of the Source to be
placed in the column-MultiIndex
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
data_path : str, default: 'original_data'
Base download directory in which to save all downloaded files
parsed_path : str
Directory where to store parsed data as pickle files
areas : pandas.DataFrame
Contains mapping of available geographical areas showing how
countries, bidding zones, control areas relate to each other
start_from_user : datetime.date, default None
Start of period for which to read the data
end_from_user : datetime.date, default None
End of period for which to read the data
testmode : bool
If True, only read one file per source. Use for testing purposes.
Returns
----------
data_set: pandas.DataFrame
A DataFrame containing the combined data for dataset_name
'''
# The cumulated dict will store parsed data from all files from one dataset
    cumulated = {'15min': pd.DataFrame(),
                 '30min': pd.DataFrame()}
# %% [markdown]
# pip install pykrx
# %%
from datetime import datetime, timedelta
import FinanceDataReader as fdr
import yfinance as yf
import numpy as np
import pandas as pd
from pykrx import stock
import time
import bt
import warnings
# from tqdm import tqdm
warnings.filterwarnings(action='ignore')
# pd.options.display.float_format = '{:.4f}'.format
# %matplotlib inline
from IPython.display import display, HTML
# Allow a single notebook cell to display multiple outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Display settings for large pandas DataFrames
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('max_columns', None)
# %%
#from strategy import*
#from utils import *
# %%
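# 장중이냐: True during KRX regular trading hours (09:00-15:30).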
def 장중이냐(now):
return (9 <= now.hour <= 14) or (now.hour == 15 and (now.minute <= 30))
# %%
def AMS(x):
    ''' x : Series (a column of the price DataFrame)
        x[-1] : the reference date, i.e. the current value of x
        scores 1 if (today / past - 1) > 0, else 0
        => today / past > 1  =>  today > past  =>  x[-1] > x
    '''
# print(f"{list(np.where(x[-1]>x, 1, 0)[:-1])}, {len(np.where(x[-1]>x, 1, 0)[:-1])}")
    return np.mean(np.where(x[-1]>x, 1, 0)[:-1])  # exclude the same-day comparison via the [:-1] slice
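# Worked example (illustrative): for x = pd.Series([5, 3, 6, 4]) the current value 4 beats
# only the 3, so AMS(x) = np.mean([0, 1, 0]) = 1/3; the [:-1] slice drops the comparison
# of 4 with itself.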
# %%
# get_data
# code_list is tickers['code']
# start : before_13months
# end : baseday
def get_data(code_list, start, end):
df = pd.DataFrame()
tot = len(code_list)
count = 0
for code in code_list: # tqdm(code_list)
count += 1
print(f"{count}/{tot} : {code}")
t = fdr.DataReader(code, start, end)['Close'].rename(code)
# t = stock.get_market_ohlcv_by_date(start, end, code)['종가'].rename(code)
df = bt.merge(df, t)
time.sleep(0.75)
    # Drop columns whose most recent (last) value is NaN.
for c in df.columns:
if pd.isna(df.iloc[-1][c]):
print(f"drop : {c}")
df.drop(c, axis=1, inplace=True)
return df
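# Example usage (sketch; the ticker codes and dates below are placeholders):
# prices = get_data(['005930', '000660'], '2020-01-01', '2021-01-31')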
# %%
def 종목명(code, df):
""" 사용예) 종목명('A153130', tickers) or 종목명('153130', tickers)
"""
if code.startswith('A'):
return df[df['종목코드'] == code]['종목명'].values[0]
else:
return df[df['code'] == code]['종목명'].values[0]
def 종목코드(name, df):
""" A를 제외한 종목코드를 반환한다. FinanceDataReader에서 사용
사용예: 종목코드("KODEX달러선물레버리지", tickers)
"""
_df = df.copy()
_df['종목명'] = _df['종목명'].str.replace(' ', '')
return _df[_df['종목명'] == name.replace(' ', '')]['code'].values[0]
# %%
def pickup(df, 제외직전개월수=1):
"""df에서 모멘텀이 가장 좋은 3종목을 선택한다.
Args :
- df : 가격 데이터프레임
- 제외직전개월수 : df에서 제외할 데이터 개월 수
- now : 가격 데이터프레임의 가장 아래 시간
"""
t0 = df.index[-1]
제외 = t0 - pd.DateOffset(months=제외직전개월수)
    m6 = t0 - pd.DateOffset(months=6)
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from util.utils import create_userids, print_list
from util.normalization import normalize_rows
from sklearn import metrics
import util.settings as st
import warnings
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings("ignore")
def plot_scores(positive_scores, negative_scores, filename='scores.png', title='Score distribution'):
set_style()
plt.clf()
df = pd.DataFrame([positive_scores, negative_scores])
BINS = np.linspace(df.min(), df.max(), 31)
sns.distplot(positive_scores, norm_hist=True, color='green', bins=31)
sns.distplot(negative_scores, norm_hist=True, color='red', bins=31)
# plt.legend(loc='upper left')
plt.legend(['Genuine', 'Impostor'], loc='best')
plt.xlabel('Score')
plt.title(title)
plt.show()
# plt.savefig(filename + '.png')
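# Example usage (sketch; synthetic scores for illustration only):
# plot_scores(np.random.normal(1.0, 0.2, 500), np.random.normal(0.0, 0.2, 500),
#             title='Genuine vs. impostor score distribution')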
def set_style():
# This sets reasonable defaults for font size for
# a figure that will go in a paper
sns.set_context("paper", font_scale = 2)
# Set the font to be serif, rather than sans
sns.set(font='serif')
# Make the background white, and specify the
# specific font family
sns.set_style("white", {
"font.family": "serif",
"font.serif": ["Times", "Palatino", "serif"]
})
sns.set_style("ticks")
sns.set_style("whitegrid")
def plot_ROC_single(ee_file, title = 'ROC curve'):
set_style()
ee_data = pd.read_csv(ee_file)
auc_ee = metrics.auc(ee_data['FPR'], ee_data['TPR'])
plt.clf()
plt.title(title)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive rate')
plt.plot(ee_data['FPR'], ee_data['TPR'], '-', label = 'AUC_EE = %0.2f' % auc_ee)
label_ee = 'AUC = %0.2f' % auc_ee
legend_str = [label_ee]
plt.legend(legend_str)
plt.show()
# create a boxplot from a dataframe
#
def csv2boxplot(df, columns, title, ylabel, outputfilename):
myFig = plt.figure()
res = df.boxplot(column=columns, return_type='axes')
plt.title(title)
plt.xlabel('Type of features')
plt.ylabel(ylabel)
myFig.savefig('output_png/boxplot_sapimouse.png', format = 'png')
myFig.savefig(outputfilename + '.png', format='png')
# myFig.savefig(outputfilename + '.eps', format='eps')
# plt.show(res)
def plot_ROC_filelist(filelist, title = 'ROC curve', outputfilename='roc.png'):
set_style()
plt.clf()
counter = 1
labels = []
for file in filelist:
        data = pd.read_csv(file)
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:04:46 2017
@author: <NAME>
pygemfxns_plotting.py produces figures of simulation results
"""
# Built-in Libraries
import os
import collections
# External Libraries
import numpy as np
import pandas as pd
#import netCDF4 as nc
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import matplotlib.patches as mpatches
import scipy
from scipy import stats
from scipy.ndimage import uniform_filter
import cartopy
#import geopandas
import xarray as xr
from osgeo import gdal, ogr, osr
import pickle
# Local Libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import pygemfxns_gcmbiasadj as gcmbiasadj
import class_mbdata
import class_climate
#import run_simulation
# Script options
option_plot_cmip5_normalizedchange = 1
option_plot_cmip5_runoffcomponents = 0
option_plot_cmip5_map = 0
option_output_tables = 0
option_subset_GRACE = 0
option_plot_modelparam = 0
option_plot_era_normalizedchange = 1
option_compare_GCMwCal = 0
option_plot_mcmc_errors = 0
option_plot_maxloss_issues = 0
option_plot_individual_glaciers = 0
option_plot_degrees = 0
option_plot_pies = 0
option_plot_individual_gcms = 0
#%% ===== Input data =====
netcdf_fp_cmip5 = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/spc/'
netcdf_fp_era = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/ERA-Interim/ERA-Interim_1980_2017_nochg'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_allglac_1ch_tn_20190108/'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190222_adjp10/'
mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
figure_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/figures/cmip5/'
csv_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/csv/cmip5/'
cal_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
# Regions
rgi_regions = [13, 14, 15]
#rgi_regions = [13]
# Shapefiles
rgiO1_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/RGI/rgi60/00_rgi60_regions/00_rgi60_O1Regions.shp'
watershed_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/HMA_basins_20181018_4plot.shp'
kaab_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/kaab2015_regions.shp'
srtm_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA.tif'
srtm_contour_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA_countours_2km_gt3000m_smooth.shp'
rgi_glac_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA.shp'
#kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_w_watersheds_kaab.csv'
#kaab_csv = pd.read_csv(kaab_dict_fn)
#kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab))
# GCMs and RCP scenarios
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'IPSL-CM5A-MR', 'MIROC5', 'MRI-CGCM3', 'NorESM1-M']
gcm_names = ['CanESM2']
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'MPI-ESM-LR', 'NorESM1-M']
rcps = ['rcp26', 'rcp45', 'rcp85']
#rcps = ['rcp26']
# Grouping
grouping = 'all'
#grouping = 'rgi_region'
#grouping = 'watershed'
#grouping = 'kaab'
# Variable name
vn = 'mass_change'
#vn = 'volume_norm'
#vn = 'peakwater'
# Group dictionaries
watershed_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_watershed.csv'
watershed_csv = pd.read_csv(watershed_dict_fn)
watershed_dict = dict(zip(watershed_csv.RGIId, watershed_csv.watershed))
kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'
kaab_csv = pd.read_csv(kaab_dict_fn)
kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab_name))
# GRACE mascons
mascon_fp = input.main_directory + '/../GRACE/GSFC.glb.200301_201607_v02.4/'
mascon_fn = 'mascon.txt'
mascon_cns = ['CenLat', 'CenLon', 'LatWidth', 'LonWidth', 'Area_arcdeg', 'Area_km2', 'location', 'basin',
'elevation_flag']
mascon_df = pd.read_csv(mascon_fp + mascon_fn, header=None, names=mascon_cns, skiprows=14,
delim_whitespace=True)
mascon_df = mascon_df.sort_values(by=['CenLat', 'CenLon'])
mascon_df.reset_index(drop=True, inplace=True)
degree_size = 0.25
peakwater_Nyears = 10
# Plot label dictionaries
title_dict = {'Amu_Darya': 'Amu Darya',
'Brahmaputra': 'Brahmaputra',
'Ganges': 'Ganges',
'Ili': 'Ili',
'Indus': 'Indus',
'Inner_Tibetan_Plateau': 'Inner TP',
'Inner_Tibetan_Plateau_extended': 'Inner TP ext',
'Irrawaddy': 'Irrawaddy',
'Mekong': 'Mekong',
'Salween': 'Salween',
'Syr_Darya': 'Syr Darya',
'Tarim': 'Tarim',
'Yangtze': 'Yangtze',
'inner_TP': 'Inner TP',
'Karakoram': 'Karakoram',
'Yigong': 'Yigong',
'Yellow': 'Yellow',
'Bhutan': 'Bhutan',
'Everest': 'Everest',
'West Nepal': 'West Nepal',
'Spiti Lahaul': 'Spiti Lahaul',
'tien_shan': 'Tien Shan',
'Pamir': 'Pamir',
'pamir_alai': 'Pamir Alai',
'Kunlun': 'Kunlun',
'Hindu Kush': 'Hindu Kush',
13: 'Central Asia',
14: 'South Asia West',
15: 'South Asia East',
'all': 'HMA'
}
title_location = {'Syr_Darya': [68, 46.1],
'Ili': [83.6, 45.5],
'Amu_Darya': [64.6, 36.9],
'Tarim': [83.0, 39.2],
'Inner_Tibetan_Plateau_extended': [100, 40],
'Indus': [70.7, 31.9],
'Inner_Tibetan_Plateau': [85, 32.4],
'Yangtze': [106.0, 29.8],
'Ganges': [81.3, 26.6],
'Brahmaputra': [92.0, 26],
'Irrawaddy': [96.2, 23.8],
'Salween': [98.5, 20.8],
'Mekong': [103.8, 17.5],
'Yellow': [106.0, 36],
13: [83,39],
14: [70.8, 30],
15: [81,26.8],
'inner_TP': [89, 33.5],
'Karakoram': [68.7, 33.5],
'Yigong': [97.5, 26.2],
'Bhutan': [92.1, 26],
'Everest': [85, 26.3],
'West Nepal': [76.5, 28],
'Spiti Lahaul': [72, 31.9],
'tien_shan': [80, 42],
'Pamir': [67.3, 36.5],
'pamir_alai': [65.2, 40.2],
'Kunlun': [79, 37.5],
'Hindu Kush': [65.3, 35]
}
vn_dict = {'volume_glac_annual': 'Normalized Volume [-]',
'volume_norm': 'Normalized Volume Remaining [-]',
'runoff_glac_annual': 'Normalized Runoff [-]',
'peakwater': 'Peak Water [yr]',
'temp_glac_annual': 'Temperature [$^\circ$C]',
'prec_glac_annual': 'Precipitation [m]',
'precfactor': 'Precipitation Factor [-]',
'tempchange': 'Temperature bias [$^\circ$C]',
'ddfsnow': 'DDFsnow [mm w.e. d$^{-1}$ $^\circ$C$^{-1}$]'}
rcp_dict = {'rcp26': '2.6',
'rcp45': '4.5',
'rcp60': '6.0',
'rcp85': '8.5'}
# Colors list
colors_rgb = [(0.00, 0.57, 0.57), (0.71, 0.43, 1.00), (0.86, 0.82, 0.00), (0.00, 0.29, 0.29), (0.00, 0.43, 0.86),
(0.57, 0.29, 0.00), (1.00, 0.43, 0.71), (0.43, 0.71, 1.00), (0.14, 1.00, 0.14), (1.00, 0.71, 0.47),
(0.29, 0.00, 0.57), (0.57, 0.00, 0.00), (0.71, 0.47, 1.00), (1.00, 1.00, 0.47)]
gcm_colordict = dict(zip(gcm_names, colors_rgb[0:len(gcm_names)]))
rcp_colordict = {'rcp26':'b', 'rcp45':'k', 'rcp60':'m', 'rcp85':'r'}
rcp_styledict = {'rcp26':':', 'rcp45':'--', 'rcp85':'-.'}
east = 60
west = 110
south = 15
north = 50
xtick = 5
ytick = 5
xlabel = 'Longitude [$^\circ$]'
ylabel = 'Latitude [$^\circ$]'
#%% FUNCTIONS
def select_groups(grouping, main_glac_rgi_all):
"""
Select groups based on grouping
"""
if grouping == 'rgi_region':
groups = main_glac_rgi_all.O1Region.unique().tolist()
group_cn = 'O1Region'
elif grouping == 'watershed':
groups = main_glac_rgi_all.watershed.unique().tolist()
group_cn = 'watershed'
elif grouping == 'kaab':
groups = main_glac_rgi_all.kaab.unique().tolist()
group_cn = 'kaab'
groups = [x for x in groups if str(x) != 'nan']
elif grouping == 'degree':
groups = main_glac_rgi_all.deg_id.unique().tolist()
group_cn = 'deg_id'
elif grouping == 'mascon':
groups = main_glac_rgi_all.mascon_idx.unique().tolist()
groups = [int(x) for x in groups]
group_cn = 'mascon_idx'
else:
groups = ['all']
group_cn = 'all_group'
try:
groups = sorted(groups, key=str.lower)
except:
groups = sorted(groups)
return groups, group_cn
def partition_multimodel_groups(gcm_names, grouping, vn, main_glac_rgi_all, rcp=None):
"""Partition multimodel data by each group for all GCMs for a given variable
Parameters
----------
gcm_names : list
list of GCM names
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
rcp : str
rcp name
Output
------
time_values : np.array
time values that accompany the multimodel data
ds_group : list of lists
dataset containing the multimodel data for a given variable for all the GCMs
ds_glac : np.array
dataset containing the variable of interest for each gcm and glacier
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
# variable name
if vn == 'volume_norm' or vn == 'mass_change':
vn_adj = 'volume_glac_annual'
elif vn == 'peakwater':
vn_adj = 'runoff_glac_annual'
else:
vn_adj = vn
ds_group = [[] for group in groups]
for ngcm, gcm_name in enumerate(gcm_names):
for region in rgi_regions:
# Load datasets
if gcm_name == 'ERA-Interim':
netcdf_fp = netcdf_fp_era
ds_fn = 'R' + str(region) + '_ERA-Interim_c2_ba1_100sets_1980_2017.nc'
else:
netcdf_fp = netcdf_fp_cmip5 + vn_adj + '/'
ds_fn = ('R' + str(region) + '_' + gcm_name + '_' + rcp + '_c2_ba' + str(input.option_bias_adjustment) +
'_100sets_2000_2100--' + vn_adj + '.nc')
# Bypass GCMs that are missing a rcp scenario
try:
ds = xr.open_dataset(netcdf_fp + ds_fn)
except:
continue
# Extract time variable
if 'annual' in vn_adj:
try:
time_values = ds[vn_adj].coords['year_plus1'].values
except:
time_values = ds[vn_adj].coords['year'].values
elif 'monthly' in vn_adj:
time_values = ds[vn_adj].coords['time'].values
# Merge datasets
if region == rgi_regions[0]:
vn_glac_all = ds[vn_adj].values[:,:,0]
vn_glac_std_all = ds[vn_adj].values[:,:,1]
else:
vn_glac_all = np.concatenate((vn_glac_all, ds[vn_adj].values[:,:,0]), axis=0)
vn_glac_std_all = np.concatenate((vn_glac_std_all, ds[vn_adj].values[:,:,1]), axis=0)
try:
ds.close()
except:
continue
if ngcm == 0:
ds_glac = vn_glac_all[np.newaxis,:,:]
else:
ds_glac = np.concatenate((ds_glac, vn_glac_all[np.newaxis,:,:]), axis=0)
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = vn_glac_all[main_glac_rgi.index.values.tolist(),:]
# vn_glac_std = vn_glac_std_all[main_glac_rgi.index.values.tolist(),:]
# vn_glac_var = vn_glac_std **2
# Regional sum
vn_reg = vn_glac.sum(axis=0)
# Record data for multi-model stats
if ngcm == 0:
ds_group[ngroup] = [group, vn_reg]
else:
ds_group[ngroup][1] = np.vstack((ds_group[ngroup][1], vn_reg))
return groups, time_values, ds_group, ds_glac
def partition_era_groups(grouping, vn, main_glac_rgi_all):
"""Partition multimodel data by each group for all GCMs for a given variable
Parameters
----------
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
Output
------
time_values : np.array
time values that accompany the multimodel data
ds_group : list of lists
dataset containing the multimodel data for a given variable for all the GCMs
ds_glac : np.array
dataset containing the variable of interest for each gcm and glacier
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
# variable name
if vn == 'volume_norm' or vn == 'mass_change':
vn_adj = 'volume_glac_annual'
elif vn == 'peakwater':
vn_adj = 'runoff_glac_annual'
else:
vn_adj = vn
ds_group = [[] for group in groups]
for region in rgi_regions:
# Load datasets
ds_fn = 'R' + str(region) + '_ERA-Interim_c2_ba1_100sets_1980_2017.nc'
ds = xr.open_dataset(netcdf_fp_era + ds_fn)
# Extract time variable
if 'annual' in vn_adj:
try:
time_values = ds[vn_adj].coords['year_plus1'].values
except:
time_values = ds[vn_adj].coords['year'].values
elif 'monthly' in vn_adj:
time_values = ds[vn_adj].coords['time'].values
# Merge datasets
if region == rgi_regions[0]:
vn_glac_all = ds[vn_adj].values[:,:,0]
vn_glac_std_all = ds[vn_adj].values[:,:,1]
else:
vn_glac_all = np.concatenate((vn_glac_all, ds[vn_adj].values[:,:,0]), axis=0)
vn_glac_std_all = np.concatenate((vn_glac_std_all, ds[vn_adj].values[:,:,1]), axis=0)
# Close dataset
ds.close()
ds_glac = [vn_glac_all, vn_glac_std_all]
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = vn_glac_all[main_glac_rgi.index.values.tolist(),:]
vn_glac_std = vn_glac_std_all[main_glac_rgi.index.values.tolist(),:]
vn_glac_var = vn_glac_std **2
# Regional mean, standard deviation, and variance
# mean: E(X+Y) = E(X) + E(Y)
# var: Var(X+Y) = Var(X) + Var(Y) + 2*Cov(X,Y)
# assuming X and Y are indepdent, then Cov(X,Y)=0, so Var(X+Y) = Var(X) + Var(Y)
# std: std(X+Y) = (Var(X+Y))**0.5
# Regional sum
vn_reg = vn_glac.sum(axis=0)
vn_reg_var = vn_glac_var.sum(axis=0)
# vn_reg_std = vn_glac_var**0.5
# Record data for multi-model stats
ds_group[ngroup] = [group, vn_reg, vn_reg_var]
return groups, time_values, ds_group, ds_glac
def partition_modelparams_groups(grouping, vn, main_glac_rgi_all):
"""Partition model parameters by each group
Parameters
----------
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
Output
------
groups : list
list of group names
ds_group : list of lists
dataset containing the multimodel data for a given variable for all the GCMs
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
ds_group = [[] for group in groups]
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = main_glac_rgi_all[vn].values[main_glac_rgi.index.values.tolist()]
# Regional sum
vn_reg = vn_glac.mean(axis=0)
# Record data for each group
ds_group[ngroup] = [group, vn_reg]
return groups, ds_group
def vn_multimodel_mean_processed(vn, ds, idx, time_values, every_glacier=0):
"""
Calculate multi-model mean for a given variable of interest
Parameters
----------
vn : str
variable/parameter name
ds : list
dataset containing groups
group_idx : int
group index
time_values : np.array
array of years
every_glacier : int
switch to work with groups or work with concatenated dataframe
    Output
    ------
    output_multimodel_mean : np.array or float
        multi-model mean normalized volume time series, or the peak-water year
"""
# Multi-model mean
if every_glacier == 0:
vn_multimodel_mean = ds[idx][1].mean(axis=0)
else:
vn_multimodel_mean = ds[:,idx,:].mean(axis=0)
# Normalized volume based on initial volume
if vn == 'volume_norm':
if vn_multimodel_mean[0] > 0:
output_multimodel_mean = vn_multimodel_mean / vn_multimodel_mean[0]
else:
output_multimodel_mean = np.zeros(vn_multimodel_mean.shape)
# Peak water based on 10-yr running average
elif vn == 'peakwater':
vn_runningmean = uniform_filter(vn_multimodel_mean, peakwater_Nyears)
output_multimodel_mean = time_values[np.where(vn_runningmean == vn_runningmean.max())[-1][0]]
return output_multimodel_mean
def peakwater(runoff, time_values, nyears):
"""Compute peak water based on the running mean of N years
Parameters
----------
runoff : np.array
one-dimensional array of runoff for each timestep
time_values : np.array
time associated with each timestep
nyears : int
number of years to compute running mean used to smooth peakwater variations
Output
------
peakwater_yr : int
peakwater year
peakwater_chg : float
percent change of peak water compared to first timestep (running means used)
runoff_chg : float
percent change in runoff at the last timestep compared to the first timestep (running means used)
"""
runningmean = uniform_filter(runoff, size=(nyears))
peakwater_idx = np.where(runningmean == runningmean.max())[-1][0]
peakwater_yr = time_values[peakwater_idx]
peakwater_chg = (runningmean[peakwater_idx] - runningmean[0]) / runningmean[0] * 100
runoff_chg = (runningmean[-1] - runningmean[0]) / runningmean[0] * 100
return peakwater_yr, peakwater_chg, runoff_chg
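# Illustrative example (synthetic data, not model output): for
#   runoff = np.concatenate([np.linspace(10, 20, 50), np.linspace(20, 12, 51)])
#   years = np.arange(2000, 2101)
# peakwater(runoff, years, 10) gives a peak-water year near 2050, a large positive
# peakwater_chg, and a smaller positive runoff_chg, because the 10-yr running mean
# peaks mid-century and ends above its initial level.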
def size_thresholds(variable, cutoffs, sizes):
"""Loop through size thresholds for a given variable to plot
Parameters
----------
variable : np.array
data associated with glacier characteristic
cutoffs : list
values used as minimums for thresholds
(ex. 100 would give you greater than 100)
sizes : list
size values for the plot
Output
------
output : np.array
plot size for each glacier
"""
output = np.zeros(variable.shape)
for i, cutoff in enumerate(cutoffs):
output[(variable>cutoff) & (output==0)] = sizes[i]
output[output==0] = 2
return output
def select_region_climatedata(gcm_name, rcp, main_glac_rgi):
"""
Get the regional temperature and precipitation for a given dataset.
Extracts all nearest neighbor temperature and precipitation data for a given set of glaciers. The mean temperature
and precipitation of the group of glaciers is returned. If two glaciers have the same temp/prec data, that data
    is only used once in the mean calculations. Additionally, one would not expect different GCMs to be similar
    because they all have different resolutions, so these mean calculations will use different numbers of pixels.
Parameters
----------
gcm_name : str
GCM name
rcp : str
rcp scenario (ex. rcp26)
main_glac_rgi : pd.DataFrame
glacier dataset used to select the nearest neighbor climate data
"""
# Date tables
print('select_region_climatedata fxn dates supplied manually')
dates_table_ref = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
dates_table = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
# Load gcm lat/lons
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp)
# Select lat/lon from GCM
ds_elev = xr.open_dataset(gcm.fx_fp + gcm.elev_fn)
gcm_lat_values_all = ds_elev.lat.values
gcm_lon_values_all = ds_elev.lon.values
ds_elev.close()
# Lat/lon dictionary to convert
gcm_lat_dict = dict(zip(range(gcm_lat_values_all.shape[0]), list(gcm_lat_values_all)))
gcm_lon_dict = dict(zip(range(gcm_lon_values_all.shape[0]), list(gcm_lon_values_all)))
    # Find nearest neighbors for glaciers that have pixels
latlon_nearidx = pd.DataFrame(np.zeros((main_glac_rgi.shape[0],2)), columns=['CenLat','CenLon'])
latlon_nearidx.iloc[:,0] = (np.abs(main_glac_rgi.CenLat.values[:,np.newaxis] - gcm_lat_values_all).argmin(axis=1))
latlon_nearidx.iloc[:,1] = (np.abs(main_glac_rgi.CenLon.values[:,np.newaxis] - gcm_lon_values_all).argmin(axis=1))
latlon_nearidx = latlon_nearidx.drop_duplicates().sort_values(['CenLat', 'CenLon'])
latlon_nearidx.reset_index(drop=True, inplace=True)
latlon_reg = latlon_nearidx.copy()
latlon_reg.CenLat.replace(gcm_lat_dict, inplace=True)
latlon_reg.CenLon.replace(gcm_lon_dict, inplace=True)
# ===== LOAD CLIMATE DATA =====
# Reference climate data
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn, latlon_reg,
dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn, latlon_reg,
dates_table_ref)
# ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, latlon_reg)
# GCM climate data
gcm_temp_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, latlon_reg, dates_table)
gcm_prec_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, latlon_reg, dates_table)
# gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, latlon_reg)
# GCM subset to agree with reference time period to calculate bias corrections
gcm_subset_idx_start = np.where(dates_table.date.values == dates_table_ref.date.values[0])[0][0]
gcm_subset_idx_end = np.where(dates_table.date.values == dates_table_ref.date.values[-1])[0][0]
gcm_temp = gcm_temp_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
gcm_prec = gcm_prec_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
## ===== BIAS ADJUSTMENTS =====
# OPTION 2: Adjust temp and prec according to Huss and Hock (2015) accounts for means and interannual variability
if input.option_bias_adjustment == 2:
# TEMPERATURE BIAS CORRECTIONS
# Mean monthly temperature
ref_temp_monthly_avg = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_temp_monthly_avg = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
# Monthly bias adjustment
gcm_temp_monthly_adj = ref_temp_monthly_avg - gcm_temp_monthly_avg
# Monthly temperature bias adjusted according to monthly average
t_mt = gcm_temp_all + np.tile(gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Mean monthly temperature bias adjusted according to monthly average
t_m25avg = np.tile(gcm_temp_monthly_avg + gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Calculate monthly standard deviation of temperature
ref_temp_monthly_std = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
gcm_temp_monthly_std = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
variability_monthly_std = ref_temp_monthly_std / gcm_temp_monthly_std
# Bias adjusted temperature accounting for monthly mean and variability
gcm_temp_bias_adj = t_m25avg + (t_mt - t_m25avg) * np.tile(variability_monthly_std, int(gcm_temp_all.shape[1]/12))
# PRECIPITATION BIAS CORRECTIONS
# Calculate monthly mean precipitation
ref_prec_monthly_avg = (ref_prec.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_prec_monthly_avg = (gcm_prec.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
bias_adj_prec = ref_prec_monthly_avg / gcm_prec_monthly_avg
# Bias adjusted precipitation accounting for differences in monthly mean
gcm_prec_bias_adj = gcm_prec_all * np.tile(bias_adj_prec, int(gcm_temp_all.shape[1]/12))
# Regional means
reg_mean_temp_biasadj = gcm_temp_bias_adj.mean(axis=0)
reg_mean_prec_biasadj = gcm_prec_bias_adj.mean(axis=0)
return reg_mean_temp_biasadj, reg_mean_prec_biasadj
#%% LOAD ALL GLACIERS
# Load all glaciers
for rgi_region in rgi_regions:
# Data on all glaciers
main_glac_rgi_region = modelsetup.selectglaciersrgitable(rgi_regionsO1=[rgi_region], rgi_regionsO2 = 'all',
rgi_glac_number='all')
# Glacier hypsometry [km**2]
main_glac_hyps_region = modelsetup.import_Husstable(main_glac_rgi_region, input.hyps_filepath,
input.hyps_filedict, input.hyps_colsdrop)
# Ice thickness [m], average
main_glac_icethickness_region= modelsetup.import_Husstable(main_glac_rgi_region,
input.thickness_filepath, input.thickness_filedict,
input.thickness_colsdrop)
if rgi_region == rgi_regions[0]:
main_glac_rgi_all = main_glac_rgi_region
main_glac_hyps_all = main_glac_hyps_region
main_glac_icethickness_all = main_glac_icethickness_region
else:
main_glac_rgi_all = pd.concat([main_glac_rgi_all, main_glac_rgi_region], sort=False)
main_glac_hyps_all = pd.concat([main_glac_hyps_all, main_glac_hyps_region], sort=False)
        main_glac_icethickness_all = pd.concat([main_glac_icethickness_all, main_glac_icethickness_region], sort=False)
import matplotlib.pyplot as plt
import requests
import os
import skimage
import random
import json
import webbrowser
import pandas as pd
import numpy as np
import time
from PIL import Image, ImageDraw
from ast import literal_eval as make_tuple
global population
global api_calls
global stop
global MUTATION_RATE
population = []
lastEvaluatedPopulation = []
api_calls = 0
stop = False
MUTATION_RATE = 10
# initial random generation of an image
def generateImage():
# set image format
img = Image.new('RGB', (64, 64), color='black')
draw = ImageDraw.Draw(img)
# draw four rectangles with random colors
positions = [
((0, 0), (32, 32)),
((32, 0), (64, 32)),
((0, 32), (32, 64)),
((32, 32), (64, 64)),
]
colors = []
for position in positions:
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
draw.rectangle(position, fill=color)
colors.append(color)
return {"image": img, "confidence": 0, "colors": colors, "class": ""}
# eval fitness for each individual
def evalFitness():
global api_calls
global stop
for individual in population:
name = 'toEval.png'
image = individual["image"]
image.save(name)
payload= {'key': 'Engeibei1uok4xaecahChug6eihos0wo'}
r = requests.post('https://phinau.de/trasi', data=payload, files={'image': open(name, 'rb')})
api_calls += 1
try:
individual["confidence"] = r.json()[0]["confidence"]
individual["class"] = r.json()[0]["class"]
except ValueError:
print("Decoding JSON failed -> hit API rate :(")
stop = True
break
# create initial population
def initPopulation(count):
for i in range(count):
        population.append(generateImage())
# select best individuals from population
def selection(bestCount):
population.sort(key=lambda individual: individual["confidence"], reverse=True)
del population[bestCount:]
# crossover between individuals in the population
def crossover():
# cross rectangles, generate new images
for j in range(len(population)-1):
colorsFirst = population[0 + j]["colors"]
colorsSecond = population[1 + j]["colors"]
img = Image.new('RGB', (64, 64), color='black')
draw = ImageDraw.Draw(img)
positions = [
((0, 0), (32, 32)),
((32, 0), (64, 32)),
((0, 32), (32, 64)),
((32, 32), (64, 64)),
]
colors = [colorsFirst[0], colorsFirst[1], colorsSecond[2], colorsSecond[3]]
for i in range(4):
draw.rectangle(positions[i], fill=colors[i])
population.append({"image": img, "confidence": 0, "colors": colors, "class": ""})
# mutate each individual in the population and delete old population
def mutate(confidence):
# mutate colors of random rectangle
population_size = len(population)
for j in range(len(population)):
img = Image.new('RGB', (64, 64), color='black')
draw = ImageDraw.Draw(img)
positions = [
((0, 0), (32, 32)),
((32, 0), (64, 32)),
((0, 32), (32, 64)),
((32, 32), (64, 64)),
]
colors = population[j]["colors"]
if(population[j]["confidence"] < confidence):
# change the color of a random square
rect = random.randint(0, 3)
colors[rect] = (
colors[rect][0] + 1 + random.randint(-10, 10) * MUTATION_RATE,
colors[rect][1] + 1 + random.randint(-10, 10) * MUTATION_RATE,
colors[rect][2] + 1 + random.randint(-10, 10) * MUTATION_RATE)
for i in range(4):
draw.rectangle(positions[i], fill=colors[i])
population.append({"image": img, "confidence": 0, "colors": colors, "class": ""})
# delete old
del population[:population_size]
def printResults():
for individual in population:
print("confidence: ", individual["confidence"], " class: ", individual["class"])
print("..")
def getBestResult():
best = 0
for individual in population:
if(individual["confidence"] > best):
best = individual["confidence"]
return best
# get the count of images that match the confidence
def getCountThatMatch(confidence):
count = 0
for individual in population:
if(individual["confidence"] >= confidence):
count += 1
return count
# init parameters
INITIAL_POPULATION = 5 # EXPERIMENT
SELECTED_COUNT = 5 # specification
DESIRED_CONFIDENCE = 0.20 # specification
# run evolutionary algorithm (init -> selection -> loop(crossover-> mutate -> selection) until confidence matches all images)
def runEvoAlgorithm():
initPopulation(INITIAL_POPULATION)
evalFitness()
selection(SELECTED_COUNT)
printResults()
while getCountThatMatch(DESIRED_CONFIDENCE) < SELECTED_COUNT and stop == False and api_calls < 50:
#crossover()
mutate(DESIRED_CONFIDENCE)
evalFitness()
selection(SELECTED_COUNT)
if (stop == False):
printResults()
# save generated images with desired confidence
def saveImages():
for i in range(len(population)):
if(population[i]["confidence"] > 0.05):
image = population[i]["image"]
name = "img" + \
str(i) + "_" + str(population[i]["confidence"]
) + "_" + str(population[i]["class"]) + ".png"
image.save(name)
webbrowser.open(name)
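# Usage sketch (an assumption about how the pieces above are meant to be chained;
# wrapped in a function so that nothing extra runs when the script is executed):
def _demo_run():
    runEvoAlgorithm()   # init -> eval -> selection -> (mutate -> eval -> selection) loop
    saveImages()        # save and open every individual above the confidence threshold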
# make request with 1000 single color images and save results to CSV file
def getApiResultsForSingleColors():
# set image format
color = (0, 0, 0)
img = Image.new('RGB', (64, 64), color=color)
for i in range(10):
for j in range(10):
for k in range(10):
color = (i*25, j*25, k*25)
img = Image.new('RGB', (64, 64), color=color)
individual = {"image": img, "confidence": 0, "color": color, "class": ""}
# eval
name = 'toEval.png'
image = img
image.save(name)
payload= {'key': 'Engeibei1uok4xaecahChug6eihos0wo'}
r = requests.post('https://phinau.de/trasi', data=payload, files={'image': open(name, 'rb')})
time.sleep(1)
try:
individual["confidence"] = r.json()[0]["confidence"]
individual["class"] = r.json()[0]["class"]
population.append(individual)
                    df = pd.DataFrame(population, columns=["class", "confidence", "color"])
# Purpose: combine two tabs in a registrar-data file, and then add the selected instructor info (the output file of instructor_info.py)
# To run this script in Anaconda: python registrar_data_modify.py --input_file1=summer2017_registrar.xlsx --input_file2=instructor_selected_info.csv --output_file=modified_registrar_data.xlsx
import argparse
import csv
import pandas
from pandas import DataFrame
#Define the arguments sent to the script
parser = argparse.ArgumentParser(description='Create merged spreadsheet')
parser.add_argument('--input_file1', action='store',dest='input_file1',default='')
parser.add_argument('--input_file2',action='store',dest='input_file2',default='')
parser.add_argument('--output_file', action='store',dest='output_file',default='')
args = parser.parse_args()
#create a function to combine tabs in a registrar file, for example summer2017_registrar.xlsx
def combine_tabs(filename):
if '.xlsx' in filename or '.xls' in filename:
print("Opening file" + filename)
Data = pandas.ExcelFile(filename)
Mastertab = pandas.read_excel(Data,0) # mastertab is the first tab in an excel file
        Scoretab = pandas.read_excel(Data,1) # scoretab is the second tab in an excel file
Scoretab_Dict = Scoretab.set_index('ID').T.to_dict()
        # for each ID in the mastertab, look up its scores in Scoretab_Dict and add them
        # as new columns; IDs with no match in the score tab are filled with 'NA'
        score_columns = ['A01 - ACT English', 'A02 - ACT Math', 'A03 - ACT Reading',
                         'A04 - ACT Science Reasoning', 'A07 - ACT Combined English/Writing',
                         'A05 - ACT Composite', 'S01 - SAT Critical Reading',
                         'S02 - SAT Mathematics', 'S07 - SAT Writing',
                         'TIBL - TOEFL IBT Listening Score', 'TIBR - TOEFL IBT Reading Score',
                         'TIBS - TOEFL IBT Speaking Score', 'TIBW - TOEFL IBT Writing Score',
                         'TIBT - TOEFL IBT Total Score', 'ILT1 - IELTS Listening',
                         'ILT2 - IELTS Overall', 'ILT3 - IELTS Reading',
                         'ILT4 - IELTS Speaking', 'ILT5 - IELTS Writing']
        for column in score_columns:
            Mastertab[column] = [Scoretab_Dict[ID][column] if ID in Scoretab_Dict else 'NA'
                                 for ID in Mastertab['ID']]
return (Mastertab)
# Create a function to build data frame
def build_data_frame (filename):
File = filename
    Data = pandas.read_csv(File)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' ridge regression '''
''' a technique for analysing multiple regression data
    that suffer from multicollinearity
'''
import numpy as np
import pandas as pd
from sklearn import linear_model
import random
import matplotlib.pyplot as plt
''' multi variable linear equation '''
''' t = 12x - 5y + 9z + 24
'''
constant = 24
error_induce = 0.01
def fx(x):
res = (12.0 * x)
error = 0.0
error = res * random.uniform(-error_induce, error_induce)
return res + error
def fy(y):
res = (-05.0 * y)
error = 0.0
error = res * random.uniform(-error_induce, error_induce)
return res + error
def fz(z):
res = (09.0 * z)
error = 0.0
error = res * random.uniform(-error_induce, error_induce)
return res + error
''' data preparation '''
max_sample_value = 50
total_samples = 2500
train_sample_cnt = int((total_samples * 30.0 / 100.0))
test_sample_cnt = total_samples - train_sample_cnt
random_array = np.random.randint (
max_sample_value,
size=total_samples).astype(float)
x_samples = fx(random_array)
y_samples = fy(random_array)
z_samples = fz(random_array)
t_samples = x_samples + y_samples + z_samples
t_samples = t_samples + constant
''' splitting samples into train data and test data '''
x_samples_train, x_samples_test = np.split(
x_samples, [train_sample_cnt,])
y_samples_train, y_samples_test = np.split(
y_samples, [train_sample_cnt,])
z_samples_train, z_samples_test = np.split(
z_samples, [train_sample_cnt,])
t_samples_train, t_samples_test = np.split(
t_samples, [train_sample_cnt,])
''' combining all variables in column structure '''
xyz_samples_train = {'colx': x_samples_train,
'coly': y_samples_train,
'colz': z_samples_train}
dfxyz_samples_train = pd.DataFrame(data=xyz_samples_train)
dft_samples_train = pd.DataFrame(data=t_samples_train)
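# Sketch only: one plausible next step is fitting scikit-learn's Ridge estimator on the
# frames prepared above. The three feature columns are perfectly collinear (each is a
# scaled copy of the same random draw), which is exactly the ill-posed setting the L2
# penalty ||w||^2 is meant to stabilise. alpha=1.0 and the helper name are assumptions.
def _ridge_fit_sketch():
    model = linear_model.Ridge(alpha=1.0)
    model.fit(dfxyz_samples_train, dft_samples_train)
    return model.coef_, model.intercept_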
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ts_charting.figure as figure
from ts_charting.figure import process_series
class Testprocess_data(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_already_aligned(self):
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
plot_series = process_series(series, plot_index)
tm.assert_almost_equal(series, plot_series)
tm.assert_almost_equal(plot_series.index, plot_index)
def test_partial_plot(self):
"""
Test plotting series that is a subset of plot_index.
Should align and fill with nans
"""
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
series = series[:50] # only first 50
plot_series = process_series(series, plot_index)
# have same index
tm.assert_almost_equal(plot_series.index, plot_index)
assert plot_series.count() == 50
assert np.all(plot_series[50:].isnull()) # method=None so fill with nan
assert np.all(plot_series[:50] == series[:50])
def test_unaligned_indexes(self):
"""
Test when series.index and plot_index have no common datetimes
"""
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
# move days to 11 PM the night before
shift_series = series.tshift(-1, '1h')
plot_series = process_series(shift_series, plot_index)
# without method, data doesn't align and we nothing but nans
tm.assert_almost_equal(plot_series.index, plot_index) # index aligh properly
assert np.all(plot_series.isnull()) # no data
# method = 'ffill'
plot_series = process_series(shift_series, plot_index, method='ffill')
# without method, data doesn't align and we nothing but nans
tm.assert_almost_equal(plot_series.index, plot_index) # index align
# since we're forward filling a series we tshifted into past
# plot_series should just equal the original series
tm.assert_almost_equal(plot_series, series)
def test_different_freqs(self):
"""
Tests indexes of differeing frequencies. This is more of repeat
test of test_partial_plot but with many holes instead of one half missing
value.
"""
plot_index = pd.date_range(start="2000-01-01", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
grouped_series = series.resample('MS', 'max')
plot_series = process_series(grouped_series, plot_index)
tm.assert_almost_equal(plot_series.index, plot_index) # index align
# method=None, dropna should give back same series
tm.assert_almost_equal(plot_series.dropna(), grouped_series)
plot_series = process_series(grouped_series, plot_index, method='ffill')
tm.assert_almost_equal(plot_series.index, plot_index) # index align
assert plot_series.isnull().sum() == 0
month_ind = plot_series.index.month - 1
# assert that each value corresponds to its month in grouped_series
assert np.all(grouped_series[month_ind] == plot_series)
def test_scalar(self):
"""
Test the various ways we handle scalars.
"""
plot_index = pd.date_range(start="2000-01-01", freq="D", periods=100)
plot_series = process_series(5, plot_index)
tm.assert_almost_equal(plot_series.index, plot_index) # index align
assert np.all(plot_series == 5)
# explicitly pass in the series index. Should have a plot_series with only iloc[10:20]
# equal to the scalar 5.
plot_series = process_series(5, plot_index, series_index=plot_index[10:20])
tm.assert_almost_equal(plot_series.index, plot_index) # index align
assert np.all(plot_series[10:20] == 5)
assert plot_series.isnull().sum() == 90
# no plot_index. This still works because we're passing in series_index
plot_series = process_series(5, None, series_index=plot_index[10:20])
correct = pd.Series(5, index=plot_index[10:20])
tm.assert_almost_equal(correct, plot_series)
# without any index, a scalar will error. Cannot plot a scalar on an
# empty plot without passing in an index
try:
plot_series = process_series(5, None)
except:
pass
else:
assert False, "scalar should fail without plot_index or series_index"
def test_iterable(self):
"""
Non pd.Series iterables require an equal length series_index or
plot_index.
"""
try:
plot_series = process_series(range(10), None)
except:
pass
else:
assert False, "iterable should fail without plot_index or series_index"
plot_index = pd.date_range(start="2000-01-01", freq="D", periods=100)
try:
plot_series = process_series(range(10), plot_index)
except:
pass
else:
assert False, "iterable requires an index of same length"
# equal length, good times
plot_series = process_series(range(10), plot_index[:10])
correct = pd.Series(range(10), index=plot_index[:10])
        tm.assert_almost_equal(correct, plot_series)
"""
Experimental Mapping the maps keys using a switch_case structure.
Development paused while waiting for 3.10's pattern matching that may subsume this.
>>> from i2.switch_case_tree import *
>>> from collections import Counter
>>>
>>> special_featurizer = {
... 'len': len,
... 'cols': lambda df: df.columns,
... 'sum': lambda df: df.sum().sum(),
... }
>>> special_comparison = {
... 'alleq': lambda x, y: all(x == y),
... 'isin': lambda x, y: x in y,
... 'eq': operator.eq,
... }
>>> featurizer = ChainMap(
... special_featurizer,
... special_comparison,
... {'some_local_func': lambda x: list(map(str, x))}
... )
>>>
>>>
>>> comparison = ChainMap(
... special_comparison, AttrMap(operator, is_valid_val=is_valid_comparision),
... )
>>>
>>> assert comparison['contains'] == operator.contains
>>>
Let's have a look at the "featurizers" (where we purposely injected 3 functions that
were in fact not featurizers!
>>> sorted(featurizer) # doctest: +NORMALIZE_WHITESPACE
['alleq', 'cols', 'eq', 'isin', 'len', 'some_local_func', 'sum']
>>> Counter(map(is_valid_featurizer, featurizer.values()))
Counter({True: 4, False: 3})
>>> Counter(map(is_valid_comparision, featurizer.values()))
Counter({False: 4, True: 3})
Let's have a look at the comparison functions...
>>> Counter(map(is_valid_comparision, comparison.values()))
Counter({True: 81})
>>> Counter(map(is_valid_featurizer, comparison.values()))
Counter({False: 80, True: 1})
What's that single comparison function that's also a featurizer?
>>> next(filter(is_valid_featurizer, comparison.values())).__name__
'length_hint'
>>> from contextlib import suppress
>>>
>>> with suppress(ModuleNotFoundError, ImportError):
... import pandas as pd
... from collections import namedtuple
... Condition = namedtuple('Condition', ['feat', 'comp'])
... condition = {
... feat + '_' + comp: Condition(featurizer[feat], comparison[comp])
... for feat, comp in [('len', 'lt'), ('cols', 'isin'), ('cols', 'contains'),]
... }
... assert all(
... is_valid_feat_and_comp(feat, comp) for feat, comp in condition.values()
... )
...
... df = pd.DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})
... filt = mk_filt(df, *condition['len_lt'])
... result = list(filter(filt, [2, 3, 4, 5]))
... assert result == [4, 5]
"""
from collections import ChainMap
from collections.abc import Mapping
import operator
import inspect
class AsIsMap:
def __init__(self, is_valid_key=None):
if is_valid_key is None:
def is_valid_key(x):
return True
self._is_valid_key = is_valid_key
def _validate_key(self, k):
if not self._is_valid_key(k):
raise KeyError(f"{k} wasn't a valid key")
def __getitem__(self, k):
self._validate_key(k)
return k
class AttrMap(Mapping):
def __init__(self, obj, is_valid_val=None):
self._obj = obj
if is_valid_val is None:
def is_valid_val(x):
return True
self._is_valid_val = is_valid_val
@classmethod
def _validate_key(cls, k):
if not isinstance(k, str):
raise KeyError('key should be a string')
def _validate_val(self, v):
if not self._is_valid_val(v):
raise ValueError("key was valid and value found, but value wasn't valid")
def _getitem(self, k):
return getattr(self._obj, k)
def _val_of_key_is_valid(self, k):
return self._is_valid_val(self._getitem(k))
def __getitem__(self, k):
self._validate_key(k)
v = self._getitem(k)
self._validate_val(v)
return v
def __contains__(self, k):
self._validate_key(k)
return hasattr(self._obj, k) and self._val_of_key_is_valid(k)
def __iter__(self):
return filter(self._val_of_key_is_valid, dir(self._obj))
def __len__(self):
c = 0
for _ in self.__iter__():
c += 1
return c
def n_args_and_n_args_with_no_defaults(func):
try:
args = inspect.signature(func).parameters.values()
except ValueError: # happens because some builtins don't have signatures (!?!?)
return 0, 0
return len(args), len(list(filter(lambda x: x.default == x.empty, args)))
def is_valid_featurizer(func):
if not callable(func):
return False
n_args, n_no_dflt_args = n_args_and_n_args_with_no_defaults(func)
return (n_args >= 1) and (n_no_dflt_args <= 1)
def is_valid_comparision(func):
if not callable(func):
return False
n_args, n_no_dflt_args = n_args_and_n_args_with_no_defaults(func)
return (n_args >= 2) and (n_no_dflt_args <= 2)
def is_valid_feat_and_comp(feat, comp):
return is_valid_featurizer(feat) and is_valid_comparision(comp)
def values_are_valid_feat_and_comp(d):
return all(is_valid_feat_and_comp(feat, comp) for feat, comp in d.values())
def mk_filt(obj, featurizer, comparison):
feature = featurizer(obj)
def filt(x):
return comparison(feature, x)
return filt
def test_switch_case_tree():
from collections import Counter
special_featurizer = {
'len': len,
'cols': lambda df: df.columns,
'sum': lambda df: df.sum().sum(),
}
some_local_func = lambda x: list(map(str, x))
featurizer = ChainMap(
special_featurizer,
{
k: v
for k, v in locals().items()
if is_valid_featurizer(v) and getattr(v, '__module__', '').startswith('i2.')
},
)
special_comparison = {
'alleq': lambda x, y: all(x == y),
'isin': lambda x, y: x in y,
'eq': operator.eq,
}
comparison = ChainMap(
special_comparison, AttrMap(operator, is_valid_val=is_valid_comparision),
)
assert comparison['contains'] == operator.contains
assert Counter(map(is_valid_featurizer, featurizer.values())) == Counter({True: 3})
assert Counter(map(is_valid_comparision, featurizer.values())) == Counter(
{False: 3}
)
assert Counter(map(is_valid_featurizer, comparison.values())) == Counter(
{False: 80, True: 1}
)
assert Counter(map(is_valid_comparision, comparison.values())) == Counter(
{True: 81}
)
featurizer_kvs = {k: v.__name__ for k, v in featurizer.items()}
assert featurizer_kvs == {'len': 'len', 'cols': '<lambda>', 'sum': '<lambda>'}
from contextlib import suppress
with suppress(ModuleNotFoundError, ImportError):
import pandas as pd
from collections import namedtuple
Condition = namedtuple('Condition', ['feat', 'comp'])
condition = {
feat + '_' + comp: Condition(featurizer[feat], comparison[comp])
for feat, comp in [('len', 'lt'), ('cols', 'isin'), ('cols', 'contains'),]
}
assert all(
is_valid_feat_and_comp(feat, comp) for feat, comp in condition.values()
)
        df = pd.DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})
#!/usr/bin/env python3
#Author: <NAME>
#Contact: <EMAIL>
from __future__ import print_function
from . import SigProfilerMatrixGenerator as matGen
import os
import SigProfilerMatrixGenerator as sig
import re
import sys
import pandas as pd
import datetime
from SigProfilerMatrixGenerator.scripts import convert_input_to_simple_files as convertIn
import uuid
import shutil
import time
import numpy as np
import platform
import itertools
import statsmodels
import matplotlib as plt
from pathlib import Path
import sigProfilerPlotting as sigPlt
import scipy
def perm(n, seq):
'''
Generates a list of all available permutations of n-mers.
Parameters:
n -> length of the desired permutation string
seq -> list of all possible string values
Returns:
permus -> list of all available permutations
'''
permus = []
for p in itertools.product(seq, repeat=n):
permus.append("".join(p))
return(permus)
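# For example, perm(2, "AC") returns ['AA', 'AC', 'CA', 'CC'] (itertools.product order).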
def SigProfilerMatrixGeneratorFunc (project, genome, vcfFiles, exome=False, bed_file=None, chrom_based=False, plot=False, tsb_stat=False, seqInfo=False, cushion=100, gs=False):
'''
Allows for the import of the sigProfilerMatrixGenerator.py function. Returns a dictionary
with each context serving as the first level of keys.
Parameters:
project -> unique name given to the current samples
genome -> reference genome
vcfFiles -> path where the input vcf files are located.
exome -> flag to use only the exome or not
bed_file -> BED file that contains a list of ranges to be used in generating the matrices
chrom_based -> flag to create the matrices on a per chromosome basis
plot -> flag to generate the plots for each context
tsb_stat -> performs a transcriptional strand bias test for the 24, 384, and 6144 contexts. The output is
saved into the output/TSB directory
gs -> flag that performs a gene strand bias test
Returns:
matrices -> dictionary (nested) of the matrices for each context
example:
matrices = {'96': {'PD1001a':{'A[A>C]A':23,
'A[A>G]A':10,...},
'PD1202a':{'A[A>C]A':23,
'A[A>G]A':10,...},...},
'192':{'PD1001a':{'T:A[A>C]A':23,
'T:A[A>G]A':10,...},
'PD1202a':{'T:A[A>C]A':23,
'T:A[A>G]A':10,...},...},...}
'''
# Instantiates all of the required variables and references
if gs:
print("The Gene Strand Bias is not yet supported! Continuing with the matrix generation.")
gs = False
functionFlag = True
bed = False
bed_ranges = None
limited_indel = True
exome = exome
plot = plot
# Instantiates the final output matrix
matrices = {'96':None, '1536':None, '384':None, '6144':None, 'DINUC':None, '6':None, '24':None, 'INDEL':None}
# Provides a chromosome conversion from NCBI notation
ncbi_chrom = {'NC_000067.6':'1', 'NC_000068.7':'2', 'NC_000069.6':'3', 'NC_000070.6':'4',
'NC_000071.6':'5', 'NC_000072.6':'6', 'NC_000073.6':'7', 'NC_000074.6':'8',
'NC_000075.6':'9', 'NC_000076.6':'10', 'NC_000077.6':'11', 'NC_000078.6':'12',
'NC_000079.6':'13', 'NC_000080.6':'14', 'NC_000081.6':'15', 'NC_000082.6':'16',
'NC_000083.6':'17', 'NC_000084.6':'18', 'NC_000085.6':'19', 'NC_000086.7':'X',
'NC_000087.7':'Y'}
# Provides the reference file conversion from binary to base information
tsb_ref = {0:['N','A'], 1:['N','C'], 2:['N','G'], 3:['N','T'],
4:['T','A'], 5:['T','C'], 6:['T','G'], 7:['T','T'],
8:['U','A'], 9:['U','C'], 10:['U','G'], 11:['U','T'],
12:['B','A'], 13:['B','C'], 14:['B','G'], 15:['B','T'],
16:['N','N'], 17:['T','N'], 18:['U','N'], 19:['B','N']}
bias_sort = {'T':0,'U':1,'N':3,'B':2, 'Q':4}
tsb = ['T','U','N','B']
tsb_I = ['T','U','N','B','Q']
bases = ['A','C','G','T']
mutation_types = ['CC>AA','CC>AG','CC>AT','CC>GA','CC>GG','CC>GT','CC>TA','CC>TG','CC>TT',
'CT>AA','CT>AC','CT>AG','CT>GA','CT>GC','CT>GG','CT>TA','CT>TC','CT>TG',
'TC>AA','TC>AG','TC>AT','TC>CA','TC>CG','TC>CT','TC>GA','TC>GG','TC>GT',
'TT>AA','TT>AC','TT>AG','TT>CA','TT>CC','TT>CG','TT>GA','TT>GC','TT>GG']
mutation_types_non_tsb = ['AC>CA','AC>CG','AC>CT','AC>GA','AC>GG','AC>GT','AC>TA','AC>TG','AC>TT',
'AT>CA','AT>CC','AT>CG','AT>GA','AT>GC','AT>TA',
'CG>AT','CG>GC','CG>GT','CG>TA','CG>TC','CG>TT',
'GC>AA','GC>AG','GC>AT','GC>CA','GC>CG','GC>TA',
'TA>AT','TA>CG','TA>CT','TA>GC','TA>GG','TA>GT',
'TG>AA','TG>AC','TG>AT','TG>CA','TG>CC','TG>CT','TG>GA','TG>GC','TG>GT']
indels_seq_types = [ # Single-sequences
'C', 'T',
# Di-sequences
'AC','AT','CA','CC','CG','CT','GC','TA','TC','TT',
# Tri-sequences
'ACC', 'ACT', 'ATC', 'ATT', 'CAC', 'CAT', 'CCA', 'CCC', 'CCG', 'CCT', 'CGC', 'CGT', 'CTA', 'CTC', 'CTG', 'CTT',
'GCC', 'GCT', 'GTC', 'GTT', 'TAC', 'TAT', 'TCA', 'TCC', 'TCG', 'TCT', 'TGC', 'TGT', 'TTA', 'TTC', 'TTG', 'TTT',
# Tetra-sequences
'AACC', 'AACT', 'AATC', 'AATT', 'ACAC', 'ACAT', 'ACCA', 'ACCC', 'ACCG', 'ACCT', 'ACGC', 'ACGT', 'ACTA', 'ACTC', 'ACTG', 'ACTT', 'AGCC', 'AGCT', 'AGTC',
'AGTT', 'ATAC', 'ATAT', 'ATCA', 'ATCC', 'ATCG', 'ATCT', 'ATGC', 'ATGT', 'ATTA', 'ATTC', 'ATTG', 'ATTT', 'CAAC', 'CAAT', 'CACA', 'CACC', 'CACG', 'CACT',
'CAGC', 'CAGT', 'CATA', 'CATC', 'CATG', 'CATT', 'CCAA', 'CCAC', 'CCAG', 'CCAT', 'CCCA', 'CCCC', 'CCCG', 'CCCT', 'CCGA', 'CCGC', 'CCGG', 'CCGT', 'CCTA',
'CCTC', 'CCTG', 'CCTT', 'CGAC', 'CGAT', 'CGCA', 'CGCC', 'CGCG', 'CGCT', 'CGGC', 'CGTA', 'CGTC', 'CGTG', 'CGTT', 'CTAA', 'CTAC', 'CTAG', 'CTAT', 'CTCA',
'CTCC', 'CTCG', 'CTCT', 'CTGA', 'CTGC', 'CTGG', 'CTGT', 'CTTA', 'CTTC', 'CTTG', 'CTTT', 'GACC', 'GATC', 'GCAC', 'GCCA', 'GCCC', 'GCCG', 'GCCT', 'GCGC',
'GCTA', 'GCTC', 'GCTG', 'GCTT', 'GGCC', 'GGTC', 'GTAC', 'GTCA', 'GTCC', 'GTCG', 'GTCT', 'GTGC', 'GTTA', 'GTTC', 'GTTG', 'GTTT', 'TAAC', 'TACA', 'TACC',
'TACG', 'TACT', 'TAGC', 'TATA', 'TATC', 'TATG', 'TATT', 'TCAA', 'TCAC', 'TCAG', 'TCAT', 'TCCA', 'TCCC', 'TCCG', 'TCCT', 'TCGA', 'TCGC', 'TCGG', 'TCGT',
'TCTA', 'TCTC', 'TCTG', 'TCTT', 'TGAC', 'TGCA', 'TGCC', 'TGCG', 'TGCT', 'TGTA', 'TGTC', 'TGTG', 'TGTT', 'TTAA', 'TTAC', 'TTAG', 'TTAT', 'TTCA', 'TTCC',
'TTCG', 'TTCT', 'TTGA', 'TTGC', 'TTGG', 'TTGT', 'TTTA', 'TTTC', 'TTTG', 'TTTT',
# Penta-sequences
'AACCC', 'AACCT', 'AACTC', 'AACTT', 'AATCC', 'AATCT', 'AATTC', 'AATTT', 'ACACC', 'ACACT', 'ACATC', 'ACATT', 'ACCAC', 'ACCAT', 'ACCCA', 'ACCCC', 'ACCCG',
'ACCCT', 'ACCGC', 'ACCGT', 'ACCTA', 'ACCTC', 'ACCTG', 'ACCTT', 'ACGCC', 'ACGCT', 'ACGTC', 'ACGTT', 'ACTAC', 'ACTAT', 'ACTCA', 'ACTCC', 'ACTCG', 'ACTCT',
'ACTGC', 'ACTGT', 'ACTTA', 'ACTTC', 'ACTTG', 'ACTTT', 'AGCCC', 'AGCCT', 'AGCTC', 'AGCTT', 'AGTCC', 'AGTCT', 'AGTTC', 'AGTTT', 'ATACC', 'ATACT', 'ATATC',
'ATATT', 'ATCAC', 'ATCAT', 'ATCCA', 'ATCCC', 'ATCCG', 'ATCCT', 'ATCGC', 'ATCGT', 'ATCTA', 'ATCTC', 'ATCTG', 'ATCTT', 'ATGCC', 'ATGCT', 'ATGTC', 'ATGTT',
'ATTAC', 'ATTAT', 'ATTCA', 'ATTCC', 'ATTCG', 'ATTCT', 'ATTGC', 'ATTGT', 'ATTTA', 'ATTTC', 'ATTTG', 'ATTTT', 'CAACC', 'CAACT', 'CAATC', 'CAATT', 'CACAC',
'CACAT', 'CACCA', 'CACCC', 'CACCG', 'CACCT', 'CACGC', 'CACGT', 'CACTA', 'CACTC', 'CACTG', 'CACTT', 'CAGCC', 'CAGCT', 'CAGTC', 'CAGTT', 'CATAC', 'CATAT',
'CATCA', 'CATCC', 'CATCG', 'CATCT', 'CATGC', 'CATGT', 'CATTA', 'CATTC', 'CATTG', 'CATTT', 'CCAAC', 'CCAAT', 'CCACA', 'CCACC', 'CCACG', 'CCACT', 'CCAGC',
'CCAGT', 'CCATA', 'CCATC', 'CCATG', 'CCATT', 'CCCAA', 'CCCAC', 'CCCAG', 'CCCAT', 'CCCCA', 'CCCCC', 'CCCCG', 'CCCCT', 'CCCGA', 'CCCGC', 'CCCGG', 'CCCGT',
'CCCTA', 'CCCTC', 'CCCTG', 'CCCTT', 'CCGAC', 'CCGAT', 'CCGCA', 'CCGCC', 'CCGCG', 'CCGCT', 'CCGGC', 'CCGGT', 'CCGTA', 'CCGTC', 'CCGTG', 'CCGTT', 'CCTAA',
'CCTAC', 'CCTAG', 'CCTAT', 'CCTCA', 'CCTCC', 'CCTCG', 'CCTCT', 'CCTGA', 'CCTGC', 'CCTGG', 'CCTGT', 'CCTTA', 'CCTTC', 'CCTTG', 'CCTTT', 'CGACC', 'CGACT',
'CGATC', 'CGATT', 'CGCAC', 'CGCAT', 'CGCCA', 'CGCCC', 'CGCCG', 'CGCCT', 'CGCGC', 'CGCGT', 'CGCTA', 'CGCTC', 'CGCTG', 'CGCTT', 'CGGCC', 'CGGCT', 'CGGTC',
'CGGTT', 'CGTAC', 'CGTAT', 'CGTCA', 'CGTCC', 'CGTCG', 'CGTCT', 'CGTGC', 'CGTGT', 'CGTTA', 'CGTTC', 'CGTTG', 'CGTTT', 'CTAAC', 'CTAAT', 'CTACA', 'CTACC',
'CTACG', 'CTACT', 'CTAGC', 'CTAGT', 'CTATA', 'CTATC', 'CTATG', 'CTATT', 'CTCAA', 'CTCAC', 'CTCAG', 'CTCAT', 'CTCCA', 'CTCCC', 'CTCCG', 'CTCCT', 'CTCGA',
'CTCGC', 'CTCGG', 'CTCGT', 'CTCTA', 'CTCTC', 'CTCTG', 'CTCTT', 'CTGAC', 'CTGAT', 'CTGCA', 'CTGCC', 'CTGCG', 'CTGCT', 'CTGGC', 'CTGGT', 'CTGTA', 'CTGTC',
'CTGTG', 'CTGTT', 'CTTAA', 'CTTAC', 'CTTAG', 'CTTAT', 'CTTCA', 'CTTCC', 'CTTCG', 'CTTCT', 'CTTGA', 'CTTGC', 'CTTGG', 'CTTGT', 'CTTTA', 'CTTTC', 'CTTTG',
'CTTTT', 'GACCC', 'GACCT', 'GACTC', 'GACTT', 'GATCC', 'GATCT', 'GATTC', 'GATTT', 'GCACC', 'GCACT', 'GCATC', 'GCATT', 'GCCAC', 'GCCAT', 'GCCCA', 'GCCCC',
'GCCCG', 'GCCCT', 'GCCGC', 'GCCGT', 'GCCTA', 'GCCTC', 'GCCTG', 'GCCTT', 'GCGCC', 'GCGCT', 'GCGTC', 'GCGTT', 'GCTAC', 'GCTAT', 'GCTCA', 'GCTCC', 'GCTCG',
'GCTCT', 'GCTGC', 'GCTGT', 'GCTTA', 'GCTTC', 'GCTTG', 'GCTTT', 'GGCCC', 'GGCCT', 'GGCTC', 'GGCTT', 'GGTCC', 'GGTCT', 'GGTTC', 'GGTTT', 'GTACC', 'GTACT',
'GTATC', 'GTATT', 'GTCAC', 'GTCAT', 'GTCCA', 'GTCCC', 'GTCCG', 'GTCCT', 'GTCGC', 'GTCGT', 'GTCTA', 'GTCTC', 'GTCTG', 'GTCTT', 'GTGCC', 'GTGCT', 'GTGTC',
'GTGTT', 'GTTAC', 'GTTAT', 'GTTCA', 'GTTCC', 'GTTCG', 'GTTCT', 'GTTGC', 'GTTGT', 'GTTTA', 'GTTTC', 'GTTTG', 'GTTTT', 'TAACC', 'TAACT', 'TAATC', 'TAATT',
'TACAC', 'TACAT', 'TACCA', 'TACCC', 'TACCG', 'TACCT', 'TACGC', 'TACGT', 'TACTA', 'TACTC', 'TACTG', 'TACTT', 'TAGCC', 'TAGCT', 'TAGTC', 'TAGTT', 'TATAC',
'TATAT', 'TATCA', 'TATCC', 'TATCG', 'TATCT', 'TATGC', 'TATGT', 'TATTA', 'TATTC', 'TATTG', 'TATTT', 'TCAAC', 'TCAAT', 'TCACA', 'TCACC', 'TCACG', 'TCACT',
'TCAGC', 'TCAGT', 'TCATA', 'TCATC', 'TCATG', 'TCATT', 'TCCAA', 'TCCAC', 'TCCAG', 'TCCAT', 'TCCCA', 'TCCCC', 'TCCCG', 'TCCCT', 'TCCGA', 'TCCGC', 'TCCGG',
'TCCGT', 'TCCTA', 'TCCTC', 'TCCTG', 'TCCTT', 'TCGAC', 'TCGAT', 'TCGCA', 'TCGCC', 'TCGCG', 'TCGCT', 'TCGGC', 'TCGGT', 'TCGTA', 'TCGTC', 'TCGTG', 'TCGTT',
'TCTAA', 'TCTAC', 'TCTAG', 'TCTAT', 'TCTCA', 'TCTCC', 'TCTCG', 'TCTCT', 'TCTGA', 'TCTGC', 'TCTGG', 'TCTGT', 'TCTTA', 'TCTTC', 'TCTTG', 'TCTTT', 'TGACC',
'TGACT', 'TGATC', 'TGATT', 'TGCAC', 'TGCAT', 'TGCCA', 'TGCCC', 'TGCCG', 'TGCCT', 'TGCGC', 'TGCGT', 'TGCTA', 'TGCTC', 'TGCTG', 'TGCTT', 'TGGCC', 'TGGCT',
'TGGTC', 'TGGTT', 'TGTAC', 'TGTAT', 'TGTCA', 'TGTCC', 'TGTCG', 'TGTCT', 'TGTGC', 'TGTGT', 'TGTTA', 'TGTTC', 'TGTTG', 'TGTTT', 'TTAAC', 'TTAAT', 'TTACA',
'TTACC', 'TTACG', 'TTACT', 'TTAGC', 'TTAGT', 'TTATA', 'TTATC', 'TTATG', 'TTATT', 'TTCAA', 'TTCAC', 'TTCAG', 'TTCAT', 'TTCCA', 'TTCCC', 'TTCCG', 'TTCCT',
'TTCGA', 'TTCGC', 'TTCGG', 'TTCGT', 'TTCTA', 'TTCTC', 'TTCTG', 'TTCTT', 'TTGAC', 'TTGAT', 'TTGCA', 'TTGCC', 'TTGCG', 'TTGCT', 'TTGGC', 'TTGGT', 'TTGTA',
'TTGTC', 'TTGTG', 'TTGTT', 'TTTAA', 'TTTAC', 'TTTAG', 'TTTAT', 'TTTCA', 'TTTCC', 'TTTCG', 'TTTCT', 'TTTGA', 'TTTGC', 'TTTGG', 'TTTGT', 'TTTTA', 'TTTTC',
'TTTTG', 'TTTTT']
# Pre-fills the mutation types variable
size = 5
mut_types_initial = perm(size, "ACGT")
mut_types = []
for tsbs in tsb:
for mut in mut_types_initial:
current_base = mut[int(size/2)]
if current_base == 'C' or current_base == 'T':
for base in bases:
if base != current_base:
mut_types.append(tsbs+":"+mut[0:int(size/2)] + "[" + current_base+">"+ base+"]"+mut[int(size/2)+1:])
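	# e.g. tsbs='T' and mut='ACCGT' mutated to 'A' contribute the entry 'T:AC[C>A]GT'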
# Organizes all of the mutation types for DINUCs
mutation_types_tsb_context = []
for base in bases:
for mut in mutation_types:
for base2 in bases:
for base3 in tsb:
mutation_types_tsb_context.append(''.join([base3,":",base,"[",mut,"]",base2]))
for base in bases:
for mut in mutation_types_non_tsb:
for base2 in bases:
mutation_types_tsb_context.append(''.join(['Q:', base, "[", mut, "]", base2]))
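	# e.g. 'T:A[CC>AA]A' for the TSB dinucleotide types and 'Q:A[AC>CA]A' for the non-TSB ones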
indel_types_tsb = []
indel_types_simple = []
indel_complete = []
indel_cat = ['Del', 'Ins']
indel_types = ['1:Del:C:0', '1:Del:C:1', '1:Del:C:2', '1:Del:C:3', '1:Del:C:4', '1:Del:C:5',
'1:Del:T:0', '1:Del:T:1', '1:Del:T:2', '1:Del:T:3', '1:Del:T:4', '1:Del:T:5',
'1:Ins:C:0', '1:Ins:C:1', '1:Ins:C:2', '1:Ins:C:3', '1:Ins:C:4', '1:Ins:C:5',
'1:Ins:T:0', '1:Ins:T:1', '1:Ins:T:2', '1:Ins:T:3', '1:Ins:T:4', '1:Ins:T:5',
# >1bp INDELS
'2:Del:R:0', '2:Del:R:1', '2:Del:R:2', '2:Del:R:3', '2:Del:R:4', '2:Del:R:5',
'3:Del:R:0', '3:Del:R:1', '3:Del:R:2', '3:Del:R:3', '3:Del:R:4', '3:Del:R:5',
'4:Del:R:0', '4:Del:R:1', '4:Del:R:2', '4:Del:R:3', '4:Del:R:4', '4:Del:R:5',
'5:Del:R:0', '5:Del:R:1', '5:Del:R:2', '5:Del:R:3', '5:Del:R:4', '5:Del:R:5',
'2:Ins:R:0', '2:Ins:R:1', '2:Ins:R:2', '2:Ins:R:3', '2:Ins:R:4', '2:Ins:R:5',
'3:Ins:R:0', '3:Ins:R:1', '3:Ins:R:2', '3:Ins:R:3', '3:Ins:R:4', '3:Ins:R:5',
'4:Ins:R:0', '4:Ins:R:1', '4:Ins:R:2', '4:Ins:R:3', '4:Ins:R:4', '4:Ins:R:5',
'5:Ins:R:0', '5:Ins:R:1', '5:Ins:R:2', '5:Ins:R:3', '5:Ins:R:4', '5:Ins:R:5',
#MicroHomology INDELS
'2:Del:M:1', '3:Del:M:1', '3:Del:M:2', '4:Del:M:1', '4:Del:M:2', '4:Del:M:3',
'5:Del:M:1', '5:Del:M:2', '5:Del:M:3', '5:Del:M:4', '5:Del:M:5', '2:Ins:M:1',
'3:Ins:M:1', '3:Ins:M:2', '4:Ins:M:1', '4:Ins:M:2', '4:Ins:M:3', '5:Ins:M:1',
'5:Ins:M:2', '5:Ins:M:3', '5:Ins:M:4', '5:Ins:M:5', 'complex', 'non_matching']
for indels in indel_types[:-13]:
for tsbs in tsb_I:
indel_types_tsb.append(tsbs + ":" + indels)
for indels in indels_seq_types:
repeat = str(len(indels))
for id_cat in indel_cat:
for l in range(0, 6, 1):
indel_complete.append(":".join([repeat, id_cat, indels, str(l)]))
for id_cat in indel_cat:
for i in range(0, 6, 1):
indel_complete.append(":".join(['5',id_cat, '5',str(i)]))
indel_types_simple = indel_types[:24]
indel_types_simple.append('long_Del')
indel_types_simple.append('long_Ins')
indel_types_simple.append('MH')
indel_types_simple.append('complex')
# Instantiates the initial contexts to generate matrices for
contexts = ['6144']
# Organizes all of the reference directories for later reference:
ref_dir, tail = os.path.split(os.path.dirname(os.path.abspath(__file__)))
chrom_path =ref_dir + '/references/chromosomes/tsb/' + genome + "/"
transcript_path = ref_dir + '/references/chromosomes/transcripts/' + genome + "/"
# Terminates the code if the genome reference files have not been created/installed
if not os.path.exists(chrom_path):
print("The specified genome: " + genome + " has not been installed\nRun the following command to install the genome:\n\tpython sigProfilerMatrixGenerator/install.py -g " + genome)
sys.exit()
# Organizes all of the input and output directories:
if vcfFiles[-1] != "/":
vcfFiles += "/"
vcf_path = vcfFiles + "input/"
vcf_path_original = vcf_path
if not os.path.exists(vcf_path) or len(os.listdir(vcf_path)) < 1:
os.makedirs(vcf_path, exist_ok=True)
input_files = os.listdir(vcfFiles)
if os.path.exists(vcfFiles + "input/"):
input_files.remove("input")
if os.path.exists(vcfFiles + "logs/"):
input_files.remove("logs")
if ".DS_Store" in input_files:
input_files.remove(".DS_Store")
if "__init__.py" in input_files:
input_files.remove("__init__.py")
if "__pycache__" in input_files:
input_files.remove("__pycache__")
if os.path.exists(vcfFiles + "output/"):
input_files.remove("output")
for files in input_files:
shutil.copy(vcfFiles + files, vcf_path + files)
output_matrix = vcfFiles + "output/"
if not os.path.exists(output_matrix):
os.makedirs(output_matrix)
# Organizes the error and log files
time_stamp = datetime.date.today()
output_log_path = vcfFiles + "logs/"
if not os.path.exists(output_log_path):
os.makedirs(output_log_path)
error_file = output_log_path + 'SigProfilerMatrixGenerator_' + project + "_" + genome + str(time_stamp) + ".err"
log_file = output_log_path + 'SigProfilerMatrixGenerator_' + project + "_" + genome + str(time_stamp) + ".out"
if os.path.exists(error_file):
os.remove(error_file)
if os.path.exists(log_file):
os.remove(log_file)
sys.stderr = open(error_file, 'w')
log_out = open(log_file, 'w')
log_out.write("THIS FILE CONTAINS THE METADATA ABOUT SYSTEM AND RUNTIME\n\n\n")
log_out.write("-------System Info-------\n")
log_out.write("Operating System Name: "+ platform.uname()[0]+"\n"+"Nodename: "+ platform.uname()[1]+"\n"+"Release: "+ platform.uname()[2]+"\n"+"Version: "+ platform.uname()[3]+"\n")
log_out.write("\n-------Python and Package Versions------- \n")
log_out.write("Python Version: "+str(platform.sys.version_info.major)+"."+str(platform.sys.version_info.minor)+"."+str(platform.sys.version_info.micro)+"\n")
log_out.write("SigProfilerMatrixGenerator Version: "+sig.__version__+"\n")
log_out.write("SigProfilerPlotting version: "+sigPlt.__version__+"\n")
log_out.write("matplotlib version: "+plt.__version__+"\n")
log_out.write("statsmodels version: "+statsmodels.__version__+"\n")
log_out.write("scipy version: "+scipy.__version__+"\n")
log_out.write("pandas version: "+pd.__version__+"\n")
log_out.write("numpy version: "+np.__version__+"\n")
log_out.write("\n-------Vital Parameters Used for the execution -------\n")
log_out.write("Project: {}\nGenome: {}\nInput File Path: {}\nexome: {}\nbed_file: {}\nchrom_based: {}\nplot: {}\ntsb_stat: {}\nseqInfo: {}\n".format(project, genome, vcfFiles, str(exome), str(bed_file), str(chrom_based), str(plot), str(tsb_stat), str(seqInfo)))
log_out.write("\n-------Date and Time Data------- \n")
tic = datetime.datetime.now()
log_out.write("Date and Clock time when the execution started: "+str(tic)+"\n\n\n")
log_out.write("-------Runtime Checkpoints------- \n")
log_out.close()
# Gathers all of the vcf files:
vcf_files_temp = os.listdir(vcf_path)
vcf_files = []
first_extenstion = True
for file in vcf_files_temp:
# Skips hidden files
if file[0:3] == '.DS' or file[0:2] == '__':
pass
else:
vcf_files.append(file)
# Creates a temporary folder for sorting and generating the matrices
file_name = vcf_files[0].split(".")
file_extension = file_name[-1]
unique_folder = project + "_"+ str(uuid.uuid4())
output_path = output_matrix + "temp/" + unique_folder + "/"
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.makedirs(output_path)
skipped_muts = 0
# Converts the input files to standard text in the temporary folder
if file_extension == 'genome':
snv, indel, skipped, samples = convertIn.convertTxt(project, vcf_path, genome, output_path)
else:
if file_extension == 'txt':
snv, indel, skipped, samples = convertIn.convertTxt(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'vcf':
snv, indel, skipped, samples = convertIn.convertVCF(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'maf':
snv, indel, skipped, samples = convertIn.convertMAF(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'tsv':
snv, indel, skipped, samples = convertIn.convertICGC(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
else:
print("File format not supported")
skipped_muts += skipped
# Instantiates variables for final output statistics
analyzed_muts = [0, 0, 0]
sample_count_high = 0
# Begins matrix generation for all possible contexts
for i in range(0, 2, 1):
if i == 0 and snv:
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
output_path_snv = output_path + "SNV/"
vcf_files = os.listdir(output_path_snv)
vcf_path = output_path_snv
print("Starting matrix generation for SNVs and DINUCs...", end='', flush=True)
start = time.time()
# Skips SNVs if none are present
elif i == 0 and not snv:
continue
elif i == 1 and indel:
mutation_ID = {}
mutation_ID['ID'] = pd.DataFrame(0, index=indel_types, columns=samples)
mutation_ID['simple'] = pd.DataFrame(0, index=indel_types_simple, columns=samples)
mutation_ID['tsb'] = pd.DataFrame(0, index=indel_types_tsb, columns=samples)
mutation_ID['complete'] = pd.DataFrame(0, index=indel_complete, columns=samples)
contexts = ['INDEL']
output_path_indel = output_path + "INDEL/"
vcf_files = os.listdir(output_path_indel)
vcf_path = output_path_indel
print("Starting matrix generation for INDELs...", end='', flush=True)
start = time.time()
# Skips INDELs if none are present and deletes the temp folder
elif i ==1 and not indel:
shutil.rmtree(output_matrix + "temp/")
continue
# Removes hidden files generated in macos
if ".DS_Store" in vcf_files:
vcf_files.remove(".DS_Store")
# Generates the bed regions if a bed file was provided
if bed_file != None:
bed = True
bed_file_path = bed_file
bed_ranges = matGen.BED_filtering(bed_file_path)
else:
bed_file_path = None
# Sorts files based on chromosome, sample, and start position
if not chrom_based:
chrom_start = None
if i != 1:
for file in vcf_files:
chrom = file.split("_")[0]
with open(vcf_path + file) as f:
lines = [line.strip().split() for line in f]
lines = sorted(lines, key = lambda x: (x[0], int(x[2])))
context = '6144'
mutation_pd, skipped_mut, total, total_DINUC, mutation_dinuc_pd_all = matGen.catalogue_generator_single (lines, chrom, mutation_pd, mutation_dinuc_pd_all, mutation_types_tsb_context, vcf_path, vcf_path_original, vcf_files, bed_file_path, chrom_path, project, output_matrix, context, exome, genome, ncbi_chrom, functionFlag, bed, bed_ranges, chrom_based, plot, tsb_ref, transcript_path, tsb_stat, seqInfo, gs, log_file)
if chrom_based and not exome and not bed:
matrices = matGen.matrix_generator (context, output_matrix, project, samples, bias_sort, mutation_pd, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat)
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
dinuc_mat = matGen.matrix_generator_DINUC (output_matrix, samples, bias_sort, mutation_dinuc_pd_all, mutation_types_tsb_context, project, exome, bed, chrom, plot)
					mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
import numpy as np
from pandas import DataFrame
from sklearn.cluster import DBSCAN
from GoMapClustering.AngularClustering import DBACAN
from GoMapClustering.base import GoMapClusterMixin
class AngleMetricDBSCAN(GoMapClusterMixin):
def __init__(
self,
max_distance: float,
max_angle: float,
min_samples: int
) -> None:
self.__dbscan = DBSCAN(eps=max_distance, min_samples=min_samples)
self.__dbacan = DBACAN(eps=max_angle, min_samples=min_samples)
def fit_predict(self, X, y=None):
'''
# Parameters
- X (List[Tuple[float, float, float]]): Tuples of the format (x, y, angle)
- y (None): Not used, only for compatability reasons
'''
        X = DataFrame(X, columns=['x', 'y', 'angle'])
import os
import tqdm
import pandas as pd
def load_records(path, results_fname='results', depth=1):
assert results_fname in ['results', 'meters']
records = []
def add_record(results_path):
try:
            df = pd.read_pickle(results_path)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from matplotlib.ticker import ScalarFormatter
import matplotlib.dates as mdates
inf_df = pd.read_excel('SNB_data.xlsx', sheet_name="Inf", header=0)
fig = plt.subplot()
plt.plot(inf_df['Date'], inf_df['Real Inflation'])
plt.plot(inf_df['Date'], inf_df.iloc[:, 2:], ':', alpha=0.7)
plt.ylabel('Inflation')
# set the ticks so they don't overlap
fig.set_xticks(np.arange(0, len(inf_df['Date'])+1, 10))
plt.xticks(rotation=30)
#fig.set_yticks(np.arange(0, max(inf_df['Real Inflation']), 3))
for i1, col in enumerate(inf_df.iloc[:, 2:].columns):
col1 = col
if (col == '20015-Q4'):
col1 = '2015-Q4'
i = inf_df.index[inf_df['Date'] == col1].tolist()[0]
y_pos = inf_df.iloc[:, 2:].notna()[::-1].idxmax()
df2 = inf_df.iloc[:, 2:].copy(deep=False)
y = df2.iloc[y_pos[i1], i1]
i = min(i, 64-i1)
plt.annotate(col, xy=(i + 12, y), size=4)
plt.savefig('output/inflation_forecasts.png', dpi=300)
##### Figure 2: FX vs M0
df2 = pd.read_excel('yolan.xlsx', sheet_name="FXvsM0", header=0)
df2['Date'] = pd.to_datetime(df2['Date'])
fig1, ax = plt.subplots(1,1)
df2.plot(x="Date", y=['FX', 'M0'], ax=ax)
labels=["Foreign Currency Reserve", "Monetary Base"]
plt.legend(labels)
plt.yscale('log')
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.ticklabel_format(style='plain', axis='y')
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.ylabel('mil. CHF')
#plt.legend()
plt.savefig('output/fx.png', dpi=300)
## Figure 3: FX
df3 = pd.read_excel('yolan.xlsx', sheet_name="FX", header=0)
fig2, ax1 = plt.subplots(1,1)
df3['Date'] = pd.to_datetime(df3['Date'], format='%Y')
from sklearn.covariance import EmpiricalCovariance, LedoitWolf, OAS
from sklearn.model_selection import GroupShuffleSplit
from scipy.spatial.distance import mahalanobis
import numpy as np
import pandas as pd
from multiprocessing import Pool
from contextlib import closing
from functools import partial
def similarity(X, y, group=None, n_splits = 1000, class_order=None,
return_raw=False, return_distance=False, normalize=False,
split_size=None, distance='mahalanobis',
cov_estimator='oas', cov_method='shared_split', n_jobs=1):
"""
Calculates similarity between points of each class.
Parameters
----------
X, y: arrays of same shape[0]
Respectively the features and labels.
group : array of shape equal to y, optional
Half of groups will make each split.
If None, y is used instead.
n_splits : int, default 100
How many times to repeat the separatioon and calculation of distances.
class_order : list, optional
Class ordering. If None, np.unique(y) is used
split_size : int, optional
size of each set on each split.
if None, half of the groups are used in each (floor rounded)
distance : {'mahalanobis', 'euclidean'}
How to measure distance between split means.
cov_estimator : {'oas', 'lw', 'ml'}
Which method will decide regularization strength
Ignored if distance is 'euclidean'
cov_method : {'shared_split','shared_single', 'class_single', 'class_split'}
shared_single - only one covariance for whole dataset
shared_split - one covariance, recalculated in each split
class_single - one covariance per class
class_split - one covariance per class per split
Ignored if distance is 'euclidean'
"""
assert cov_method in ['shared_split','shared_single', 'class_split', 'class_single']
assert y.shape[1]==1
y = y.ravel()
classes = np.unique(y) if class_order is None else class_order
groups = classes if group is None else np.unique(group)
split_size = len(groups)//2 if split_size is None else split_size
# sh = GroupShuffleSplit(n_splits, split_size, split_size)
    if distance == 'mahalanobis':
clf = MahalanobisClassifier(classes=classes, estimator=cov_estimator,
shared_cov= ('shared' in cov_method),
assume_centered=False)
if 'split' not in cov_method:
clf.fit_cov(X, y)
    elif distance == 'euclidean':
clf = EuclideanClassifier()
raise NotImplementedError
else:
raise NotImplementedError
with closing(Pool(n_jobs)) as p:
func = partial(one_split, clf=clf, split_size=split_size,
X=X, y=y, group=group, classes=classes,
cov_method=cov_method)
res = p.map(func, np.arange(n_splits))
results = pd.concat(res)
if normalize:
results[classes] = results[classes]/results[classes].values.max()
if return_distance:
pass
else:
results[classes] = 1/(1+results[classes])
if return_raw:
return results
else:
results = results.reset_index().groupby('Real Time').mean().drop(['split','cv'],axis=1)
if 'class' in cov_method:
results = (results+results.T)/2
return results
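# Usage sketch only: a hypothetical call on toy data, assuming the MahalanobisClassifier
# referenced above is available in this module. Shapes, labels and parameter values here
# are illustrative assumptions, not taken from the original code.
def _similarity_usage_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(120, 5)                        # 120 trials, 5 features
    y_demo = np.repeat(['a', 'b', 'c'], 40)[:, None]  # labels with shape (n, 1), as asserted above
    g_demo = np.tile(np.arange(12), 10)               # 12 groups; half of them form each split
    return similarity(X_demo, y_demo, group=g_demo, n_splits=20,
                      cov_method='shared_split', n_jobs=1)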
def one_split(cv_i, clf, split_size, X, y, group, classes, cov_method):
sh = GroupShuffleSplit(1, split_size, split_size,random_state=cv_i)
(idx_1, idx_2) = next(sh.split(X, y, group))
X_1, X_2, y_1, y_2 = X[idx_1], X[idx_2], y[idx_1], y[idx_2]
mean_1 = [X_1[(y_1==yi).ravel()].mean(axis=0) for yi in classes]
mean_2 = [X_2[(y_2==yi).ravel()].mean(axis=0) for yi in classes]
if 'split' in cov_method:
clf.fit_cov((X_1, X_2), (y_1, y_2), is_tuple=True)
dists_1 = clf.fit(X_1, y_1).transform(mean_2)
dists_2 = clf.fit(X_2, y_2).transform(mean_1)
local_res = pd.DataFrame(np.vstack((dists_1,dists_2)),
index=pd.Index(np.hstack((classes, classes)),
name='Real Time'),
                                 columns=pd.Index(classes, name='Target time'))
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old-style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately they don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only Categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only Categoricals with the same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with a numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that inequality comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# the unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
"""Utils for the command line tool."""
# Standard library
import datetime as dt
import logging
import os
import pickle
import sys
from pathlib import Path
# Third-party
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# from ipdb import set_trace
def count_to_log_level(count: int) -> int:
"""Map occurrence of the command line option verbose to the log level."""
if count == 0:
return logging.ERROR
elif count == 1:
return logging.WARNING
elif count == 2:
return logging.INFO
else:
return logging.DEBUG
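# Usage sketch (not part of the original module): wire a CLI verbosity count
# into logging setup; the "-vv" convention below is an assumption for illustration.
#   logging.basicConfig(level=count_to_log_level(2))  # -vv -> logging.INFO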
def extract_tqc(grib_file, out_dir, date_str, lt):
"""Extract tqc from model file using fieldextra.
Args:
grib_file (str): Grib file
out_dir (str): Output directory
date_str (str): date
lt (int): leadtime
"""
logging.debug(f"Apply fxfilter to: {grib_file}.")
# new filename
new_name = Path(out_dir, f"tqc_{date_str}_{lt:03}.grb2")
logging.info(f"Creating: {str(new_name)}.")
# check whether filtered file already exists
if new_name.is_file():
logging.info(f" ...exists already!")
return
# apply fxfilter
cmd = f"fxfilter -o {new_name} -s TQC {grib_file}"
logging.debug(f"Will run: {cmd}")
os.system(cmd)
return
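# Usage sketch (illustrative only; the grib path below is made up):
#   extract_tqc("/store/.../c1effsurf003_000", out_dir="tqc", date_str="21010100", lt=3)
# would run "fxfilter -o tqc/tqc_21010100_003.grb2 -s TQC /store/.../c1effsurf003_000".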
def retrieve_cosmo_files(start, end, interval, out_dir, max_lt):
"""Retrieve COSMO files.
Args:
start (datetime): start
end (datetime): end
interval (int): interval between simulations
out_dir (str): output directory for tqc-files
max_lt (int): maximum leadtime
"""
cosmo_dir = f"/store/s83/osm/COSMO-1E/" # FCST{start.strftime('%y')}"
logging.info(f"Retrieving COSMO-files from {cosmo_dir}")
# create output directory
Path(out_dir).mkdir(parents=True, exist_ok=True)
# list of ini-dates of simulations
dates = pd.date_range(start, end, freq=f"{interval}H")
# loop over simulations
for date in dates:
# string of date for directories
date_str = date.strftime("%y%m%d%H")
# collect grib files
for lt in range(0, max_lt + 1, 1):
model_file = list(
Path(cosmo_dir, f"FCST{date.strftime('%y')}").glob(
f"{date_str}_???/grib/c1effsurf{lt:03}_000"
)
)
if len(model_file) == 0:
logging.warning(f"No file found for {date_str}: +{lt}h.")
elif len(model_file) > 1:
print(f"Model file description ambiguous.")
sys.exit(1)
else:
# apply fxfilter
extract_tqc(model_file[0], out_dir, date_str, lt)
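# Usage sketch (assumed values, not from the original code): pull TQC fields for
# simulations initialised every 12 h over two days, up to +24 h lead time.
#   retrieve_cosmo_files(start=dt.datetime(2021, 1, 1, 0), end=dt.datetime(2021, 1, 2, 12),
#                        interval=12, out_dir="tqc", max_lt=24)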
def get_fls_fractions(in_dir):
"""Retrieve dataframe containing FLS fractions.
Args:
in_dir (str): input directory
"""
pass
def get_ml_mask(lats, lons):
"""Retrieve mask of Swiss Plateau (Mittelland).
Args:
lats (array): latitudes
lons (array): longitudes
Returns:
mask (array with True and False)
"""
# polygon points
ll_corner = (46.12, 5.89)
p1 = (46.06, 6.10)
p2 = (46.33, 6.78)
p3 = (46.55, 7.01)
p4 = (46.64, 7.31)
p5 = (46.65, 7.65)
p6 = (46.62, 7.79)
p7 = (46.81, 8.33)
p8 = (47.09, 9.78)
p9 = (47.82, 10.02)
p10 = (47.81, 8.34)
p11 = (47.38, 7.87)
p12 = (47.29, 7.68)
p13 = (47.25, 7.45)
p14 = (47.13, 7.06)
p15 = (47.07, 6.87)
p16 = (46.73, 6.36)
p17 = (46.59, 6.30)
p18 = (46.18, 5.86)
# create polygon
Path = mpath.Path
path_data = [
(Path.MOVETO, ll_corner),
(Path.LINETO, p1),
(Path.LINETO, p2),
(Path.LINETO, p3),
(Path.LINETO, p4),
(Path.LINETO, p5),
(Path.LINETO, p6),
(Path.LINETO, p7),
(Path.LINETO, p8),
(Path.LINETO, p9),
(Path.LINETO, p10),
(Path.LINETO, p11),
(Path.LINETO, p12),
(Path.LINETO, p13),
(Path.LINETO, p14),
(Path.LINETO, p15),
(Path.LINETO, p16),
(Path.LINETO, p17),
(Path.LINETO, p18),
(Path.CLOSEPOLY, ll_corner),
]
codes, verts = zip(*path_data)
path = mpath.Path(verts, codes)
# store original shape
shape = lats.shape
# path.contains_points checks whether points are within polygon
# however, this function can only handle vectors
# -> ravel and unravel
latlon = [[lat, lon] for lat, lon in zip(lats.ravel(), lons.ravel())]
mask = np.reshape(path.contains_points(latlon), shape)
return mask
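# Usage sketch (synthetic grid, for illustration only): build a 2-D lat/lon grid
# and keep only points inside the Swiss Plateau polygon.
#   lons2d, lats2d = np.meshgrid(np.linspace(5.8, 10.1, 100), np.linspace(46.0, 47.9, 100))
#   mask = get_ml_mask(lats2d, lons2d)   # boolean array, same shape as lats2d
#   plateau_lats = lats2d[mask]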
def save_as_pickle(obj, path):
"""Save object as pickled object.
Args:
obj (python object): usually dataframe
path (str): full path
"""
# create parent if not existing yet
print(path.parents[0])
path.parents[0].mkdir(parents=True, exist_ok=True)
# dump object
pickle.dump(obj, open(path, "wb"))
logging.info(f"Saved {path}")
return
def calc_fls_fractions(
start,
end,
interval,
in_dir_obs,
in_dir_model,
out_dir_fls,
max_lt,
extend_previous,
threshold,
):
"""Calculate FLS fractions in Swiss Plateau for OBS and FCST.
Args:
start (datetime): start
end (datetime): end
interval (int): interval between simulations
in_dir_obs (str): dir with sat data
in_dir_model (str): dir with model data
out_dir_fls (str): dir with fls fractions
max_lt (int): maximum leadtime
extend_previous (bool): load previous obs and fcst dataframes
threshold (float): threshold for low stratus confidence level
Returns:
obs (dataframe)
fcst (dataframe)
"""
# determine init and valid timestamps
ini_times = pd.date_range(start=start, end=end, freq=f"{interval}H")
valid_times = pd.date_range(
start=start, end=end + dt.timedelta(hours=max_lt), freq="1H"
)
# retrieve OBS dataframe
obs_path = Path(out_dir_fls, "obs.p")
if obs_path.is_file() and extend_previous:
obs = pickle.load(open(obs_path, "rb"))
logging.info("Loaded obs from pickled object.")
else:
# create dataframe
obs = pd.DataFrame(columns=["fls_frac", "high_clouds"], index=valid_times)
from html.parser import HTMLParser
from bs4 import BeautifulSoup
import glob, os
import pandas as pd
import string
import nltk
import re
from nltk.corpus import stopwords
from nltk.stem.snowball import RussianStemmer
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
def parse(dir):
files = []
for file in glob.glob(f'{dir}/messages*'):
files.append(file)
fid = 0
messages = []
for file in files:
fid += 1
print('parsing', fid, len(files))
doc = BeautifulSoup(open(file), 'html.parser')
doc_messages = doc.find_all('div', ['message default clearfix', 'message default clearfix joined'])
messages.extend(doc_messages)
data = {}
id = 0
for raw_message in messages:
id += 1
if id % 100 == 0:
print('processing', id, len(messages))
author = raw_message.find('div', class_='initials')
author_name = raw_message.find('div', class_='from_name')
if author is not None:
last_author = author
last_author_name = author_name
message = raw_message.find('div', class_='text')
date = raw_message.find('div', class_='pull_right date details')
if message is not None:
author_data = last_author.text.strip()
author_name_data = last_author_name.text.strip()
timestamp_data = pd.to_datetime(date['title'], dayfirst=True)
text_data = message.text.strip()
data[id] = (author_data, author_name_data, timestamp_data, text_data)
df = pd.DataFrame.from_dict(data, orient='index', columns=['author_initials', 'author_name', 'timestamp', 'text'])
df.to_csv('crab_data.csv', encoding='utf-8')
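# Usage sketch (directory name is an assumption): point parse() at a Telegram
# "Export chat history" folder containing messages*.html files; it writes
# crab_data.csv next to the script.
#   parse('ChatExport_2021')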
def plot_general_activity():
df = pd.read_csv('crab_data.csv')
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import pandas as pd
import os
import h5py
from shapely.geometry import point,polygon
import icepyx as ipd
from datetime import date
from dateutil.relativedelta import relativedelta
from mpl_toolkits.axes_grid1 import make_axes_locatable
from playsound import playsound
def get_data(bbox,date_range,path) :
try:
os.mkdir(path+'/'+date_range[0]+'--'+date_range[1])
except:
None
path = path+'/'+date_range[0]+'--'+date_range[1]
#creating the icepyx object
region = ipd.Query('ATL06',bbox,date_range)
print(region.avail_granules())
region.granules.avail
#logging into earthdata
earthdata_uid = input("Enter your Earthdata username:")
email = input("Enter your Eathdata email:")
region.earthdata_login(earthdata_uid,email)
#creating a default variable list
region.order_vars.append(defaults=True)
#print(region.order_vars.wanted,sep='/n')
region.order_vars.remove(all=True)
#modifying the default variable list
#print(region.order_vars.wanted)
region.order_vars.append(var_list=['latitude'])
region.order_vars.append(var_list=['longitude'])
region.order_vars.append(var_list=['h_li'])
region.order_vars.append(var_list=['x_atc'])
region.order_vars.append(var_list=['atl06_quality_summary'])
print("The requested data is:")
print(region.order_vars.wanted)
region.subsetparams(Coverage=region.order_vars.wanted)
region.reqparams['page_size']=int(input("Enter desired number of granules per order:"))
#ordering data
email=input("Do you want an email containing information of your order requests(y/n)")
email=True if email=='y' else False
region.order_granules(email=email)
#downloading data
region.download_granules(path)
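# Usage sketch (bounding box and dates are placeholders, not from the original
# script): download ATL06 granules for a small region and one month.
#   bbox = [86.7, 27.9, 87.0, 28.1]  # lon_min, lat_min, lon_max, lat_max
#   get_data(bbox, ['2019-06-01', '2019-06-30'], path='./data')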
def data_to_csv(path_in):
group = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
try:
os.mkdir(path_in+'/CSV')
except:
None
path_out = path_in+'/CSV'
a=os.listdir(path_in)
try:
a.remove('.ipynb_checkpoints')
except:
None
for g in group:
beam = pd.DataFrame()
beam['lat']=[]
beam['lon']=[]
beam['h_li']=[]
beam['x_atc']=[]
beam['q_flag']=[]
for fname in a:
df = pd.DataFrame()
#Download 10 years historical data of stocks from yahoo finance wrapper by 'https://github.com/ranaroussi/yfinance'
#Process historical data for "adj close" prices and find percentage change
#Headline news data from 'https://www.kaggle.com/miguelaenlle/massive-stock-news-analysis-db-for-nlpbacktests?select=analyst_ratings_processed.csv'
#Map price percentage change to date and stock for headline news
#Imports
import concurrent.futures
import pandas as pd
import numpy as np
import yfinance as yf #Yahoo Finance wrapper
from tqdm.auto import tqdm
#Load headline data
df_hl = pd.read_csv('analyst_ratings_processed.csv')
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
"""
calculates particle geometric properties
"""
import cocpit.config as config
import cocpit.pic as pic # isort: split
import multiprocessing
import time
from functools import partial
import numpy as np
import pandas as pd
def get_attributes(filename, open_dir):
image = pic.Image(open_dir, filename)
# image.resize_stretch(desired_size)
image.find_contours()
if len(image.contours) != 0:
image.calculate_largest_contour()
image.calculate_area()
if image.area != 0.0:
image.calculate_perim()
image.calculate_hull_area()
image.morph_contours()
# image.mask_background()
count_edge_px = np.count_nonzero(image.edges())
std = np.std(np.nonzero(image.edges())) if count_edge_px > 0 else 0
lapl = image.laplacian()
contours = len(image.contours)
edges = count_edge_px
contrast = image.contrast()
cnt_area = image.area
solidity = image.solidity()
complexity = image.complexity()
equiv_d = image.equiv_d()
convex_perim = image.convex_perim(True)
hull_area = image.hull_area
perim = image.perim
phi = image.phi()
circularity = image.circularity()
perim_area_ratio = image.perim_area_ratio()
roundness = image.roundness()
filled_circular_area_ratio = image.filled_circular_area_ratio()
extreme_points = image.extreme_points()
else:
lapl = -999
contours = -999
edges = -999
contrast = -999
cnt_area = -999
solidity = -999
complexity = -999
equiv_d = -999
convex_perim = -999
hull_area = -999
perim = -999
phi = -999
circularity = -999
perim_area_ratio = -999
roundness = -999
filled_circular_area_ratio = -999
extreme_points = -999
std = -999
keys = [
"blur",
"contours",
"edges",
"std",
"cnt_area",
"contrast",
"circularity",
"solidity",
"complexity",
"equiv_d",
"convex_perim",
"hull_area",
"perim",
"phi",
"extreme_points",
"filled_circular_area_ratio",
"roundness",
"perim_area_ratio",
]
values = [
lapl,
contours,
edges,
std,
cnt_area,
contrast,
circularity,
solidity,
complexity,
equiv_d,
convex_perim,
hull_area,
perim,
phi,
extreme_points,
filled_circular_area_ratio,
roundness,
perim_area_ratio,
]
properties = {key: val for key, val in zip(keys, values)}
# turn dictionary into dataframe
properties = pd.DataFrame(properties, index=[0])
return properties
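# Usage sketch (file name is illustrative): compute the geometric-property row
# for a single particle image; returns a one-row DataFrame with the keys above.
#   props = get_attributes("particle_0001.png", open_dir="campaign_imgs/")
#   print(props[["cnt_area", "circularity", "roundness"]])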
def main(df, open_dir):
"""
reads in dataframe for a campaign after ice classification and
calculates particle geometric properties using the cocpit.pic module
returns
-------
df (pd.DataFrame): dataframe with image attributes appended
"""
files = df['filename']
start = time.time()
with multiprocessing.Pool(config.NUM_CPUS) as p:
properties = p.map(partial(get_attributes, open_dir=open_dir), files)
p.close()
# properties = Parallel(n_jobs=num_cpus)(
# delayed(get_attributes)(open_dir, filename) for filename in files
# )
# append new properties dictionary to existing dataframe
properties = pd.concat(properties, ignore_index=True)
df = pd.concat([df, properties], axis=1)
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from evalml.automl.automl_search import AutoMLSearch
from evalml.automl.engine import EngineBase
from evalml.objectives import F1, LogLossBinary
from evalml.preprocessing import split_data
@patch('evalml.pipelines.BinaryClassificationPipeline.score')
@patch('evalml.pipelines.BinaryClassificationPipeline.fit')
def test_train_and_score_pipelines(mock_fit, mock_score, dummy_binary_pipeline_class, X_y_binary):
X, y = X_y_binary
mock_score.return_value = {'Log Loss Binary': 0.42}
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', max_time=1, max_batches=1,
allowed_pipelines=[dummy_binary_pipeline_class])
pipeline = dummy_binary_pipeline_class({})
evaluation_result = EngineBase.train_and_score_pipeline(pipeline, automl, automl.X_train, automl.y_train)
assert mock_fit.call_count == automl.data_splitter.get_n_splits()
assert mock_score.call_count == automl.data_splitter.get_n_splits()
assert evaluation_result.get('training_time') is not None
assert evaluation_result.get('cv_score_mean') == 0.42
pd.testing.assert_series_equal(evaluation_result.get('cv_scores'), pd.Series([0.42] * 3))
for i in range(automl.data_splitter.get_n_splits()):
assert evaluation_result['cv_data'][i]['all_objective_scores']['Log Loss Binary'] == 0.42
@patch('evalml.pipelines.BinaryClassificationPipeline.score')
@patch('evalml.pipelines.BinaryClassificationPipeline.fit')
def test_train_and_score_pipelines_error(mock_fit, mock_score, dummy_binary_pipeline_class, X_y_binary, caplog):
X, y = X_y_binary
mock_score.side_effect = Exception('yeet')
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', max_time=1, max_batches=1,
allowed_pipelines=[dummy_binary_pipeline_class])
pipeline = dummy_binary_pipeline_class({})
evaluation_result = EngineBase.train_and_score_pipeline(pipeline, automl, automl.X_train, automl.y_train)
assert mock_fit.call_count == automl.data_splitter.get_n_splits()
assert mock_score.call_count == automl.data_splitter.get_n_splits()
assert evaluation_result.get('training_time') is not None
assert np.isnan(evaluation_result.get('cv_score_mean'))
    pd.testing.assert_series_equal(evaluation_result.get('cv_scores'), pd.Series([np.nan] * 3))  # api: pandas.Series
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
tm.assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
tm.assert_series_equal(result, expected)
def test_apply_trivial():
# GH 20066
# trivial apply: ignore input and return a constant dataframe.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df.iloc[1:], df.iloc[1:]],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df.iloc[1:])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH#20066; function passed into apply "
"returns a DataFrame with the same index "
"as the one to create GroupBy object.",
strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
# with the one used to create GroupBy object.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df, df],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df)
tm.assert_frame_equal(result, expected)
def test_fast_apply():
# make sure that fast apply is correctly called
# rather than raising any kind of error
    # otherwise the Python code path will be used
# which slows things down
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
assert not mutated
def test_apply_with_mixed_dtype():
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': np.random.randn(6),
'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
result = df.apply(lambda x: x, axis=1)
tm.assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
tm.assert_series_equal(result1, result2)
def test_groupby_as_index_apply(df):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
tm.assert_index_equal(res_as, exp)
tm.assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610 as the as_index=False returns a MI here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
tm.assert_index_equal(res_as_apply, exp_as_apply)
tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
tm.assert_index_equal(res, ind)
def test_apply_concat_preserve_names(three_group):
grouped = three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
assert result.index.names == ('A', 'B', 'stat')
result2 = grouped.apply(desc2)
assert result2.index.names == ('A', 'B', 'stat')
result3 = grouped.apply(desc3)
assert result3.index.names == ('A', 'B', None)
def test_apply_series_to_frame():
def f(piece):
with np.errstate(invalid='ignore'):
logged = np.log(piece)
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': logged})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, ts.index)
def test_apply_series_yield_constant(df):
result = df.groupby(['A', 'B'])['C'].apply(len)
assert result.index.names[:2] == ('A', 'B')
def test_apply_frame_yield_constant(df):
# GH13568
result = df.groupby(['A', 'B']).apply(len)
assert isinstance(result, Series)
assert result.name is None
result = df.groupby(['A', 'B'])[['C', 'D']].apply(len)
assert isinstance(result, Series)
assert result.name is None
def test_apply_frame_to_series(df):
grouped = df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
tm.assert_index_equal(result.index, expected.index)
tm.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series():
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
tm.assert_series_equal(result, exp, check_names=False)
assert result.name == 'C'
def test_apply_transform(ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
tm.assert_series_equal(result, expected)
def test_apply_multikey_corner(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
tm.assert_frame_equal(result.loc[key], f(group))
def test_apply_chunk_view():
# Low level tinkering could be unsafe, make sure not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': compat.lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict():
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
                    'value': compat.lrange(10)})  # api: pandas.compat.lrange
import copy
import datetime as dt
import pickle
import typing
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import shap
import statsmodels.graphics.tsaplots as tpl
import tqdm
from bokeh.io import export_png, export_svgs
from bokeh.models import DatetimeTickFormatter, Range1d
from bokeh.plotting import figure
from statsmodels.tsa.tsatools import detrend
from xgboost import XGBRegressor
from mise import data
from mise.constants import SEOUL_STATIONS, SEOULTZ
HOURLY_DATA_PATH = "/input/python/input_seoul_imputed_hourly_pandas.csv"
DAILY_DATA_PATH = "/input/python/input_seoul_imputed_daily_pandas.csv"
def dl_xgboost(station_name="종로구"):
"""Run XGBoost model
    Loads hourly imputed data via data.load_imputed([1], filepath=HOURLY_DATA_PATH).
Args:
station_name (str, optional): station name. Defaults to "종로구".
Returns:
None
"""
print("Start Multivariate XGBoost", flush=True)
_df_h = data.load_imputed([1], filepath=HOURLY_DATA_PATH)
df_h = _df_h.query('stationCode == "' + str(SEOUL_STATIONS[station_name]) + '"')
if (
station_name == "종로구"
and not Path("/input/python/input_jongno_imputed_hourly_pandas.csv").is_file()
):
# load imputed result
df_h.to_csv("/input/python/input_jongno_imputed_hourly_pandas.csv")
print("Data loading complete", flush=True)
targets = ["PM10", "PM25"]
features = [
"SO2",
"CO",
"O3",
"NO2",
"PM10",
"PM25",
"temp",
"wind_spd",
"wind_cdir",
"wind_sdir",
"pres",
"humid",
"prep",
]
features_periodic = [
"SO2",
"CO",
"O3",
"NO2",
"PM10",
"PM25",
"temp",
"wind_spd",
"wind_cdir",
"wind_sdir",
"pres",
"humid",
]
features_nonperiodic = ["prep"]
# use one step input
sample_size = 1
output_size = 24
train_fdate = dt.datetime(2008, 1, 3, 0).astimezone(SEOULTZ)
train_tdate = dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ)
test_fdate = dt.datetime(2019, 1, 1, 0).astimezone(SEOULTZ)
test_tdate = dt.datetime(2020, 10, 31, 23).astimezone(SEOULTZ)
    # consecutive dates between train and test
assert train_tdate + dt.timedelta(hours=1) == test_fdate
# check date range assumption
assert test_tdate > train_fdate
assert test_fdate > train_tdate
for target in targets:
train_set = data.MultivariateMeanSeasonalityDataset(
station_name=station_name,
target=target,
filepath=HOURLY_DATA_PATH,
features=features,
features_1=features_nonperiodic,
features_2=features_periodic,
fdate=train_fdate,
tdate=train_tdate,
sample_size=sample_size,
output_size=output_size,
train_valid_ratio=0.8,
)
train_set.preprocess()
# set fdate=test_fdate,
test_set = data.MultivariateMeanSeasonalityDataset(
station_name=station_name,
target=target,
filepath=HOURLY_DATA_PATH,
features=features,
features_1=features_nonperiodic,
features_2=features_periodic,
fdate=test_fdate,
tdate=test_tdate,
sample_size=sample_size,
output_size=output_size,
scaler_X=train_set.scaler_X,
scaler_Y=train_set.scaler_Y,
)
test_set.transform()
df_test_org = test_set.ys_raw.loc[test_fdate:test_tdate, :].copy()
# for lag in range(23, 24):
input_lag = 0
output_dir = Path("/mnt/data/XGBoost/" + station_name + "/" + target + "/")
png_dir = output_dir / Path("png/")
svg_dir = output_dir / Path("svg/")
data_dir = output_dir / Path("csv/")
Path.mkdir(data_dir, parents=True, exist_ok=True)
Path.mkdir(png_dir, parents=True, exist_ok=True)
Path.mkdir(svg_dir, parents=True, exist_ok=True)
# prepare dataset
print("Dataset conversion start..", flush=True)
X_train, Y_train, _ = dataset2svinput(train_set, lag=input_lag)
X_test, Y_test, _ = dataset2svinput(test_set, lag=input_lag)
print("Dataset conversion complete..", flush=True)
print("XGBoost " + target + "...", flush=True)
df_obs = mw_df(df_test_org, output_size, input_lag, test_fdate, test_tdate)
dates = df_obs.index
# prediction
df_sim = sim_xgboost(
X_train.copy(),
Y_train,
X_test.copy(),
Y_test,
dates,
copy.deepcopy(features),
target,
output_size,
test_set.scaler_Y,
data_dir,
png_dir,
svg_dir,
)
assert df_obs.shape == df_sim.shape
# join df
plot_xgboost(
df_sim,
df_obs,
target,
data_dir,
png_dir,
svg_dir,
test_fdate,
test_tdate,
output_size,
)
# save to csv
csv_fname = "df_test_obs.csv"
df_obs.to_csv(data_dir / csv_fname)
csv_fname = "df_test_sim.csv"
df_sim.to_csv(data_dir / csv_fname)
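# Hedged usage note (added comment): this module is driven by calling dl_xgboost(),
# e.g. dl_xgboost(station_name="종로구"), with HOURLY_DATA_PATH pointing at the imputed csv.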
def dataset2svinput(dataset, lag=0):
"""Iterate dataset then separate it to X and Y
X: single-step input
Y: lagged multi-step output
if lag == 0, X + 1 hour => 1st item of Y
Args:
dataset (Dataset): Dataset
lag (int, optional): output horizon. Defaults to 0.
Returns:
        tuple: single-step inputs (X), lagged multi-step outputs (Y), and the output start dates
"""
# single step
_Xset = [dataset[i][0] for i in range(len(dataset)) if i + lag < len(dataset)]
# lagged multi step
_Yset = [dataset[i + lag][2] for i in range(len(dataset)) if i + lag < len(dataset)]
# index of single step -> 1 step
x_dates = [
dataset.xs.index[i] for i in range(len(dataset)) if i + lag < len(dataset)
]
# starting index of multi step -> 1 step
# dataset[i + lag][3] : total dates of prediction result of single step
y_dates = [
dataset[i + lag][4][0] for i in range(len(dataset)) if i + lag < len(dataset)
]
ycols = range(len(_Yset[0]))
# 1D inputs -> total time steps x features DataFrame
    Xset = pd.DataFrame(data=_Xset, index=x_dates, columns=dataset.xs.columns)  # api: pandas.DataFrame
import pandas as pd
# ------------------- #
# inputs
# ------------------- #
results_file = 'uncertainty_results_all.csv'
formations = ['LK1', 'MK1-3', 'UJ1'] # needs to be present in the sheet_name column of the results_file
ocean_data_files = ['LK1_ocean_data.xls', 'MK1_3_ocean_data.xls', 'UJ1_ocean_data.xls']
sizing_file = 'study_results.csv'
# ------------------- #
# begin program
# ------------------- #
# read-in results
df_results = pd.read_csv(results_file)
df_sizing = pd.read_csv(sizing_file)  # api: pandas.read_csv
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
class DataUtility:
def __init__(self):
iris = datasets.load_iris()
df_multi_class = pd.DataFrame(data=iris.data, columns=iris.feature_names)
df_multi_class['Species'] = iris.target
features_multi = iris.feature_names
df_bin_class = pd.read_csv("nyoka/tests/titanic_train.csv")
features_bin = [name for name in df_bin_class.columns if name != 'Survived']
        df_reg = pd.read_csv('nyoka/tests/auto-mpg.csv')  # api: pandas.read_csv
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
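        # Dispatch on the type/dtype of `data`: RangeIndex/range first, then categorical,
        # interval, ndarray/Index/Series (datetime, timedelta, numeric or object), objects
        # exposing __array__, and finally generic iterables, which may become a MultiIndex
        # when every element is a tuple.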
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | -------------- -| ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
            # this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
        if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
            # is positional indexing (e.g. .ix with a float)
# or label indexing if we are using a type able
# to be represented in the index
            if kind in ['getitem', 'ix'] and is_float(key):  # api: pandas.core.dtypes.common.is_float
import os
import glob
import pathlib
import re
import base64
import pandas as pd
from datetime import datetime, timedelta
# https://www.pythonanywhere.com/forums/topic/29390/ for measuring the RAM usage on pythonanywhere
class defichainAnalyticsModelClass:
def __init__(self):
workDir = os.path.abspath(os.getcwd())
self.dataPath = workDir[:-9] + '/data/'
# data for controller/views
self.dailyData = pd.DataFrame()
self.hourlyData = pd.DataFrame()
self.minutelyData = pd.DataFrame()
self.lastRichlist = None
self.snapshotData = None
self.changelogData = None
# last update of csv-files
self.updated_nodehubIO = None
self.updated_allnodes = None
self.updated_extractedRichlist = None
self.updated_tradingData = None
self.updated_blocktime = None
self.updated_dexHourly = None
self.update_dexMinutely = None
self.updated_daa = None
self.updated_LastRichlist = None
self.updated_dexVolume = None
self.updated_tokenCryptos = None
self.updated_twitterData = None
self.updated_twitterFollower = None
self.update_snapshotData = None
self.update_changelogData = None
self.update_incomeVisits = None
self.update_portfolioDownloads = None
self.update_promoDatabase = None
self.update_analyticsVisits = None
self.updated_hourlyDEXTrades = None
self.update_MNmonitor = None
self.updated_dfx = None
self.update_DFIsignal = None
# background image for figures
with open(workDir + "/assets/analyticsLandscapeGrey2.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
self.figBackgroundImage = "data:image/png;base64," + encoded_string # Add the prefix that plotly will want when using the string as source
#### DAILY DATA #####
def loadDailyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadDailyTradingData()
self.loadExtractedRichlistData()
self.calcOverallTVLdata()
self.loadDailyBlocktimeData()
self.loadDAAData()
self.loadTwitterData()
self.loadTwitterFollowerData()
self.loadIncomeVisitsData()
self.loadPortfolioDownloads()
self.loadPromoDatabase()
self.loadMNMonitorDatabase()
self.loadAnalyticsVisitsData()
self.loadDFIsignalDatabase()
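# Hypothetical usage sketch (not part of the original source; assumes the repository's
# data/ and assets/ directory layout that __init__ expects):
#   model = defichainAnalyticsModelClass()
#   model.loadDailyData()   # refreshes every daily CSV source listed above in one call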
def loadMNnodehub(self):
print('>>>> Start update nodehub.IO data ... <<<<')
filePath = self.dataPath + 'mnNodehub.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_nodehubIO:
nodehubData = pd.read_csv(filePath, index_col=0)
nodehubData.rename(columns={"amount": "nbMNNodehub"}, inplace=True)
ind2Delete = self.dailyData.columns.intersection(nodehubData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(nodehubData['nbMNNodehub'], how='outer', left_index=True, right_index=True)
self.updated_nodehubIO = fileInfo.stat()
print('>>>> nodehub data loaded from csv-file <<<<')
def loadMNAllnodes(self):
print('>>>> Start update allnodes data ... <<<<')
filePath = self.dataPath + 'mnAllnodes.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_allnodes:
allnodesData = pd.read_csv(filePath, index_col=0)
allnodesData.set_index('date', inplace=True)
ind2Delete = self.dailyData.columns.intersection(allnodesData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(allnodesData['nbMNAllnode'], how='outer', left_index=True, right_index=True)
self.updated_allnodes = fileInfo.stat()
print('>>>> allnodes data loaded from csv-file <<<<')
def loadExtractedRichlistData(self):
self.loadMNnodehub() # the number of masternodes hosted by Nodehub must be loaded first so the 'other' and relative values below are correct
self.loadMNAllnodes() # likewise for the number of masternodes hosted by Allnodes
print('>>>> Start update extracted richlist data ... <<<<')
filePath = self.dataPath + 'extractedDFIdata.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_extractedRichlist:
extractedRichlist = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(extractedRichlist.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(extractedRichlist, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.dailyData['nbMNOther'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']-self.dailyData['nbMydefichainId']-self.dailyData['nbMNNodehub'].fillna(0)-self.dailyData['nbMNAllnode'].fillna(0)
self.dailyData['nbMNnonCake'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']
self.dailyData['nbMnCakeIdRelative'] = self.dailyData['nbMnCakeId']/self.dailyData['nbMnId']*100
self.dailyData['nbMNOtherRelative'] = self.dailyData['nbMNOther'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMydefichainRelative'] = self.dailyData['nbMydefichainId'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNNodehubRelative'] = self.dailyData['nbMNNodehub'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNAllnodeRelative'] = self.dailyData['nbMNAllnode'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked10Relative'] = self.dailyData['nbMNlocked10'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked5Relative'] = self.dailyData['nbMNlocked5'] / self.dailyData['nbMnId'] * 100
# extracting DFI in Liquidity-Mining
lmCoins = pd.DataFrame(index=self.dailyData.index)
lmCoins['BTC_pool'] = self.hourlyData.groupby('Date')['BTC-DFI_reserveB'].first()
lmCoins['ETH_pool'] = self.hourlyData.groupby('Date')['ETH-DFI_reserveB'].first()
lmCoins['USDT_pool'] = self.hourlyData.groupby('Date')['USDT-DFI_reserveB'].first()
lmCoins['DOGE_pool'] = self.hourlyData.groupby('Date')['DOGE-DFI_reserveB'].first()
lmCoins['LTC_pool'] = self.hourlyData.groupby('Date')['LTC-DFI_reserveB'].first()
lmCoins['USDC_pool'] = self.hourlyData.groupby('Date')['USDC-DFI_reserveB'].first()
lmCoins['overall'] = lmCoins['BTC_pool'] + lmCoins['ETH_pool'] + lmCoins['USDT_pool'] + lmCoins['DOGE_pool'].fillna(0) + lmCoins['LTC_pool'].fillna(0) + lmCoins['USDC_pool'] .fillna(0)
self.dailyData['lmDFI'] = lmCoins['overall']
# sum of addresses and DFI
self.dailyData['nbOverall'] = self.dailyData['nbMnId'] + self.dailyData['nbOtherId']
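# circulating DFI = masternode + other address balances + token/LM/ERC-20 DFI,
# minus the collateral locked in 5- and 10-year masternodes (20,000 DFI each)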
self.dailyData['circDFI'] = self.dailyData['mnDFI'] + self.dailyData['otherDFI'] \
+ self.dailyData['tokenDFI'].fillna(0) + self.dailyData['lmDFI'].fillna(0) + self.dailyData['erc20DFI'].fillna(0) \
- (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
self.dailyData['totalDFI'] = self.dailyData['circDFI'] + self.dailyData['fundDFI'] + self.dailyData['foundationDFI'].fillna(0) \
+ self.dailyData['burnedDFI'].fillna(method="ffill") + (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
# calc market cap data in USD and BTC
print('>>>>>>>> Update market cap in loadExtractedRichlistData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
# calculate daily change in addresses and DFI amount
self.dailyData['diffDate'] = pd.to_datetime(self.dailyData.index).to_series().diff().values
self.dailyData['diffDate'] = self.dailyData['diffDate'].fillna(pd.Timedelta(seconds=0)) # set nan-entry to timedelta 0
self.dailyData['diffDate'] = self.dailyData['diffDate'].apply(lambda x: float(x.days))
self.dailyData['diffNbOther'] = self.dailyData['nbOtherId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbMN'] = self.dailyData['nbMnId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbNone'] = None
self.dailyData['diffotherDFI'] = self.dailyData['otherDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffmnDFI'] = self.dailyData['mnDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffundDFI'] = self.dailyData['fundDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffoundationDFI'] = self.dailyData['foundationDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffLMDFI'] = self.dailyData['lmDFI'].diff() / self.dailyData['diffDate']
self.updated_extractedRichlist = fileInfo.stat()
print('>>>> Richlist data loaded from csv-file <<<<')
def calcOverallTVLdata(self):
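# TVL counted per masternode: 1,000,000 DFI before 2021-03-02 and 20,000 DFI afterwards
# (the two branches of the date mask below)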
self.dailyData['tvlMNDFI'] = self.dailyData['nbMnId'] * ((pd.to_datetime(self.dailyData.index)<pd.Timestamp('2021-03-02')) * 1 * 1000000 + \
(pd.to_datetime(self.dailyData.index)>=pd.Timestamp('2021-03-02')) * 1 * 20000)
dexLockedDFI = (self.hourlyData['BTC-DFI_lockedDFI']+self.hourlyData['ETH-DFI_lockedDFI']+self.hourlyData['USDT-DFI_lockedDFI'] +
self.hourlyData['DOGE-DFI_lockedDFI'].fillna(0)+self.hourlyData['LTC-DFI_lockedDFI'].fillna(0) +
self.hourlyData['BCH-DFI_lockedDFI'].fillna(0) + self.hourlyData['USDC-DFI_lockedDFI'].fillna(0))
dexLockedDFI.index = dexLockedDFI.index.floor('D').astype(str) # remove time information, only date is needed
self.dailyData['tvlDEXDFI'] = dexLockedDFI.groupby(level=0).first()
def loadDailyTradingData(self):
print('>>>> Start update trading data ... <<<<')
filePath = self.dataPath + 'dailyTradingResultsDEX.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tradingData:
dailyTradingResults = pd.read_csv(self.dataPath+'dailyTradingResultsDEX.csv',index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyTradingResults.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyTradingResults, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calc market cap data in USD and BTC (same as in loadExtractedRichlistData, to pick up updated price information)
if 'circDFI' in self.dailyData.columns:
print('>>>>>>>> Update market cap in loadDailyTradingData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
self.updated_tradingData = fileInfo.stat()
print('>>>> Trading data loaded from csv-file <<<<')
def loadDailyBlocktimeData(self):
print('>>>> Start update blocktime data ... <<<<')
filePath = self.dataPath + 'BlockListStatistics.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_blocktime:
dailyBlocktimeData = pd.read_csv(filePath, index_col=0)
dailyBlocktimeData['tps'] = dailyBlocktimeData['txCount'] / (24 * 60 * 60)
ind2Delete = self.dailyData.columns.intersection(dailyBlocktimeData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyBlocktimeData, how='outer', left_index=True,right_index=True) # add new columns to daily table
self.updated_blocktime = fileInfo.stat()
print('>>>> Blocktime data loaded from csv-file <<<<')
def loadDAAData(self):
print('>>>> Start update DAA data ... <<<<')
filePath = self.dataPath + 'analyzedDataDAA.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_daa:
dailyDAAData = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyDAAData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyDAAData, how='outer', left_index=True, right_on='Date') # add new columns to daily table
self.dailyData.set_index('Date', inplace=True)
self.dailyData.sort_index(inplace=True)
self.updated_daa = fileInfo.stat()
print('>>>> DAA data loaded from csv-file <<<<')
def loadTwitterData(self):
print('>>>> Start update twitter data ... <<<<')
filePath = self.dataPath + 'analyzedTwitterData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterData:
twitterData = pd.read_csv(filePath, index_col=0)
columns2update = ['overall_Activity', 'defichain_Activity', 'dfi_Activity', 'overall_Likes', 'overall_UniqueUserOverall', 'overall_UniqueUserTweet', 'overall_UniqueUserReply', 'overall_UniqueUserRetweet']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterData = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadTwitterFollowerData(self):
print('>>>> Start update twitter follower data ... <<<<')
filePath = self.dataPath + 'TwitterData_follower.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterFollower:
twitterFollowData = pd.read_csv(filePath, index_col=0)
twitterFollowData.set_index('Date',inplace=True)
columns2update = ['Follower', 'followedToday', 'unfollowedToday']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterFollowData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterFollower = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadIncomeVisitsData(self):
print('>>>> Start update income visits data ... <<<<')
filePath = self.dataPath + 'dataVisitsIncome.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_incomeVisits:
incomeVisitsData = pd.read_csv(filePath, index_col=0)
incomeVisitsData.rename(columns={'0': 'incomeVisits'}, inplace=True)
incomeVisitsData.set_index(incomeVisitsData.index.str[:10], inplace=True) # just use date information without hh:mm
columns2update = ['incomeVisits']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(incomeVisitsData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_incomeVisits = fileInfo.stat()
print('>>>> Income visits data loaded from csv-file <<<<')
def loadPortfolioDownloads(self):
print('>>>> Start update portfolio downloads data ... <<<<')
filePath = self.dataPath + 'dataPortfolioDownloads.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_portfolioDownloads:
portfolioRawData = pd.read_csv(filePath)
columns2update = ['PortfolioWindows', 'PortfolioMac', 'PortfolioLinux']
dfPortfolioData = pd.DataFrame(index=portfolioRawData['DateCaptured'].unique(), columns=columns2update)
dfPortfolioData['PortfolioWindows'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Windows.sum()
dfPortfolioData['PortfolioMac'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Mac.sum()
dfPortfolioData['PortfolioLinux'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Linux.sum()
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfPortfolioData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_portfolioDownloads = fileInfo.stat()
print('>>>> Portfolio downloads data loaded from csv-file <<<<')
def loadPromoDatabase(self):
print('>>>> Start update DefiChain promo database ... <<<<')
filePath = self.dataPath + 'defichainPromoData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_promoDatabase:
promoRawData = | pd.read_csv(filePath, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 15:41:19 2019
@Author: <NAME>, <NAME>
@Institution: CBDD Group, Xiangya School of Pharmaceutical Science, CSU, China
@Homepage: http://www.scbdd.com
@Mail: <EMAIL>; <EMAIL>
@Blog: https://blog.moyule.me
♥I love <NAME> forever♥
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import auc
from load import load
class Enrichment(object):
def __init__(self, loadfile, label_col, score_col, savefile=None):
self.loadfile = loadfile
self.savefile = savefile
self.df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sn
import matplotlib.patches as mpatches
from matplotlib import rcParams
#from brokenaxes import brokenaxes
from natsort import index_natsorted, order_by_index
#sn.set_context("paper", font_scale = 2)
#AUX FUNC
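# Vm_groupby: group df by the given columns, apply the aggregation dict, and
# natural-sort the result by TripID (so e.g. Trip2 sorts before Trip10)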
def Vm_groupby(df, group_by, aggr):
df = df.groupby(group_by)
df = df.agg(aggr)
df = df.reset_index()
df = df.reindex(index=order_by_index(df.index, index_natsorted(df['TripID'], reverse=False)))
return df
####
#Loop VM - Single Plot + Compare Plot (BAR/BOXPLOT)
def prep_n_mig_LoopVM(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID', 'VmID'], {'Mig_ID':'count', 'tripdistance': 'first'})
dfaux.rename(columns={'TripID':'TripID', 'VmID':'VmID', 'Mig_ID':'Number of Migrations', 'tripdistance': 'tripdistance'},inplace=True)
#dfaux = Vm_groupby(dfaux, ['TripID'], {'Number of Migrations':'mean'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def n_mig_LoopVM(df):
fig, ax = plt.subplots()
#BOXPLOT
#ax.set_title("Number of Migrations by Trip " + title)
#sn.boxplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
#BAR
ax.set_title("Number of Migrations by Trip")
sn.barplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.get_legend().remove()
ax.legend(loc='upper right')
ax.set_ylim(0, 25)
ax.set_xlabel('Trips')
ax.set_ylabel('Number of migrations')
return 1
def normalized_n_mig_LoopVM(df):
df["n_mig_km"] = ""
for i in range(df['TripID'].count()):
tripdistance = df['tripdistance'].values[i]
n_mig = df['Number of Migrations'].values[i]
normalized = n_mig / tripdistance
df['n_mig_km'].values[i] = normalized
#print(df)
fig, ax = plt.subplots()
#BOXPLOT
#ax.set_title("Number of Migrations by Trip " + title)
#sn.boxplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data = df, ax=ax)
#BAR
ax.set_title("Number of Migrations / km - by Trip")
sn.barplot(x='TripID', y='n_mig_km', hue='Class', palette=['C0', 'C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.get_legend().remove()
ax.legend(loc='upper right')
ax.set_ylim(0, 1.4)
ax.set_xlabel('Trips')
ax.set_ylabel('Number of migrations / KM')
return 1
####
####
#Loop VM - Single Plot + Compare Plot
def prep_migtime_LoopVM(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID', 'VmID'], {'Mt_real':'sum', 'triptime': 'first'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = | pd.concat(df_aux_list) | pandas.concat |
# -*- coding: utf-8 -*-
# @Time : 2020/10/08 20:09:56
# @Author : RegiusQuant <<EMAIL>>
# @Project : Regius-AI
# @File : preprocessing.py
# @Description: Data preprocessing
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler, LabelEncoder
def generate_scaler_and_encoder(file_path_list, pkl_path):
temp_data_list = []
for file_path in file_path_list:
temp_data = pd.read_csv(file_path)
temp_data_list.append(temp_data)
all_data = pd.concat(temp_data_list)
cont_data = all_data.filter(regex="X_*")
standard_scaler = StandardScaler()
standard_scaler.fit(cont_data)
cate_data = all_data.filter(regex="C_*")
label_encoders = []
for col in cate_data.columns:
label_encoder = LabelEncoder()
label_encoder.fit(cate_data[col])
label_encoders.append(label_encoder)
# for col, label_encoder in zip(cate_data.columns, label_encoders):
# print(col, label_encoder.classes_)
pkl_dict = {"StandardScaler": standard_scaler, "LabelEncoders": label_encoders}
with open(pkl_path, "wb") as f:
pickle.dump(pkl_dict, f)
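# Hypothetical usage sketch (file names are placeholders, not from the original source):
#   generate_scaler_and_encoder(["train.csv", "valid.csv"], "preprocessors.pkl")
# fits one StandardScaler on all X_* columns and one LabelEncoder per C_* column,
# then pickles them for load_processed_data below.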
def load_processed_data(file_path_list, pkl_path):
temp_data_list = []
for file_path in file_path_list:
temp_data = | pd.read_csv(file_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
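# Split the CSV into num_tasks contiguous row ranges and read them concurrently with a
# ThreadPool; the first chunk keeps the header, later chunks skip rows and reuse its columns.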
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
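# concatenate the raw date/time columns element-wise, then parse the combined strings into datetimes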
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
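# build one CSV row that places NA token v in column i and leaves the other fields empty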
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
# coding: utf-8
from __future__ import absolute_import, division, \
print_function, unicode_literals
import six.moves as sm
import numpy as np
import pandas as pd
from scipy import stats
from matplotlib import pyplot as plt
from magellan_ai.ml.mag_util import mag_metrics
EPS = 1e-7
def show_func():
""" 可视化函数
"""
print("+---------------------------------------+")
print("|analyse methods |")
print("+---------------------------------------+")
print("|feature_coverage_in_diff_people |")
print("|single_enum_feat_eval_diff_people |")
print("|single_continuity_feat_eval_diff_people|")
print("+---------------------------------------+")
def feature_coverage_in_diff_people(
df_,
group_col,
group_dict={},
col_no_cover_dict={},
col_handler_dict={},
cols_skip=[],
is_sorted=True):
"""calculate feature coverage for 2 groups
Parameters
-----------
df_ : DataFrame
Input file.
group_col : list, ndarray, Series or DataFrame
The column about group.
group_dict : dict, optional
Alias of group's name,default is equal to itself.
col_no_cover_dict : dict, optional
A custom feature specifies a non overriding value for the data type.
default = {'int64': [-1], 'float64': [-1.0],
str': ["-1", "unknown"],
'object': ["-1", "unknown"], 'bool': []}
col_handler_dict : dict, optional
Dictionary of no-covered features.
cols_skip : List, optional
Ignore feature names for which feature coverage is calculated.
is_sorted : bool, optional
Whether to sort feature coverage
Returns
----------
feat_coverage_df : DataFrame
The coverage of features and the corresponding data types
Example
-----------
>>> df = pd.read_csv("./car.csv", header=0)
>>> print(feature_coverage_in_diff_people(df,"car4",group_dict={
0:"ins", 1:"water"}))
feature coverage_0 coverage_1 feat_type
0 Unnamed: 0 1.000000 1.000000 int64
1 car_did 1.000000 1.000000 int64
2 car1 1.000000 0.987156 float64
3 car2 1.000000 1.000000 int64
4 car3 1.000000 1.000000 int64
5 car4 1.000000 1.000000 int64
6 car5 0.190126 0.262093 float64
7 own_car 0.190126 0.262093 float64
Notes
-------
The column given by group_col must contain exactly two valid group labels.
"""
# Default per-dtype values treated as not covered
if not col_no_cover_dict:
col_no_cover_dict = {'int64': [-1], 'float64': [-1.0],
'str': ["-1", "unknown"],
'object': ["-1", "unknown"], 'bool': []}
# Split the rows into the two groups
groups = df_.groupby(group_col)
group_dfs, indexs = [], []
for index, group_df in groups:
if index in col_no_cover_dict[str(df_[group_col].dtype)]:
continue
indexs.append(index)
group_dfs.append(group_df)
# If the number of population types is not 2, throw an exception
try:
if len(group_dfs) != 2:
raise Exception("人群种类数不为 2")
except Exception as err:
print(err)
# Compute a coverage DataFrame for each group
df1 = mag_metrics.cal_feature_coverage(
group_dfs[0],
col_no_cover_dict,
col_handler_dict,
cols_skip,
is_sorted)
df2 = mag_metrics.cal_feature_coverage(
group_dfs[1],
col_no_cover_dict,
col_handler_dict,
cols_skip,
is_sorted)
# Rename the coverage columns to carry the group label
df1.columns = ['feature', 'coverage_%s' % indexs[0], 'feat_type']
df2.columns = ['feature', 'coverage_%s' % indexs[1], 'feat_type']
del df1['feat_type']
# Merge DataFrame
res_df = pd.merge(df1, df2, how="inner", on="feature")
return res_df
def single_enum_feat_eval_diff_people(
group_df_col,
feature_df_col,
group_dict={},
feature_dict={},
col_no_cover_dict={},
draw_pics=False):
"""
    1. Analyze how two groups differ on a single enumerated (categorical)
    feature. Examples of such features: gender, occupation, favorite app.
    2. A chi-square test is performed when the feature has exactly 2 distinct
    values; the PSI is calculated when the feature has more than 2 values.
    Other statistical methods can be added later.
Parameters:
------------
group_df_col : Series
        Series containing the group label for each row.
feature_df_col : Series
The column in which the feature to be analyzed is located
group_dict : dict, optional
        Aliases for the group names; each defaults to the group value itself.
    feature_dict : dict, optional
        Aliases for the feature values; each defaults to the feature value itself.
col_no_cover_dict : dict, optional
        Values that are treated as "not covered" for each data type.
draw_pics : bool, optional
Whether need to draw pictures, default is equal to false.
Returns:
-----------
report : dict
"DataFrame" : DataFrame
            Share of each feature value within each group.
"chi2" :float64
when the number of features is equal to 2, calculate chi2-square.
"psi" : float64
If the feature number is greater than 2, calculate psi.
"result" : str
Conclusion of difference.
Examples
------------
>>> df = pd.read_csv('car.csv', header = 0)
>>> dic = single_enum_feat_eval_diff_people(
df['car4'], df['own_car'], group_dict={
0: "ins", 1: "water"}, feature_dict={
0: "not own car", 1: "own car"}, draw_pics=True)
       features  ins count  ins ratio  water count  water ratio
0 not own car 6458 0.520471 102194 0.679617
1 own car 5950 0.479529 48176 0.320383
>>> print("chi2=%s, result=%s" % (dic["chi2"], dic["result"]))
    chi2=1308.0008370237344, result=According to the chi-square test, the two groups differ significantly
>>> df = pd.read_csv("./t3.csv", header=0)
    >>> dic = single_enum_feat_eval_diff_people(
df['t3_4'], df['career'], group_dict={
0: "ins", 1: "water"}, draw_pics=True)
>>> print(dic['DataFrame'])
feat_value ins water psi
0 gongwuyuan 0.036647 0.172391 0.210191
1 blue_collar 0.794946 0.687720 0.015536
2 courier 0.029653 0.013666 0.012385
3 it 0.022939 0.011836 0.007346
4 individual_business 0.108635 0.106713 0.000034
5 finance 0.007180 0.007674 0.000033
>>> print("psi=%s, result=%s" % (dic["psi"], dic["result"]))
    psi=0.2455246396939325, result=some difference
Notes:
----------
    1. group_df_col must contain exactly two valid tags and feature_df_col
       must contain at least two valid tags, otherwise an exception is thrown.
    2. If the feature has exactly 2 distinct values, the chi-square statistic
       is calculated; if it has more than 2, the PSI is calculated.
3. If the number of feature type is greater than 25, the pie chart
will not be drawn.
    4. In the chi-square test, H0: π1 = π2, H1: π1 ≠ π2.
    5. With 1 degree of freedom, a chi-square value >= 3.841 (p-value <= 0.05)
       can be considered a significant difference.
6. Psi related information:
< 0.1: the difference is small
0.1 ~ 0.25: average difference
>= 0.25: very different
"""
# default no_cover features
if not col_no_cover_dict:
col_no_cover_dict = {'int64': [-1], 'float64': [-1.0],
'str': ["-1", "unknown"],
'object': ["-1", "unknown"], 'bool': []}
    # Merge group_df_col and feature_df_col, dropping rows whose group or
    # feature value is marked as not covered
group_list, feature_list = [], []
for group, feature in zip(group_df_col, feature_df_col):
        # Keep only rows where both the group and the feature value are valid
if group not in col_no_cover_dict[str(
group_df_col.dtype)] and feature not in col_no_cover_dict[
str(feature_df_col.dtype)]:
group_list.append(group)
feature_list.append(feature)
            # Default the alias of any tag missing from the dict to the tag itself
if group not in group_dict:
group_dict[group] = group
if feature not in feature_dict:
feature_dict[feature] = feature
df = | pd.DataFrame({'group': group_list, 'feature': feature_list}) | pandas.DataFrame |
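# Illustrative sketch (not from the original module): the two statistics that
# the docstring above describes, computed on made-up numbers. The chi-square
# uses scipy.stats.chi2_contingency on a 2x2 count table; the PSI compares the
# share of each feature value between the two groups. All values below are
# invented for illustration only.
import numpy as np
from scipy.stats import chi2_contingency


def _toy_chi2_and_psi(eps=1e-7):
    # binary feature: counts per (group, feature value)
    counts = np.array([[6458, 5950],
                       [102194, 48176]])
    chi2, p_value, dof, _ = chi2_contingency(counts)
    # multi-valued feature: PSI between two share vectors (each sums to 1)
    p = np.array([0.04, 0.79, 0.03, 0.02, 0.11, 0.01])
    q = np.array([0.17, 0.69, 0.01, 0.01, 0.11, 0.01])
    psi = np.sum((p - q) * np.log((p + eps) / (q + eps)))
    return chi2, p_value, psi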
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDatetimeIndex:
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = | DataFrame({"A": idx, "B": dr}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 11:43:06 2021
@author: student
"""
import pandas as pd
import numpy as np
import argparse
import os
import random
import matplotlib.pyplot as plt
from stellargraph.mapper import PaddedGraphGenerator
from stellargraph.layer import DeepGraphCNN, GCNSupervisedGraphClassification
from stellargraph import StellarGraph
from sklearn import model_selection
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import pickle
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from tensorflow.keras.utils import to_categorical
# results directory
RES_DIR = 'results/gcn'
if not os.path.exists(RES_DIR):
os.makedirs(RES_DIR)
MODEL_DIR = 'models/gcn/'
os.makedirs(MODEL_DIR, exist_ok=True)
SEED = 5000
np.random.seed(SEED)
random.seed(SEED)
tf.random.set_seed(SEED)
def _info(s):
print('---')
print(s)
print('---')
def threshold_proportional(W, p, copy=True):
"""
    Keep the strongest proportion p of links and set all weaker links to 0
Parameters
----------
    W : 2D array, connectivity matrix to be thresholded.
    p : float between 0 and 1, proportion of strongest links to preserve; all other links are set to 0.
copy : boolean, optional, The default is True.
Raises
------
ValueError, If the threshold is not within 0 and 1.
Returns
-------
    W : 2D array, thresholded connectivity matrix with the weaker links set to 0.
"""
if p >= 1 or p <= 0:
raise ValueError("Threshold value should be between 0 and 1")
if copy:
W = W.copy()
n = len(W) # number of nodes
np.fill_diagonal(W, 0) # clear diagonal
if np.all(W == W.T): # if symmetric matrix
W[np.tril_indices(n)] = 0 # ensure symmetry is preserved
ud = 2 # halve number of removed links
else:
ud = 1
ind = np.where(W) # find all links
I = np.argsort(W[ind])[::-1] # sort indices by magnitude
# number of links to be preserved
en = round((n * n - n) * p / ud)
W[(ind[0][I][en:], ind[1][I][en:])] = 0 # apply threshold
if ud == 2: # if symmetric matrix
W[:, :] = W + W.T # reconstruct symmetry
W[W>0.9999] = 1 # make sure the highest correlation coeff is 1
return W
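# Illustrative sketch (not part of the original script): applying
# threshold_proportional to a tiny symmetric correlation matrix, keeping the
# strongest 50% of off-diagonal links. All numbers are made up.
import numpy as np


def _demo_threshold_proportional():
    W = np.array([[1.0, 0.8, 0.1, 0.3],
                  [0.8, 1.0, 0.5, 0.2],
                  [0.1, 0.5, 1.0, 0.6],
                  [0.3, 0.2, 0.6, 1.0]])
    W_thr = threshold_proportional(W, 0.5)
    # the three weakest links (0.3, 0.2, 0.1) are zeroed; 0.8, 0.6, 0.5 survive
    return W_thr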
def conv2list(adj_m):
"""
converts adjacency matrix to adj list to load into stellargraph
Parameters
----------
adj_m : 2D array to be converted to adjacency list.
Raises
------
ValueError
if connectivity matrix has length 0.
Returns
-------
d : DataFrame.
"""
    # find non-zero elements in adj_m
if (len(adj_m) == 0):
raise ValueError("Invalid adjacency matrix")
indices = np.argwhere(adj_m)
src, dsts = indices[:,0].reshape(-1, 1),indices[:,1].reshape(-1, 1)
v = adj_m[src,dsts].reshape(-1, 1)
final = np.concatenate((src, dsts, v), axis=1)
d = | pd.DataFrame(final) | pandas.DataFrame |
import datetime
import os
import pandas as pd
from tqdm import tqdm
from clean_data import cleaned_columns, country_to_cca3, dict_countries_to_cca3
from high_level_fetch import get_cleaned_month
final_column_names = ['Day', 'EventCode', 'Source_CountryCode', 'Target_CountryCode',
'Target_Lat', 'Target_Long', 'Target_GeoType', 'IsRootEvent',
'QuadClass', 'GoldsteinScale', 'AvgTone', 'NumMentions',
'NumSources', 'NumArticles']
def load_data():
"""This function loads all the data cleaned from 03/01/201 to 12/12/2017.
In most cases, only this function should be called
"""
if not os.path.isfile('../data/final_data.csv'):
# fetching
print(
"If you see this and you are not expecting to download and clean all the data, please verify you have the file final_data.csv in the folder data")
print("Fetching and cleaning GDELT 2.0 Translingual data...")
start_date = datetime.datetime(2015, 3, 1)
end_date = datetime.datetime(2017, 12, 1)
n_months = (end_date - start_date).days * 12 // 365
df = | pd.DataFrame(columns=cleaned_columns) | pandas.DataFrame |
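# Illustrative sketch (not from the original module): one way the month range
# used by load_data above could be enumerated with pandas. The exact signature
# of get_cleaned_month is not shown in this snippet, so the call below is only
# an assumed placeholder for "fetch and clean one month of GDELT data".
import datetime
import pandas as pd
from tqdm import tqdm
from high_level_fetch import get_cleaned_month  # project-local module


def _iterate_months(start_date=datetime.datetime(2015, 3, 1),
                    end_date=datetime.datetime(2017, 12, 1)):
    months = pd.date_range(start_date, end_date, freq="MS")  # month starts
    frames = []
    for month_start in tqdm(months):
        # assumed signature: adjust to the real get_cleaned_month
        frames.append(get_cleaned_month(month_start.year, month_start.month))
    return pd.concat(frames, ignore_index=True)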
# -*- coding: utf-8 -*-
"""Tests for Regression Diagnostics and Specification Tests
Created on Thu Feb 09 13:19:47 2012
Author: <NAME>
License: BSD-3
currently all tests are against R
"""
import os
import json
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import pytest
from sm2.regression.linear_model import OLS
from sm2.tools.tools import add_constant
from sm2.datasets import macrodata
import sm2.stats.sandwich_covariance as sw
from sm2.stats import diagnostic
# import sm2.stats.outliers_influence as oi
oi = None # dummy to prevent flake8 warnings
cur_dir = os.path.abspath(os.path.dirname(__file__))
def compare_t_est(sp, sp_dict, decimal=(14, 14)):
assert_allclose(sp[0],
sp_dict['statistic'],
atol=10**-decimal[0],
rtol=10**-decimal[0])
assert_allclose(sp[1],
sp_dict['pvalue'],
atol=10**-decimal[1],
rtol=10**-decimal[0])
@pytest.mark.not_vetted
class TestDiagnosticG(object):
@classmethod
def setup_class(cls):
d = macrodata.load_pandas().data
# growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
lint = d['realint'][:-1].values
tbilrate = d['tbilrate'][:-1].values
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint])
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])
exogg3 = add_constant(np.c_[gs_l_realgdp])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
cls.res = res_ols
cls.res2 = res_ols2
cls.res3 = res_ols3
cls.endog = cls.res.model.endog
cls.exog = cls.res.model.exog
def test_basic(self):
# mainly to check I got the right regression
# > mkarray(fm$coefficients, "params")
params = np.array([-9.48167277465485, 4.3742216647032,
-0.613996969478989])
assert_almost_equal(self.res.params, params, decimal=12)
def test_hac(self):
res = self.res
# > nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE)
# > nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, verbose=TRUE)
# > mkarray(nw, "cov_hac_4")
cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685,
-0.0597207976835705, -0.3133096102522685,
0.1081011690351306, 0.000389440793564336,
-0.0597207976835705, 0.000389440793564339,
0.0862118527405036]).reshape(3, 3, order='F')
# > mkarray(nw2, "cov_hac_10")
cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846,
-0.03958300024627573, -0.2871560199899845,
0.1049107028987101, 0.0003896205316866944,
-0.03958300024627578, 0.0003896205316866961,
0.0985539340694839]).reshape(3, 3, order='F')
cov = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov)
assert_almost_equal(cov,
cov_hac_4,
decimal=14)
assert_almost_equal(bse_hac,
np.sqrt(np.diag(cov)),
decimal=14)
cov = sw.cov_hac_simple(res, nlags=10, use_correction=False)
bse_hac = sw.se_cov(cov)
assert_almost_equal(cov,
cov_hac_10,
decimal=14)
assert_almost_equal(bse_hac,
np.sqrt(np.diag(cov)),
decimal=14)
@pytest.mark.skip(reason="het_goldfeldquandt not ported from upstream")
def test_het_goldfeldquandt(self):
# TODO: test options missing
# > gq = gqtest(fm, alternative='greater')
# > mkhtest_f(gq, 'het_gq_greater', 'f')
het_gq_greater = dict(statistic=0.5313259064778423,
pvalue=0.9990217851193723,
parameters=(98, 98), distr='f')
# > gq = gqtest(fm, alternative='less')
# > mkhtest_f(gq, 'het_gq_less', 'f')
het_gq_less = dict(statistic=0.5313259064778423,
pvalue=0.000978214880627621,
parameters=(98, 98), distr='f')
# > gq = gqtest(fm, alternative='two.sided')
# > mkhtest_f(gq, 'het_gq_two_sided', 'f')
het_gq_two_sided = dict(statistic=0.5313259064778423,
pvalue=0.001956429761255241,
parameters=(98, 98), distr='f')
# > gq = gqtest(fm, fraction=0.1, alternative='two.sided')
# > mkhtest_f(gq, 'het_gq_two_sided_01', 'f')
het_gq_two_sided_01 = dict(statistic=0.5006976835928314,
pvalue=0.001387126702579789,
parameters=(88, 87), distr='f')
# > gq = gqtest(fm, fraction=0.5, alternative='two.sided')
# > mkhtest_f(gq, 'het_gq_two_sided_05', 'f')
het_gq_two_sided_05 = dict(statistic=0.434815645134117,
pvalue=0.004799321242905568,
parameters=(48, 47), distr='f')
endogg, exogg = self.endog, self.exog
# tests
gq = diagnostic.het_goldfeldquandt(endogg, exogg, split=0.5)
compare_t_est(gq, het_gq_greater, decimal=(14, 14))
assert gq[-1] == 'increasing'
gq = diagnostic.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='decreasing')
compare_t_est(gq, het_gq_less, decimal=(14, 14))
assert gq[-1] == 'decreasing'
gq = diagnostic.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided, decimal=(14, 14))
assert gq[-1] == 'two-sided'
# TODO: forcing the same split as R 202-90-90-1=21
gq = diagnostic.het_goldfeldquandt(endogg, exogg, split=90, drop=21,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14))
assert gq[-1] == 'two-sided'
# TODO other options ???
def test_het_breusch_pagan(self):
res = self.res
bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347,
parameters=(2,), distr='f')
bp = diagnostic.het_breuschpagan(res.resid, res.model.exog)
compare_t_est(bp, bptest, decimal=(12, 12))
def test_het_white(self):
res = self.res
# TODO: regressiontest, compare with Greene or Gretl or Stata
hw = diagnostic.het_white(res.resid, res.model.exog)
hw_values = (33.503722896538441, 2.9887960597830259e-06,
7.7945101228430946, 1.0354575277704231e-06)
assert_almost_equal(hw, hw_values)
def test_het_arch(self):
# test het_arch and indirectly het_lm against R
# > library(FinTS)
# > at = ArchTest(residuals(fm), lags=4)
# > mkhtest(at, 'archtest_4', 'chi2')
archtest_4 = dict(statistic=3.43473400836259,
pvalue=0.487871315392619, parameters=(4,),
distr='chi2')
# > at = ArchTest(residuals(fm), lags=12)
# > mkhtest(at, 'archtest_12', 'chi2')
archtest_12 = dict(statistic=8.648320999014171,
pvalue=0.732638635007718, parameters=(12,),
distr='chi2')
at4 = diagnostic.het_arch(self.res.resid, maxlag=4)
at12 = diagnostic.het_arch(self.res.resid, maxlag=12)
compare_t_est(at4[:2], archtest_4, decimal=(12, 13))
compare_t_est(at12[:2], archtest_12, decimal=(12, 13))
def test_het_arch2(self):
# test autolag options, this also test het_lm
# unfortunately optimal lag=1 for this data
resid = self.res.resid
res1 = diagnostic.het_arch(resid, maxlag=1, autolag=None, store=True)
rs1 = res1[-1]
res2 = diagnostic.het_arch(resid, maxlag=5, autolag='aic', store=True)
rs2 = res2[-1]
assert_almost_equal(rs2.resols.params,
rs1.resols.params,
decimal=13)
assert_almost_equal(res2[:4],
res1[:4],
decimal=13)
# test that smallest lag, maxlag=1 works
res3 = diagnostic.het_arch(resid, maxlag=1, autolag='aic')
assert_almost_equal(res3[:4],
res1[:4],
decimal=13)
@pytest.mark.skip(reason="acorr_breusch_godfrey not ported from upstream")
def test_acorr_breusch_godfrey(self):
res = self.res
#> bgf = bgtest(fm, order=4, type="F")
breuschgodfrey_f = dict(statistic=1.179280833676792,
pvalue=0.321197487261203,
parameters=(4, 195,), distr='f')
# > bgc = bgtest(fm, order = 4, type="Chisq")
# > mkhtest(bgc, "breuschpagan_c", "chi2")
breuschgodfrey_c = dict(statistic=4.771042651230007,
pvalue=0.3116067133066697,
parameters=(4,), distr='chi2')
bg = diagnostic.acorr_breusch_godfrey(res, nlags=4)
bg_r = [breuschgodfrey_c['statistic'], breuschgodfrey_c['pvalue'],
breuschgodfrey_f['statistic'], breuschgodfrey_f['pvalue']]
assert_almost_equal(bg, bg_r, decimal=13)
# check that lag choice works
bg2 = diagnostic.acorr_breusch_godfrey(res, nlags=None)
bg3 = diagnostic.acorr_breusch_godfrey(res, nlags=14)
assert_almost_equal(bg2, bg3, decimal=13)
def test_acorr_ljung_box(self):
# unit-test which may be useful later
# ddof correction for fitted parameters in ARMA(p, q) fitdf=p+q
# > bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box", fitdf=2)
# > mkhtest(bt, "ljung_box_4df2", "chi2")
# ljung_box_4df2 = dict(statistic=5.23587172795227,
# pvalue=0.0729532930400377,
# parameters=(2,), distr='chi2')
# > bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce", fitdf=2)
# > mkhtest(bt, "ljung_box_bp_4df2", "chi2")
# ljung_box_bp_4df2 = dict(statistic=5.12462932741681,
# pvalue=0.0771260128929921,
# parameters=(2,), distr='chi2')
res = self.res
# general test
# > bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box")
# > mkhtest(bt, "ljung_box_4", "chi2")
ljung_box_4 = dict(statistic=5.23587172795227,
pvalue=0.263940335284713,
parameters=(4,), distr='chi2')
# > bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce")
# > mkhtest(bt, "ljung_box_bp_4", "chi2")
ljung_box_bp_4 = dict(statistic=5.12462932741681,
pvalue=0.2747471266820692,
parameters=(4,), distr='chi2')
lb, lbpval, bp, bppval = diagnostic.acorr_ljungbox(res.resid, 4,
boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 13))
compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 13))
def test_acorr_ljung_box_big_default(self):
res = self.res
# test with big dataset and default lag
# > bt = Box.test(residuals(fm), type = "Ljung-Box")
# > mkhtest(bt, "ljung_box_none", "chi2")
ljung_box_none = dict(statistic=51.03724531797195,
pvalue=0.11334744923390,
distr='chi2')
# > bt = Box.test(residuals(fm), type = "Box-Pierce")
# > mkhtest(bt, "ljung_box_bp_none", "chi2")
ljung_box_bp_none = dict(statistic=45.12238537034000,
pvalue=0.26638168491464,
distr='chi2')
lb, lbpval, bp, bppval = diagnostic.acorr_ljungbox(res.resid,
boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]],
ljung_box_none,
decimal=(13, 13))
compare_t_est([bp[-1], bppval[-1]],
ljung_box_bp_none,
decimal=(13, 13))
def test_acorr_ljung_box_small_default(self):
res = self.res
# test with small dataset and default lag
# > bt = Box.test(residuals(fm), type = "Ljung-Box")
# > mkhtest(bt, "ljung_box_small", "chi2")
ljung_box_small = dict(statistic=9.61503968281915,
pvalue=0.72507000996945,
parameters=(0,), distr='chi2')
# > bt = Box.test(residuals(fm), type = "Box-Pierce")
# > mkhtest(bt, "ljung_box_bp_small", "chi2")
ljung_box_bp_small = dict(statistic=7.41692150864936,
pvalue=0.87940785887006,
parameters=(0,), distr='chi2')
lb, lbpval, bp, bppval = diagnostic.acorr_ljungbox(res.resid[:30],
boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]],
ljung_box_small,
decimal=(13, 13))
compare_t_est([bp[-1], bppval[-1]],
ljung_box_bp_small,
decimal=(13, 13))
@pytest.mark.skip(reason="linear_harvey_collier not ported from upstream")
def test_harvey_collier(self):
# > hc = harvtest(fm, order.by = NULL, data = list())
# > mkhtest_f(hc, 'harvey_collier', 't')
harvey_collier = dict(statistic=0.494432160939874,
pvalue=0.6215491310408242,
parameters=(198), distr='t')
# > hc2 = harvtest(fm, order.by=ggdp, data = list())
# > mkhtest_f(hc2, 'harvey_collier_2', 't')
harvey_collier_2 = dict(statistic=1.42104628340473,
pvalue=0.1568762892441689,
parameters=(198), distr='t')
hc = diagnostic.linear_harvey_collier(self.res)
compare_t_est(hc, harvey_collier, decimal=(12, 12))
@pytest.mark.skip(reason="linear_rainbow not ported from upstream")
def test_rainbow(self):
# rainbow test
# > rt = raintest(fm)
# > mkhtest_f(rt, 'raintest', 'f')
raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418,
parameters=(101, 98), distr='f')
# > rt = raintest(fm, center=0.4)
# > mkhtest_f(rt, 'raintest_center_04', 'f')
raintest_center_04 = dict(statistic=0.682635074191527,
pvalue=0.971040230422121,
parameters=(101, 98), distr='f')
# > rt = raintest(fm, fraction=0.4)
# > mkhtest_f(rt, 'raintest_fraction_04', 'f')
raintest_fraction_04 = dict(statistic=0.565551237772662,
pvalue=0.997592305968473,
parameters=(122, 77), distr='f')
# > rt = raintest(fm, order.by=ggdp)
# Warning message:
# In if (order.by == "mahalanobis") { :
# the condition has length > 1 and only the first element will be used
# > mkhtest_f(rt, 'raintest_order_gdp', 'f')
raintest_order_gdp = dict(statistic=1.749346160513353,
pvalue=0.002896131042494884,
parameters=(101, 98), distr='f')
rb = diagnostic.linear_rainbow(self.res)
compare_t_est(rb, raintest, decimal=(13, 14))
rb = diagnostic.linear_rainbow(self.res, frac=0.4)
compare_t_est(rb, raintest_fraction_04, decimal=(13, 14))
def test_compare_lr(self):
res = self.res
res3 = self.res3 # nested within res
# lrtest
# lrt = lrtest(fm, fm2)
# Model 1: ginv ~ ggdp + lint
# Model 2: ginv ~ ggdp
lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184,
chi2value=4.66794408358942, pvalue=0.03073069384028677,
df=(4, 3, 1))
lrt = res.compare_lr_test(res3)
assert_almost_equal(lrt[0],
lrtest['chi2value'],
decimal=11)
assert_almost_equal(lrt[1],
lrtest['pvalue'],
decimal=11)
waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025,
df=(199, 200, 1))
wt = res.compare_f_test(res3)
assert_almost_equal(wt[0],
waldtest['fvalue'],
decimal=11)
assert_almost_equal(wt[1],
waldtest['pvalue'],
decimal=11)
@pytest.mark.skip(reason="compare_cox, compare_j not ported from upstream")
def test_compare_nonnested(self):
res = self.res
res2 = self.res2
# jt = jtest(fm, lm(ginv ~ ggdp + tbilrate))
# Estimate Std. Error t value Pr(>|t|)
jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823,
2.155182176352370, 0.032354572525314450, '*'),
('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064,
2.715438978051544, 0.007203854534057954, '**')]
jt1 = diagnostic.compare_j(res2, res)
assert_almost_equal(jt1, jtest[0][3:5], decimal=13)
jt2 = diagnostic.compare_j(res, res2)
assert_almost_equal(jt2, jtest[1][3:5], decimal=14)
# Estimate Std. Error z value Pr(>|z|)
coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265,
-1.304043770977755, 1.922186587840554e-01, ' '),
('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139,
-5.727181590258883, 1.021128495098556e-08, '***')]
ct1 = diagnostic.compare_cox(res, res2)
assert_almost_equal(ct1, coxtest[0][3:5], decimal=13)
ct2 = diagnostic.compare_cox(res2, res)
assert_almost_equal(ct2, coxtest[1][3:5], decimal=12)
# TODO: should be approx
# Res.Df Df F Pr(>F)
encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983,
0.032354572525313666, '*'),
('M2 vs. ME', 198, -1, 7.373608843521585,
0.007203854534058054, '**')]
# Estimate Std. Error t value
petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596,
44.5087822087058598, -5.15139, 6.201281252449979e-07),
('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814,
0.0000462387010349, 13.72583, 1.319536115230356e-30)]
@pytest.mark.skip(reason="breaks_cusumolsresid not ported from upstream")
def test_cusum_ols(self):
# R library(strucchange)
# > sc = sctest(ginv ~ ggdp + lint, type="OLS-CUSUM")
# > mkhtest(sc, 'cusum_ols', 'BB')
cusum_ols = dict(statistic=1.055750610401214,
pvalue=0.2149567397376543,
parameters=(), distr='BB') # Brownian Bridge
k_vars = 3
cs_ols = diagnostic.breaks_cusumolsresid(self.res.resid, ddof=k_vars)
compare_t_est(cs_ols, cusum_ols, decimal=(12, 12))
@pytest.mark.skip(reason="breaks_hansen not ported from upstream")
def test_breaks_hansen(self):
# > sc = sctest(ginv ~ ggdp + lint, type="Nyblom-Hansen")
# > mkhtest(sc, 'breaks_nyblom_hansen', 'BB')
breaks_nyblom_hansen = dict(statistic=1.0300792740544484,
pvalue=0.1136087530212015,
parameters=(), distr='BB')
bh = diagnostic.breaks_hansen(self.res)
assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'],
decimal=13)
# TODO: breaks_hansen doesn't return pvalues
@pytest.mark.skip(reason="recursive_olsresiduals not ported from upstream")
def test_recursive_residuals(self):
reccumres_standardize = np.array([
-2.151, -3.748, -3.114, -3.096, -1.865, -2.230, -1.194, -3.500,
-3.638, -4.447, -4.602, -4.631, -3.999, -4.830, -5.429, -5.435,
-6.554, -8.093, -8.567, -7.532, -7.079, -8.468, -9.320, -12.256,
-11.932, -11.454, -11.690, -11.318, -12.665, -12.842, -11.693,
-10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159,
-9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794,
-3.511, -3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888,
-4.309, -3.688, -3.918, -3.735, -3.452, -2.086, -6.520, -7.959,
-6.760, -6.855, -6.032, -4.405, -4.123, -4.075, -3.235, -3.115,
-3.131, -2.986, -1.813, -4.824, -4.424, -4.796, -4.000, -3.390,
-4.485, -4.669, -4.560, -3.834, -5.507, -3.792, -2.427, -1.756,
-0.354, 1.150, 0.586, 0.643, 1.773, -0.830, -0.388, 0.517, 0.819,
2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957, -0.928, 0.327,
-0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728, -0.646,
-2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806,
-3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650,
-0.947, 0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491,
5.377, 4.624, 5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735,
8.151, 7.260, 7.846, 8.771, 8.400, 8.717, 9.916, 9.008, 8.910,
8.294, 8.982, 8.540, 8.395, 7.782, 7.794, 8.142, 8.362, 8.400,
7.850, 7.643, 8.228, 6.408, 7.218, 7.699, 7.895, 8.725, 8.938,
8.781, 8.350, 9.136, 9.056, 10.365, 10.495, 10.704, 10.784,
10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522, 10.392,
10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016, 6.636,
6.975])
rr = diagnostic.recursive_olsresiduals(self.res, skip=3, alpha=0.95)
np.testing.assert_equal(np.round(rr[5][1:], 3),
reccumres_standardize) # extra zero in front
assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize), 3)
assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4)
# regression number, visually checked with graph from gretl
ub0 = np.array([13.37318571, 13.50758959, 13.64199346, 13.77639734,
13.91080121])
ub1 = np.array([39.44753774, 39.58194162, 39.7163455, 39.85074937,
39.98515325])
lb, ub = rr[6]
assert_almost_equal(ub[:5], ub0, decimal=7)
assert_almost_equal(lb[:5], -ub0, decimal=7)
assert_almost_equal(ub[-5:], ub1, decimal=7)
assert_almost_equal(lb[-5:], -ub1, decimal=7)
# test a few values with explicit OLS
endog = self.res.model.endog
exog = self.res.model.exog
params = []
ypred = []
for i in range(3, 10):
resi = OLS(endog[:i], exog[:i]).fit()
ypred.append(resi.model.predict(resi.params, exog[i]))
params.append(resi.params)
assert_almost_equal(rr[2][3:10], ypred, decimal=12)
assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12)
assert_almost_equal(rr[1][2:9], params, decimal=12)
def test_lilliefors(self):
res = self.res
# TODO: Separate this lilliefors stuff into its own test
# TODO: this should be a separate test
# > library(nortest) # Lilliefors (Kolmogorov-Smirnov) normality test
# > lt = lillie.test(residuals(fm))
# > mkhtest(lt, "lilliefors", "-")
lilliefors1 = dict(statistic=0.0723390908786589,
pvalue=0.01204113540102896,
parameters=(), distr='-')
# > lt = lillie.test(residuals(fm)**2)
# > mkhtest(lt, "lilliefors", "-")
lilliefors2 = dict(statistic=0.301311621898024,
pvalue=1.004305736618051e-51,
parameters=(), distr='-')
# > lt = lillie.test(residuals(fm)[1:20])
# > mkhtest(lt, "lilliefors", "-")
lilliefors3 = dict(statistic=0.1333956004203103,
pvalue=0.20, parameters=(), distr='-')
lf1 = diagnostic.lilliefors(res.resid)
lf2 = diagnostic.lilliefors(res.resid**2)
lf3 = diagnostic.lilliefors(res.resid[:20])
compare_t_est(lf1, lilliefors1, decimal=(14, 14))
compare_t_est(lf2, lilliefors2, decimal=(14, 14)) # pvalue very small
assert_allclose(lf2[1],
lilliefors2['pvalue'],
rtol=1e-10)
compare_t_est(lf3, lilliefors3, decimal=(14, 1))
# R uses different approximation for pvalue in last case
def test_normality(self):
res = self.res
# > ad = ad.test(residuals(fm))
# > mkhtest(ad, "ad3", "-")
adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316,
parameters=(), distr='-')
# > ad = ad.test(residuals(fm)**2)
# > mkhtest(ad, "ad3", "-")
adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-')
# TODO: do something with this?
# > ad = ad.test(residuals(fm)[1:20])
# > mkhtest(ad, "ad3", "-")
adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933,
parameters=(), distr='-')
ad1 = diagnostic.normal_ad(res.resid)
compare_t_est(ad1, adr1, decimal=(11, 13))
ad2 = diagnostic.normal_ad(res.resid**2)
assert np.isinf(ad2[0])
ad3 = diagnostic.normal_ad(res.resid[:20])
compare_t_est(ad3, adr3, decimal=(11, 12))
@pytest.mark.skip(reason="outliers_influence not ported from upstream")
def test_influence(self):
res = self.res
# this test is slow
infl = oi.OLSInfluence(res)
# TODO: is this file ported?
path = os.path.join(cur_dir, "results/influence_lsdiag_R.json")
with open(path, 'r') as fp:
lsdiag = json.load(fp)
# basic
assert_almost_equal(np.array(lsdiag['cov.scaled']).reshape(3, 3),
res.cov_params(),
decimal=14)
assert_almost_equal(np.array(lsdiag['cov.unscaled']).reshape(3, 3),
res.normalized_cov_params,
decimal=14)
c0, c1 = infl.cooks_distance # TODO: what's c1
assert_almost_equal(c0, lsdiag['cooks'], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'],
decimal=14)
# slow:
#infl._get_all_obs() # slow, nobs estimation loop, called implicitly
dffits, dffth = infl.dffits
assert_almost_equal(dffits,
lsdiag['dfits'],
decimal=14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'],
decimal=14)
# TODO: are these files ported?
fn = os.path.join(cur_dir, "results", "influence_measures_R.csv")
infl_r = pd.read_csv(fn, index_col=0)
# not used yet:
# fn = os.path.join(cur_dir, "results", "influence_measures_bool_R.csv")
# conv = lambda s: 1 if s == 'TRUE' else 0
# converters = dict(zip(lrange(7), [conv] * 7))
#infl_bool_r = pd.read_csv(fn, index_col=0, converters=converters)
infl_r2 = np.asarray(infl_r)
assert_almost_equal(infl.dfbetas, infl_r2[:, :3], decimal=13)
assert_almost_equal(infl.cov_ratio, infl_r2[:, 4], decimal=14)
# duplicates
assert_almost_equal(dffits, infl_r2[:, 3], decimal=14)
assert_almost_equal(c0, infl_r2[:, 5], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, infl_r2[:, 6], decimal=14)
# Note: for dffits, R uses a threshold
# around 0.36, mine: dffits[1]=0.24373
# TODO: finish and check thresholds and pvalues
"""
R has
>>> np.nonzero(np.asarray(infl_bool_r["dffit"]))[0]
array([6, 26, 63, 76, 90, 199])
>>> np.nonzero(np.asarray(infl_bool_r["cov.r"]))[0]
array([4, 26, 59, 61, 63, 72, 76, 84, 91, 92, 94, 95, 108,
197, 198])
>>> np.nonzero(np.asarray(infl_bool_r["hat"]))[0]
array([62, 76, 84, 90, 91, 92, 95, 108, 197, 199])
"""
@pytest.mark.not_vetted
class TestDiagnosticGPandas(TestDiagnosticG):
@classmethod
def setup_class(cls):
d = macrodata.load_pandas().data
# growth rates
d['gs_l_realinv'] = 400 * np.log(d['realinv']).diff()
d['gs_l_realgdp'] = 400 * np.log(d['realgdp']).diff()
d['lint'] = d['realint'].shift(1)
d['tbilrate'] = d['tbilrate'].shift(1)
d = d.dropna()
cls.d = d
endogg = d['gs_l_realinv']
exogg = add_constant(d[['gs_l_realgdp', 'lint']])
exogg2 = add_constant(d[['gs_l_realgdp', 'tbilrate']])
exogg3 = add_constant(d[['gs_l_realgdp']])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
cls.res = res_ols
cls.res2 = res_ols2
cls.res3 = res_ols3
cls.endog = cls.res.model.endog
cls.exog = cls.res.model.exog
@pytest.mark.skip(reason="outliers_influence not ported from upstream")
@pytest.mark.smoke
@pytest.mark.not_vetted
def test_outlier_influence_funcs(reset_randomstate):
x = add_constant(np.random.randn(10, 2))
y = x.sum(1) + np.random.randn(10)
res = OLS(y, x).fit()
out_05 = oi.summary_table(res)
# GH#3344 : Check alpha has an effect
out_01 = oi.summary_table(res, alpha=0.01)
assert np.all(out_01[1][:, 6] <= out_05[1][:, 6])
assert np.all(out_01[1][:, 7] >= out_05[1][:, 7])
res2 = OLS(y, x[:, 0]).fit()
oi.summary_table(res2, alpha=0.05)
infl = res2.get_influence()
infl.summary_table()
@pytest.mark.skip(reason="outliers_influence not ported from upstream")
@pytest.mark.not_vetted
def test_influence_wrapped():
d = macrodata.load_pandas().data
# growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1]
# re-index these because they won't conform to lint
gs_l_realgdp.index = lint.index
gs_l_realinv.index = lint.index
data = dict(const=np.ones_like(lint), lint=lint, lrealgdp=gs_l_realgdp)
# order is important
exog = pd.DataFrame(data, columns=['const', 'lrealgdp', 'lint'])
res = OLS(gs_l_realinv, exog).fit()
# basic
# already tested
# cov.scaled and cov.unscaled have already been tested
# TODO: check that above is correct;
# comment is (roughly) copied from upstream
infl = oi.OLSInfluence(res)
# smoke test just to make sure it works, results separately tested
df = infl.summary_frame()
assert isinstance(df, pd.DataFrame)
# this test is slow
path = os.path.join(cur_dir, "results", "influence_lsdiag_R.json")
with open(path, 'r') as fp:
lsdiag = json.load(fp)
c0, c1 = infl.cooks_distance # TODO: what's c1, it's pvalues? -ss
# NOTE: we get a hard-cored 5 decimals with pandas testing
assert_almost_equal(c0, lsdiag['cooks'], 14)
assert_almost_equal(infl.hat_matrix_diag, (lsdiag['hat']), 14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'], 14)
# slow
dffits, dffth = infl.dffits
assert_almost_equal(dffits, lsdiag['dfits'], 14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'], 14)
fn = os.path.join(cur_dir, "results", "influence_measures_R.csv")
infl_r = | pd.read_csv(fn, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
'''
Loading and checking the data
'''
# ライブラリのインポート
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Set the random seed
import random
np.random.seed(1234)
random.seed(1234)
# Load the data
train = | pd.read_csv('./data/train.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib
from importlib import reload
import matplotlib.pyplot as plt
import elements
elements = reload(elements)
from elements.event import Event
import os
from scipy.fft import fft, fftfreq, ifft
#%%
#meta data
meta_event = | pd.read_csv('data/meta_data.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
import argparse, requests, glob, os, re
def main(args, url='http://archive.stsci.edu/tess/bulk_downloads/bulk_downloads_ffi-tp-lc-dv.html',
cadences=['short','fast'], columns=['files','sectors'],):
"""
Main function call to run the `ticguide` search
Parameters
----------
args : argparse.Namespace
the command line arguments
url : str
link to MAST bulk downloads page -> this should NOT need to be changed for any reason
    cadences : List(str)
        cadence identifiers to search for ('short' and 'fast')
    columns : List(str)
        keys used when collecting per-cadence download information ('files' and 'sectors')
"""
args.cadences, args.columns, args.url = cadences, columns, url
# Check that the input is correct first
if check_input(args):
# Retrieve massive table of observed TESS targets
df_all = check_table(args)
# Filter based on the TIC (or TICs) provided
get_info(df_all, args)
def check_input(args):
"""
Checks input arguments and returns `True` if the pipeline has enough information to run.
If an input csv is provided, this function will load in the csv and save the list of targets
to args.stars
Parameters
----------
args : argparse.Namespace
command-line arguments
args.fname : str
path to targets with updated observing information
Returns
-------
bool
"""
if not os.path.isfile(args.input):
args.input = os.path.join(args.path,args.input)
if not os.path.isfile(args.output):
args.output = os.path.join(args.path,args.output)
args.fname = os.path.join(args.path,'my_observed_tics.csv')
# If no targets are provided via CLI, check for todo file
if args.stars is None:
if os.path.exists(args.input):
if args.input.split('/')[-1].split('.')[-1] == 'csv' or args.input.split('/')[-1].split('.')[-1] == 'txt':
with open (args.input, "r") as f:
args.stars = [int(line.strip()) for line in f.readlines() if line[0].isnumeric()]
else:
print('\nERROR: Did not understand input file type. Please try again.\n')
else:
print('\nERROR: No targets were provided')
print('*** please either provide entries via command line \n or an input csv file with a list of TICs (via todo.csv) ***\n')
return False
return True
def check_table(args):
"""
Before crossmatching the observed TESS target list with personal targets of interest,
this function checks that the observed target list exists and is up-to-date (but there is
still work that needs to be done for this). The goal is to update this so that it will:
1) create and save the observed TESS targets if the file does not already exist,
2) otherwise it will check MAST for the current TESS sector and check if the column exists, and
3) add the new observing sector information if not already available. (TODO!!)
Parameters
----------
args : argparse.Namespace
command-line arguments
args.output : str
path to csv file for all observed TESS targets
Returns
-------
df : pandas.DataFrame
pandas dataframe containing all targets observed in TESS short- and fast-cadence by 'tic' id
"""
# If the table does not already exist, it will make a new one
# note: it asks this because it will take a little while to make
# ** you can dl the table from my github repo to skip this **
if not os.path.exists(args.output):
if args.verbose:
if args.progress:
print('\nCreating full observed target list:')
else:
print('\nCreating master list of all observed TESS targets\n *note: this will take a couple minutes if running for the first time')
df = make_table(args)
# If there is a local copy, it will first check if it is up-to-date,
# which is done by getting the latest sector from MAST and seeing if
# there is a column for the current sector -> STILL TODO
else:
# sector = get_current_sector()
df = pd.read_csv(args.output)
# filter_col = [col for col in df if not col.endswith('T')]
# cols = list(set([int(column[1:]) for column in filter_col]))
# if sector not in cols:
# update_table(args)
return df
def make_table(args):
"""
Creates a large dataframe for all targets observed by TESS in short- and fast-cadence
data. Given the success of TESS, we fortunately have many sectors so this step takes a few
minutes if creating the table from scratch. By default, it will save the table as a csv file
(which is currently ~150 Mb).
"""
# Get observed targets
get_observed_sectors(args)
# Combine them into one large dataframe
df = combine_sectors(args)
# Fill nan values
df = df.fillna(False)
if args.save:
df.index.name = 'tic'
df.to_csv(args.output)
df.reset_index(inplace=True)
return df
def get_observed_sectors(args, links=[]):
"""
    Downloads bulk download scripts from MAST and iterates through these files to create
a complete list of observed TESS targets for a given sector and cadence. It will delete
the bash scripts when finished.
"""
# Start request session to webscrape
s = requests.session()
r = s.get(args.url, headers=dict(Referer=args.url))
soup = BeautifulSoup(r.content, 'html.parser')
# Iterate through links on website and append relevant ones to list
for l in soup.find_all("a", href=re.compile('lc.sh')):
links.append('%s%s'%('/'.join(args.url.split('/')[:3]),l.get('href')))
links = list(set(links))
# Save shell scripts to local directory (path)
for link in links:
response = s.get(link)
with open('%s/%s'%(args.path,link.split('/')[-1]), "wb") as file:
file.write(response.content)
# Open files to save observed targets, which will remove the script after the text file is saved
files = glob.glob('%s/*%s'%(args.path,'lc.sh'))
for file in files:
fn = '%s/sector_%s'%(args.path,file.split('/')[-1].split('_')[-2])
if 'fast' in file:
fn+='_fast'
fn += '.txt'
with open(file, "r") as sh:
lines = sh.readlines()
lines = lines[1:]
# Iterate through lines and save the tics to text
text=''
for line in lines:
text += '%d\n'%int(line.split()[5].split('-')[2])
with open(fn, "w") as f:
f.write(text)
# Remove file once completed
os.remove(file)
def combine_sectors(args, observed={}, cols=[], all_tic=[],):
"""
Combines observed target lists by sectors and cadences into one large list/table. For now,
it iterates by file so it does not open up multiple files for each target. I am unsure if this
is the most efficient way to do this (so TBD).
Parameters
----------
args : argparse.Namespace
command-line arguments
observed : dict
observed TESS target list container
cols : List(str)
array of column names for the final observed table
all_tic : List(int)
complete list of observed tics over all cadences -> which become the indices for the final observed table
"""
# Make dictionary to save file information for all current sectors+cadences
for cadence in args.cadences:
observed[cadence]={}
for column in args.columns:
observed[cadence][column]=[]
files = glob.glob('%s/*.txt'%args.path)
for file in files:
if 'fast' in file:
idx='fast'
else:
idx='short'
observed[idx]['files'].append(file)
observed[idx]['sectors'].append(int(file.split('/')[-1].split('.')[0].split('_')[1]))
# Iterate through files and add up n_sectors per tic per cadence
for cadence in observed:
cols += ['%s%03d'%(cadence[0].upper(),sector) for sector in sorted(observed[cadence]['sectors'])]
series={}
for file in observed[cadence]['files']:
with open(file,"r") as f:
lines = f.readlines()
tics = [int(line.strip()) for line in lines]
for tic in tics:
if tic not in series:
series[tic]=1
else:
series[tic]+=1
# Make csv with totals for each cadence
s = pd.Series(series, name='n_sectors')
s.index.name = 'tic'
s.sort_values(ascending=False, inplace=True)
if args.save and args.total:
s.to_csv('%s/totals_%s.csv'%(args.path,cadence))
all_tic += s.index.values.tolist()
# Make large csv with all observed targets per sector per cadence
tics = sorted(list(set(all_tic)))
df = pd.DataFrame(index=tics, columns=cols)
# I think easiest way for now is to search by file, so we aren't opening 20+ files per target entry (but TBD)
files = glob.glob('%s/*.txt'%args.path)
if args.verbose and args.progress:
pbar = tqdm(total=len(files))
for file in files:
sector=int(file.split('/')[-1].split('.')[0].split('_')[1])
if 'fast' in file:
cadence='fast'
else:
cadence='short'
column = '%s%03d'%(cadence[0].upper(),sector)
with open(file,"r") as f:
lines = f.readlines()
tics = [int(line.strip()) for line in lines]
for tic in tics:
df.loc[tic,column]=True
os.remove(file)
if args.verbose and args.progress:
pbar.update(1)
if args.verbose and args.progress:
pbar.close()
return df
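# Illustrative sketch (not part of the original script): the layout of the
# observed-target table that combine_sectors builds -- one row per TIC and one
# boolean column per sector+cadence ('S' = short cadence, 'F' = fast cadence).
# The TIC ids and flags below are made up.
import pandas as pd


def _toy_observed_table():
    return pd.DataFrame(
        {"S001": [True, False, True],
         "S002": [True, True, False],
         "F027": [False, False, True]},
        index=[141810080, 238196512, 471015233],
    )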
def add_target_totals(df, args, reorder=[], d={}):
"""
    Adds the total number of observed sectors per target for each cadence and
    reorders the columns so that each cadence's sectors are grouped together.
"""
# add total number of sectors per target per cadence
# add part to double check this with the other csvs
for cadence in args.cadences:
d.update({'%sTOT'%cadence[0].upper():'int64'})
filter_col = [col for col in df if col.startswith(cadence[0].upper())]
filter_df = df[filter_col]
for index in df.index.values.tolist():
df.loc[index,'%sTOT'%cadence[0].upper()] = int(filter_df.loc[index].sum())
# reorder columns so that cadences are displayed together
filter_col = [col for col in df if col.startswith(cadence[0].upper())]
df_temp = df[filter_col]
reorder += sorted(df_temp.columns.values.tolist())
# reorder columns of final dataframe
df_final = | pd.DataFrame(columns=reorder) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 20:46:16 2019
@author: xiazizhe
"""
import pandas as pd
import numpy as np
from itertools import product
from functools import partial
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from pathlib import Path
import multiprocessing
import environment
def regression(df, indeps, deps):
m = LinearRegression()
X = df[indeps].values
y = df[deps].values
m.fit(X,y)
pred = m.predict(X)
mse = mean_squared_error(pred, y)
return [m.coef_, m.intercept_, mse, m.score(X,y)]
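# Illustrative sketch (not part of the original script): calling the helper
# above on a tiny made-up DataFrame to show the [coef, intercept, mse, r2]
# list it returns. Column names mirror the ones used later in this file.
import pandas as pd


def _demo_regression():
    toy = pd.DataFrame({"manhatten_distance": [1, 2, 3, 4],
                        "trip_time_in_secs": [110, 190, 310, 400]})
    coef, intercept, mse, r2 = regression(toy, ["manhatten_distance"],
                                          ["trip_time_in_secs"])
    return coef, intercept, mse, r2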
def get_manhatten_distance(df, birthplace):
df['manhatten_distance'] = (df['pickup_longitude_bin'] - birthplace[0]).apply(np.abs)\
+ (df['pickup_latitude_bin'] - birthplace[1]).apply(np.abs)
return df
def backride_reg(file, birthplace):
dtypes = {'reward':float,
'trip_distance':float,
'trip_time_in_secs':int,
'passenger_count':int,
'month':int,
'day':int,
'time':int,
'pickup_longitude_bin':int,
'pickup_latitude_bin':int,
'dropoff_longitude_bin':int,
'dropoff_latitude_bin':int}
df = pd.read_csv(file,dtype=dtypes)
df = get_manhatten_distance(df, birthplace)
reg_time = regression(df, ['manhatten_distance'], ['trip_time_in_secs'])
reg_distance = regression(df, ['manhatten_distance'], ['trip_distance'])
return reg_time, reg_distance
def backride_analysis(data_dir, birthplace):
regs_time, regs_distance = [], []
for f in Path(data_dir).glob('*'):
reg_time, reg_distance = backride_reg(f, birthplace)
regs_time.append(reg_time)
regs_distance.append(reg_distance)
regs_time = pd.DataFrame(regs_time, columns=['coef_mdist','intercept','mse','r2'])
regs_distance = pd.DataFrame(regs_distance, columns=['coef_mdist','intercept','mse','r2'])
return regs_time, regs_distance
def get_traffic_at_times(location, agent):
agent._location = location
_ = agent.reset()
traffic_at_times = agent.get_traffic_at_times()
return traffic_at_times
if __name__ == '__main__':
output_dir = './../output/'
data_dir = './../input/'
##### Predict back ride time and distance
# birthplace = (165,1356)
# regs_time, regs_distance = backride_analysis(data_dir, birthplace)
# regs_time.to_csv(output_dir+'time_on_mdist.csv', index=False)
# regs_distance.to_csv(output_dir+'distance_on_mdist.csv', index=False)
##### check location / time traffic status
driver = environment.NYCTaxiEnv()
### traffic at all times at some location
locations = [(165,1356)] + [(100*(i-1), 900+100*i) for i,j in product(range(10),range(10))]
traffic = []
# Single process
# for location in locations:
# driver._location = location
# _ = driver.reset()
# traffic_at_times = driver.get_traffic_at_times()
# traffic.append(traffic_at_times)
# traffic_df = pd.concat(traffic, axis=1)
# Multi process
p = multiprocessing.Pool(processes=12)
f = partial(get_traffic_at_times, agent=driver)
traffic = p.map(f, locations)
traffic_df = | pd.concat(traffic, axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import requests
from fake_useragent import UserAgent
import io
import os
import time
import json
import demjson
from datetime import datetime
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Main Economic Indicators: https://alfred.stlouisfed.org/release?rid=205
url = {
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"philfed": "https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/",
"chicagofed": "https://www.chicagofed.org/~/media/publications/",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
def date_transform(df, format_origin, format_after):
return_list = []
for i in range(0, len(df)):
return_list.append(datetime.strptime(df[i], format_origin).strftime(format_after))
return return_list
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: Gross Domestic Product
Description: Billions of Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDP",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "GDP"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["GDP"] = df["GDP"].astype(float)
return df
def gdpc1_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Real Gross Domestic Product
Description: Billions of Chained 2012 Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDPC1",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def oecd_gdp_monthly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: Leading Indicators OECD, Reference Series: Gross Domestic Product (Normalised), United States
    Description: Monthly, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USALORSGPNOSTSAM",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def payems_monthly(startdate="1939-01-01", enddate="2021-01-01"):
"""
Full Name: All Employees, Total Nonfarm
    Description: Thousands of Persons, Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PAYEMS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "Payems"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["Payems"] = df["Payems"].astype(float)
return df
def ppi():
tmp_url = url["fred_econ"] + "bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=PPIACO,PCUOMFGOMFG&scale=left,left&cosd=1913-01-01,1984-12-01&coed=2021-04-01,2021-04-01&line_color=%234572a7,%23aa4643&link_values=false,false&line_style=solid,solid&mark_type=none,none&mw=3,3&lw=2,2&ost=-99999,-99999&oet=99999,99999&mma=0,0&fml=a,a&fq=Monthly,Monthly&fam=avg,avg&fgst=lin,lin&fgsnd=2020-02-01,2020-02-01&line_index=1,2&transformation=lin,lin&vintage_date=2021-06-10,2021-06-10&revision_date=2021-06-10,2021-06-10&nd=1913-01-01,1984-12-01"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df["DATE"] = | pd.to_datetime(df["DATE"], format="%Y-%m-%d") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
data_proc.py
Calculate and plot analyzed data from centrifugation experiment
Handles the primary functions
"""
import sys
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
SUCCESS = 0
INVALID_DATA = 1
IO_ERROR = 2
DEF_CSV_FILE = 'data.csv'
DEF_EXCEL_FILE = 'data.xlsx'
def warning(*objs):
"""Writes a message to stderr."""
print("WARNING: ", *objs, file=sys.stderr)
def csv_data_analysis(csv_data_file):
"""
Calculates solvent concentration. Finds aging time and g dried cake/g oil for each row
Parameters
----------
    csv_data_file : csv file containing array of experiment data
first row: solution information
second row and below: each row contains data from a run of centrifugation
(see README.md for more detailed description)
Returns
-------
cent_data : numpy array
first row: solvent concentration
second row and below: each row contains columns of aging time (h), aging time (s),
dried cake concentration (g/g oil)
"""
data_array = pd.read_csv(csv_data_file, comment='#', header=None)
# calculate solvent concentration
solvent_conc = np.divide(np.subtract(data_array.loc[[0], [3]], data_array.loc[[0], [2]]),
np.subtract(data_array.loc[[0], [3]], data_array.loc[[0], [1]]))
# find the start time of the experiment
start_time = data_array.loc[[0], [0]].values
# gather centrifugation data into separate arrays
expt_array = data_array[[1, 2, 3]].iloc[1:]
cent_time = data_array[[0]].iloc[1:]
# assign variables to each column of expt_array
empty_tube = expt_array[1]
tube_liquid = expt_array[2]
tube_dried_cake = expt_array[3]
# calculate mass of tube contents
mass_liquid = tube_liquid - empty_tube
mass_dried_cake = tube_dried_cake - empty_tube
mass_oil = (1-solvent_conc.iloc[0][3])*mass_liquid
# calculate solution aging time at each centrifugation
aging_time = [
pd.to_datetime(cent_time.values[i, 0])-pd.to_datetime(start_time[0, 0])
for i in range(len(cent_time.values))
]
aging_time_sec = pd.Series(aging_time).dt.total_seconds()
aging_time_hrs = aging_time_sec/3600
# calculate dried cake concentration
conc_dried_cake = mass_dried_cake/mass_oil
cent_data = pd.concat([aging_time_hrs, aging_time_sec, conc_dried_cake.reset_index(drop=True)], axis=1)
return cent_data
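# Illustrative sketch (not part of the original script): how the aging time is
# derived above -- each centrifugation timestamp minus the experiment start
# time, converted to seconds and then to hours. The timestamps are made up.
import pandas as pd


def _demo_aging_time():
    start = pd.to_datetime("2019-04-11 09:00")
    spins = pd.Series(pd.to_datetime(["2019-04-11 10:30", "2019-04-11 13:00"]))
    aging_sec = (spins - start).dt.total_seconds()
    aging_hrs = aging_sec / 3600
    return pd.concat([aging_hrs, aging_sec], axis=1)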
def excel_data_analysis(excel_data_file):
"""
Calculates solvent concentration. Finds aging time in hrs and seconds, and g dried cake/g oil for each row.
Works for excel file with multiple sheets
Parameters
----------
excel_data_file : excel file containing array of experiment data
first row: solution information
second row and below: each row contains data from a run of centrifugation
(see README.md for more detailed description)
data from an experiment in each sheet
Returns
-------
cent_data : pandas DataFrame
first row: solvent concentration
second row and below: each row contains columns of aging time (h), aging time (s),
dried cake concentration (g/g oil)
"""
i = 0
frame = None
# Concatenate analyzed data from each sheet in excel file
while i >= 0:
try:
calc_data = calcAndConc(excel_data_file, i)
frame = pd.concat([frame, calc_data], axis=1)
i = i + 1
except:
break
cent_data = frame
return cent_data
def calcAndConc(excel_data_file, i):
"""
Calculates solvent concentration. Finds aging time in hrs and seconds, and g dried cake/g oil for each row.
:param excel_data_file: excel file to read data from
:param i: sheet number of excel file data will be pulled from
:return: pandas DataFrame of aging time (hrs), aging time (sec), and dried cake conc (g/g oil) from data set
of sheet i
"""
all_data = pd.read_excel(excel_data_file, sheet_name=i)
# Separate solvent addition data
solvent_add_data = all_data.iloc[[0], [0, 1, 2, 3]]
# Separate centrifugation data
data_array = all_data.iloc[:, 4:8].dropna(0)
# Calculate solvent concentration
solvent_conc = np.divide(np.subtract(solvent_add_data.values[0, 3], solvent_add_data.values[0, 2]),
np.subtract(solvent_add_data.values[0, 3], solvent_add_data.values[0, 1]))
# find the start time of the experiment
start_time = solvent_add_data.values[0, 0]
# gather centrifugation data into separate arrays
expt_array = data_array.iloc[:, 1:4]
cent_time = data_array.iloc[:, 0]
# assign variables to each column of expt_array
empty_tube = expt_array.values[:, 0]
tube_liquid = expt_array.values[:, 1]
tube_dried_cake = expt_array.values[:, 2]
# calculate mass of tube contents
mass_liquid = tube_liquid - empty_tube
mass_dried_cake = tube_dried_cake - empty_tube
mass_oil = (1-solvent_conc)*mass_liquid
# calculate solution aging time at each centrifugation
aging_time = cent_time - start_time
aging_time_sec = pd.Series(aging_time).dt.total_seconds()
aging_time_hrs = aging_time_sec/3600
# calculate dried cake concentration
conc_dried_cake = pd.Series(mass_dried_cake/mass_oil)
cent_data = pd.concat([aging_time_hrs, aging_time_sec, conc_dried_cake], axis=1)
cent_data.columns = [0, 1, 2]
return cent_data
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser()
# add csv file as argument
parser.add_argument("-c", "--csv_data_file", help="Location of csv file with data "
"from a single experiment to be analyzed",
default=DEF_CSV_FILE)
parser.add_argument("-e", "--excel_data_file", help="Location of excel file with data "
"from multiple experiments to be analyzed",
default=DEF_EXCEL_FILE)
args = parser.parse_args(argv)
if args.excel_data_file == DEF_EXCEL_FILE:
try:
args.csv_data = | pd.read_csv(args.csv_data_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
class TestConvert(tm.TestCase):
def test_convert_objects(self):
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
self.assertTrue(result.dtype == np.object_)
def test_convert_objects_ints(self):
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.integer))
def test_convert_objects_complex_number(self):
for dtype in np.sctypes['complex']:
arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.complexfloating))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
def test_lisscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
        self.assertFalse(lib.isscalar(pd.Series([1])))
import pandas as pd
import numpy as np
# from pandas.core.tools.datetimes import normalize_date
from pandas._libs import tslib
from backend.robinhood_api import RobinhoodAPI
class RobinhoodData:
"""
Wrapper to download orders and dividends from Robinhood accounts
Downloads two dataframes and saves to datafile
----------
Parameters:
datafile : location of h5 datafile
"""
def __init__(self, datafile):
self.datafile = datafile
def _login(self, user, password):
self.client = RobinhoodAPI()
        # try to import the module with passwords
try:
_temp = __import__('auth')
self.client.login(_temp.local_user, _temp.local_password)
except:
self.client.login(username=user, password=password)
return self
# private method for getting all orders
def _fetch_json_by_url(self, url):
return self.client.session.get(url).json()
# deleting sensitive or redundant fields
def _delete_sensitive_fields(self, df):
for col in ['account', 'url', 'id', 'instrument']:
if col in df:
del df[col]
return df
# download orders and fields requiring RB client
def _download_orders(self):
print("Downloading orders from Robinhood")
orders = []
past_orders = self.client.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
next_url = past_orders['next']
past_orders = self._fetch_json_by_url(next_url)
orders.extend(past_orders['results'])
df = pd.DataFrame(orders)
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='created_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_ord = self._delete_sensitive_fields(df)
return df_ord
# download dividends and fields requiring RB client
def _download_dividends(self):
print("Downloading dividends from Robinhood")
dividends = self.client.dividends()
dividends = [x for x in dividends['results']]
df = pd.DataFrame(dividends)
if df.shape[0] > 0:
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='paid_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_div = self._delete_sensitive_fields(df)
else:
df_div = pd.DataFrame(columns=['symbol', 'amount', 'position',
'rate', 'paid_at', 'payable_date'])
return df_div
# process orders
def _process_orders(self, df_ord):
# assign to df and reduce the number of fields
df = df_ord.copy()
fields = [
'created_at',
'average_price', 'cumulative_quantity', 'fees',
'symbol', 'side']
df = df[fields]
# convert types
for field in ['average_price', 'cumulative_quantity', 'fees']:
            df[field] = pd.to_numeric(df[field])
from collections import defaultdict
from ..apps.clashfilter import df_ideal_ala, rel_coords_dict, Clash, ClashVDM, make_pose_df, \
backbone_str, Contact, make_df_corr, VdmReps
import pickle
import numpy as np
import pandas as pd
from ..apps.transformation import get_rot_trans
from prody import calcPhi, calcPsi, writePDB, AtomGroup
from sklearn.neighbors import NearestNeighbors
from ..apps.convex_hull import AlphaHull
from numba import jit
import time
import os
import copy
import random
import itertools
from scipy.spatial.distance import cdist
coords = ['c_x', 'c_y', 'c_z', 'c_D_x', 'c_D_y',
'c_D_z', 'c_H1_x', 'c_H1_y', 'c_H1_z',
'c_H2_x', 'c_H2_y', 'c_H2_z',
'c_H3_x', 'c_H3_y', 'c_H3_z',
'c_H4_x', 'c_H4_y', 'c_H4_z',
'c_A1_x', 'c_A1_y', 'c_A1_z',
'c_A2_x', 'c_A2_y', 'c_A2_z']
class Template:
def __init__(self, pdb):
self.pdb = pdb # pdb should be prody object poly-gly with CA hydrogens for design.
self.dataframe = make_pose_df(self.pdb)
self.alpha_hull = None
@staticmethod
def get_bb_sel(pdb):
return pdb.select(backbone_str).copy()
def get_phi_psi(self, seg, chain, resnum):
res = self.pdb[seg, chain, resnum]
try:
phi = calcPhi(res)
except ValueError:
phi = None
try:
psi = calcPsi(res)
except ValueError:
psi = None
return phi, psi
def set_alpha_hull(self, pdb_w_CB, alpha=9):
self.pdb_w_CB = pdb_w_CB
self.alpha_hull = AlphaHull(alpha)
self.alpha_hull.set_coords(pdb_w_CB)
self.alpha_hull.calc_hull()
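# Hedged usage sketch (not from the original module): constructing a Template from a
# prody structure and querying backbone dihedrals; the file name and the segment/
# chain/resnum values are assumptions for illustration only.
def _example_template_usage(pdb_path='pose_polygly.pdb'):
    from prody import parsePDB  # prody is already a dependency of this module
    template = Template(parsePDB(pdb_path))
    phi, psi = template.get_phi_psi('A', 'A', 10)
    return template.dataframe, phi, psi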
class Load:
"""Doesn't yet deal with terminal residues (although phi/psi does)"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.path = kwargs.get('path', './') # path to sig reps
self.sequence_csts = kwargs.get('sequence_csts') # keys1 are tuples (seq, ch, #), keys2 are label,
# vals are allowed residue names (three letter code).
self.dataframe = pd.DataFrame()
self.dataframe_grouped = None
self._rot = defaultdict(dict)
self._mobile_com = defaultdict(dict)
self._target_com = defaultdict(dict)
self._sig_reps = defaultdict(dict)
self._ideal_ala_df = defaultdict(dict)
self._nonclashing = list()
self.remove_from_df = kwargs.get('remove_from_df') # e.g. {1: {'chain': 'Y', 'name': 'CB', 'resname': 'ASN'},
# 2: {'chain': 'Y', 'name': 'CG', 'resname': 'GLN'}}
@staticmethod
def _get_targ_coords(template, label, seg, chain, resnum):
sel_str = 'segment ' + seg + ' chain ' + chain + ' resnum ' + str(resnum) + ' name '
cs = []
for n in rel_coords_dict[label]:
try:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
except AttributeError:
try:
cs = []
for n in ['N', '1H', 'CA']:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
return np.stack(cs)
except AttributeError:
try:
cs = []
for n in ['N', 'H1', 'CA']:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
return np.stack(cs)
except AttributeError:
sel_str = 'chain ' + chain + ' resnum ' + str(resnum) + ' name '
cs = []
for n in rel_coords_dict[label]:
try:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
except AttributeError:
cs = []
for n in ['N', '1H', 'CA']:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
return np.stack(cs)
return np.stack(cs)
return np.stack(cs)
@staticmethod
def _get_mob_coords(df, label):
        return np.stack([df[df['name'] == n][['c_x', 'c_y', 'c_z']].values.flatten()
                         for n in rel_coords_dict[label]])
def set_rot_trans(self, template):
for seg, chain, resnum in self.sequence_csts.keys():
for label, df in df_ideal_ala.items():
mob_coords = self._get_mob_coords(df, label)
targ_coords = self._get_targ_coords(template, label, seg, chain, resnum)
R, m_com, t_com = get_rot_trans(mob_coords, targ_coords)
self._rot[label][(seg, chain, resnum)] = R
self._mobile_com[label][(seg, chain, resnum)] = m_com
self._target_com[label][(seg, chain, resnum)] = t_com
df_ = df.copy()
df_[['c_x', 'c_y', 'c_z']] = np.dot(df_[['c_x', 'c_y', 'c_z']] - m_com, R) + t_com
self._ideal_ala_df[label][(seg, chain, resnum)] = df_
def _import_sig_reps(self):
labels_resns = defaultdict(set)
for tup in self.sequence_csts.keys():
for label in self.sequence_csts[tup].keys():
labels_resns[label] |= set(self.sequence_csts[tup][label])
for label in labels_resns.keys():
for resn in labels_resns[label]:
try:
with open(self.path + label + '/' + resn + '.pkl', 'rb') as infile:
self._sig_reps[label][resn] = pickle.load(infile)
except FileNotFoundError:
pass
@staticmethod
def _get_phi_psi_df(df, phi, psi, phipsi_width=60):
if phi is not None:
phi_high = df['phi'] < (phi + (phipsi_width / 2))
phi_low = df['phi'] > (phi - (phipsi_width / 2))
else:
phi_high = np.array([True] * len(df))
phi_low = phi_high
if psi is not None:
psi_high = df['psi'] < (psi + (phipsi_width / 2))
psi_low = df['psi'] > (psi - (phipsi_width / 2))
else:
psi_high = np.array([True] * len(df))
psi_low = psi_high
return df[phi_high & phi_low & psi_high & psi_low]
@staticmethod
def chunk_df(df_gr, gr_chunk_size=100):
grs = list()
for i, (n, gr) in enumerate(df_gr):
grs.append(gr)
if (i + 1) % gr_chunk_size == 0:
                yield pd.concat(grs)
import operator
import os
import networkx as nx
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from .constants import NUMBER_OF_BINS
from .largest_connected_component import largest_weakly_connected_component
from .logger import logger
def get_travel_times(portcalls: pd.DataFrame) -> np.ndarray:
"""Return an array containing all travel times in seconds for all ships."""
cols = ('ship', 'arrival', 'departure')
assert all([col in portcalls.columns for col in cols])
return (
portcalls
.groupby('ship')
.apply(lambda x: (x.arrival - x.departure.shift(1)).dt.seconds // 60)
.dropna()
.values
)
def get_port_stays(portcalls: pd.DataFrame) -> np.ndarray:
"""Return an array containing for each portcall how long the ship was in the
port."""
df = ((portcalls['departure'] - portcalls['arrival']).dt.seconds // 60)
return df.values
def hist(x: np.ndarray, bins: int) -> np.ndarray:
"""Return the values of the histogram."""
return np.histogram(x, bins)[0]
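# Illustrative sketch (not in the original module): turning the raw travel times and
# port stays into fixed-length histogram features; the concatenation order is an
# assumption.
def _example_behaviour_histograms(portcalls: pd.DataFrame, bins: int = NUMBER_OF_BINS) -> np.ndarray:
    travel_times = hist(get_travel_times(portcalls), bins)
    port_stays = hist(get_port_stays(portcalls), bins)
    return np.concatenate([travel_times, port_stays])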
def get_features(
portcalls_network: pd.DataFrame,
portcalls_classification: pd.DataFrame,
network: nx.DiGraph,
number_of_bins: int = NUMBER_OF_BINS) -> pd.DataFrame:
"""Do the entire feature engineering. These features serve as input for the
fair random forest classifier. It models behaviour of the ships, which is
derived from the portcall data.
"""
def div(x, y):
"""Same as x / y, except that it handles the division by zero."""
large_int = 2000 # Largest encountered value is 1184
if isinstance(x, pd.Series):
return (x / y).replace({np.inf: large_int, np.nan: 0})
else:
if x == 0:
return 0
elif y != 0:
return x / y
else:
return large_int
# Get the following node measures:
# - degree
# - in-degree
# - out-degree
# - strength
# - in-strength
# - out-strength
# - closeness centrality (weighted and unweighted)
# - betweenness centrality (weighted and unweighted)
# - eigenvector centrality (weighted and unweighted)
network = largest_weakly_connected_component(network)
node_dict = dict(network.nodes(data=True))
    node_measures = pd.DataFrame.from_dict(node_dict, orient='index')
import argparse
from datetime import datetime
import pandas as pd
import re
import os
from tabulate import tabulate
from ast import literal_eval
import numpy as np
def init_data(
) -> pd.DataFrame:
"""
Return
-------
Plan: pd.DataFrame. Item of the planner
Notes
-------
    Reads the plan from the file "pwd/../data/data.csv" and initialises it
    into the plan pandas DataFrame. If the "data" folder, "data.csv", or both
    do not exist, they are created.
"""
# Features of the plan
features = ["title", "note", "date", "tags"]
# Initialise the plan as dataframe object
    plan = pd.DataFrame(columns=features)
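# Hedged sketch (not the original implementation): one way init_data could continue,
# reading an existing CSV or creating it from an empty frame; the relative path is an
# assumption taken from the docstring above.
def _example_load_plan(path="../data/data.csv") -> pd.DataFrame:
    features = ["title", "note", "date", "tags"]
    if os.path.exists(path):
        return pd.read_csv(path)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    empty_plan = pd.DataFrame(columns=features)
    empty_plan.to_csv(path, index=False)
    return empty_plan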
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
        expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
from Modules.FileManager import FileManager, ProjectFileManager
from PIL import Image
from PIL import ImageEnhance
import matplotlib.pyplot as plt
# These disable some of the default key strokes that we will use
plt.rcParams['keymap.all_axes'] = '' # a
plt.rcParams['keymap.back'] = ['left', 'backspace', 'MouseButton.BACK'] # c
plt.rcParams['keymap.pan'] = '' # p
plt.rcParams['keymap.quit'] = ['ctrl+w', 'cmd+w']
# Import buttons that we will use to make this interactive
from matplotlib.widgets import Button
from matplotlib.widgets import RadioButtons
from matplotlib.widgets import RectangleSelector
from matplotlib.widgets import Slider
# Import some patches that we will use to display the annotations
from matplotlib.patches import Rectangle
from matplotlib.colors import Normalize
import pdb, datetime, os, subprocess, argparse, random
import pandas as pd
class Annotation:
def __init__(self, other):
self.other = other
self.sex = ''
self.coords = ()
self.poses = ()
        self.rectangle = None
        self.lastRectangle = None  # track the previous box so removePatches() works before the first reset()
def addRectangle(self):
if self.rectangle is None:
self.rectangle = Rectangle((self.coords[0], self.coords[1]), self.coords[2], self.coords[3],
fill=False, edgecolor='green', linewidth=1.4, figure=self.other.fig)
self.other.ax_image.add_patch(self.rectangle)
self.other.cur_text.set_text('BB: ' + str(self.coords))
else:
self.other.error_text.set_text('Error: Rectangle already exists')
def removePatches(self):
if self.lastRectangle is None:
self.other.error_text.set_text('Cant remove annotation. Reset frame instead')
self.other.fig.canvas.draw()
return False
try:
self.other.ax_image.patches.remove(self.lastRectangle)
except ValueError:
pass
return True
def retRow(self):
if self.coords == ():
return 'Must create bounding box before saving an annotation'
return [self.other.pid, self.other.frames[self.other.frame_index], self.sex, self.coords, self.other.user,
self.other.now]
def reset(self):
self.sex = ''
self.coords = ()
self.poses = ()
self.lastRectangle = self.rectangle
self.rectangle = None
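# Note (illustrative): Annotation.retRow() yields rows of the form
# [ProjectID, Framefile, Sex, Box, User, DateTime], which matches the columns of the
# per-frame dataframe f_dt created in Annotator.__init__ below.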
class Annotator:
def __init__(self, project_file_manager: ProjectFileManager, number=10, dry=False):
self.pfm = project_file_manager
self.image_dir = self.pfm.local_paths['image_dir']
self.labeled_frames_csv = self.pfm.local_paths['labeled_frames_csv']
self.boxed_fish_csv = self.pfm.local_paths['boxed_fish_csv']
self.number = number
self.pid = self.pfm.pid
self.dry = dry
self.frames = sorted([x for x in os.listdir(self.image_dir) if '.jpg' in x and '._' not in x])
random.Random(4).shuffle(self.frames)
# self.frames = sorted([x for x in os.listdir(self.frameDirectory) if '.jpg' in x and '._' not in x]) # remove annoying mac OSX files
assert len(self.frames) > 0
# Keep track of the frame we are on and how many we have annotated
self.frame_index = 0
self.annotated_frames = []
        # Initialize lists to hold annotated objects
self.coords = ()
# Create dataframe to hold annotations
if os.path.exists(self.labeled_frames_csv):
self.dt = pd.read_csv(self.labeled_frames_csv, index_col=0)
else:
self.dt = pd.DataFrame(columns=['ProjectID', 'Framefile', 'Nfish', 'Sex', 'Box', 'User', 'DateTime'])
self.f_dt = pd.DataFrame(columns=['ProjectID', 'Framefile', 'Sex', 'Box', 'User', 'DateTime'])
# Get user and current time
self.user = os.getenv('USER')
self.now = datetime.datetime.now()
# Create Annotation object
self.annotation = Annotation(self)
#
self.annotation_text = ''
# Start figure
self._createFigure()
# if not a dry run, upload results
if not dry:
self._upload_results()
def _upload_results(self):
        labeled_frames_df = pd.read_csv(self.labeled_frames_csv, index_col=0)
import pandas as pd
import sqlalchemy as db
import configparser
import logging
from logging.config import fileConfig
# Configs
config = configparser.ConfigParser()
config.read('conf/.env')
fileConfig('conf/logging_config.ini')
logger = logging.getLogger()
# Database connection URI
db_engine = db.create_engine(
"mysql+pymysql://{}:{}@{}:{}/{}".format(config['database']['user'],
config['database']['password'],
config['database']['host'],
config['database']['port'],
config['database']['db']))
# Data warehouse connection URI
dw_engine = db.create_engine(
"mysql+pymysql://{}:{}@{}:{}/{}".format(config['data-warehouse']['user'],
config['data-warehouse']['password'],
config['data-warehouse']['host'],
config['data-warehouse']['port'],
config['data-warehouse']['db']))
def get_factSales_last_id(db_engine):
"""Function to get last sales_key from fact table `factSales`"""
query = "SELECT max(sales_key) AS last_id FROM factSales"
    tdf = pd.read_sql(query, db_engine)
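# Hedged sketch (not the original code): how the helper above might finish, falling
# back to 0 when the fact table is still empty; the column alias follows the query.
def _example_last_sales_key(engine) -> int:
    tdf = pd.read_sql("SELECT max(sales_key) AS last_id FROM factSales", engine)
    last_id = tdf["last_id"].iloc[0]
    return 0 if pd.isna(last_id) else int(last_id)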
#!/usr/bin/env python3
# Author: <NAME> (<EMAIL>)
# License: BSD-3-Clause
import multiprocessing
import time
import os
import sys
import re
import logging
import argparse
import tarfile
from ztflc import forcephotometry
from ztflc.io import LOCALDATA
import numpy as np
import ztfquery
import pandas as pd
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.utils.console import ProgressBar
import requests.exceptions
from ztffps import database, credentials
from ztffps.thumbnails import generate_thumbnails
from ztffps.utils import calculate_magnitudes
try:
ZTFDATA = os.getenv("ZTFDATA")
FORCEPHOTODATA = os.path.join(ZTFDATA, "forcephotometry")
except (TypeError, NameError):
print(
"You have to export the environment variable ZTFDATA in your bash profile; e.g. export ZTFDATA='ABSOLUTE/PATH/TO/ZTFDATA/'\nNote the trailing slash is important!"
)
# Define servers
MAILSERVER = "smtp-auth.desy.de"
MAILPORT = 587
# Define and create directories
METADATA = os.path.join(FORCEPHOTODATA, "meta")
COSMODATA = os.path.join(ZTFDATA, "cosmology")
MARSHALDATA = os.path.join(ZTFDATA, "marshal")
SALTDATA = os.path.join(FORCEPHOTODATA, "salt")
PLOTDATA = os.path.join(FORCEPHOTODATA, "plots")
PLOT_DATAFRAMES = os.path.join(PLOTDATA, "dataframes")
THUMBNAILS = os.path.join(PLOTDATA, "thumbnails")
for path in [
METADATA,
COSMODATA,
MARSHALDATA,
SALTDATA,
PLOTDATA,
PLOT_DATAFRAMES,
THUMBNAILS,
]:
if not os.path.exists(path):
os.makedirs(path)
class ForcedPhotometryPipeline:
""" """
def __init__(
self,
file_or_name=None,
daysago=None,
daysuntil=None,
jdmin=None,
jdmax=None,
snt=5.0,
mag_range=None,
flux_range=None,
ra=None,
dec=None,
nprocess=4,
reprocess=False,
sciimg=False,
update_enforce=False,
update_disable=False,
ampel=False,
download_newest=True,
filecheck=False,
verbose=False,
logger=None,
):
self.startime = time.time()
if logger is None:
self.logger = logging.getLogger("pipeline")
else:
self.logger = logger
# check for IRSA credentials
_, _ = credentials.get_user_and_password("<PASSWORD>")
if file_or_name is None:
self.logger.error(
"You have to initialize this class with at least one name of a ZTF object for which to perform forced photometry a textfile containing one ZTF name per line or an arbitrary name if the -radec option is chosen."
)
else:
self.file_or_name = file_or_name
if not self.logger.hasHandlers():
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
fileHandler = logging.FileHandler("./log")
fileHandler.setFormatter(logFormatter)
self.logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
self.logger.addHandler(consoleHandler)
self.logger.setLevel(logging.INFO)
self.daysago = daysago
self.daysuntil = daysuntil
self.jdmin = jdmin
self.jdmax = jdmax
self.snt = snt
self.mag_range = mag_range
self.flux_range = flux_range
self.reprocess = reprocess
self.nprocess = nprocess
self.sciimg = sciimg
self.update_enforce = update_enforce
self.update_disable = update_disable
self.ampel = ampel
self.download_newest = download_newest
self.verbose = verbose
self.filecheck = filecheck
if self.jdmin or self.jdmax:
self.convert_jd_to_days()
else:
self.convert_daysago_to_jd()
# parse different formats of ra and dec
if ra is not None and dec is not None:
if str(ra)[2] == ":" or str(ra)[2] == "h":
coords = SkyCoord(f"{ra} {dec}", unit=(u.hourangle, u.deg))
else:
coords = SkyCoord(f"{ra} {dec}", unit=u.deg)
            self.ra = float(
                coords.ra.to_string(decimal=True, unit=u.deg, precision=8)
            )
            self.dec = float(
                coords.dec.to_string(decimal=True, unit=u.deg, precision=8)
            )
if isinstance(self.file_or_name, list):
self.object_list = self.file_or_name
else:
self.object_list = [self.file_or_name]
self.update_database_with_given_radec()
elif (ra is None and dec is not None) or (ra is not None and dec is None):
self.logger.info("Either both set ra and dec or none.")
raise ValueError
else:
self.ra = None
self.dec = None
if isinstance(self.file_or_name, str):
self.use_if_ztf()
elif isinstance(self.file_or_name, list):
self.object_list = self.file_or_name
else:
raise TypeError
self.check_for_duplicates()
if not self.update_disable:
self.get_position_and_timerange()
self.check_if_present_in_metadata()
def is_ztf_name(self, name):
"""
Checks if a string adheres to the ZTF naming scheme
"""
        return re.match(r"^ZTF[1-2]\d[a-z]{7}$", name)
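        # Illustrative examples (hypothetical names): is_ztf_name("ZTF19aaklqod")
        # returns a re.Match object, while is_ztf_name("SN2019xyz") returns None.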
def convert_daysago_to_jd(self):
"""
Converts days since now and daysuntil to Julian dates
"""
now = Time(time.time(), format="unix", scale="utc").jd
if self.daysago is None:
self.jdmin = 2458100
else:
self.jdmin = now - self.daysago
if self.daysuntil is None:
self.jdmax = now
else:
self.jdmax = now - self.daysuntil
def convert_jd_to_days(self):
"""
Converts jdmin and jdmax to integers (daysfromnow and daysuntil)
"""
now = Time(time.time(), format="unix", scale="utc").jd
if self.jdmin:
self.daysago = now - self.jdmin
else:
self.daysago = now - 2458100
if self.jdmax:
self.daysuntil = now - self.jdmax
else:
self.jdmax = now
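        # Illustrative example: with jdmin=2459000 and no jdmax, daysago becomes the
        # number of days between now and JD 2459000, and jdmax defaults to now.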
def use_if_ztf(self):
"""
Checks if name argument is a ZTF name (must fit ZTF naming convention),
an ascii file containing ZTF names (1 per line) in the program
directory or an arbitrary name if -radec argument to the
pipeline class
"""
errormessage = "\nYou have to provide a either a ZTF name (a string adhering to the ZTF naming scheme), an ascii file containing ZTF names (1 per line) in the same directory or an arbitrary name if using the radec option.\n"
if self.is_ztf_name(self.file_or_name):
self.object_list = [self.file_or_name]
else:
self.object_list = []
try:
file = open(f"{self.file_or_name}", "r")
self.lines = file.read().splitlines()
for line in self.lines:
if self.is_ztf_name(line):
self.object_list.append(line)
except FileNotFoundError as error:
self.logger.error(errormessage)
raise error
assert (
self.object_list[0][:3] == "ZTF" and len(self.object_list[0]) == 12
), errormessage
# Grammar check
if len(self.object_list) == 1:
self.logger.info(
f"Doing forced photometry for {len(self.object_list)} transient"
)
else:
self.logger.info(
f"Doing forced photometry for {len(self.object_list)} transients"
)
self.logger.info("Logs are stored in log")
def check_for_duplicates(self):
"""
Removes duplicates from the list of ZTF objects
"""
self.object_list = list(dict.fromkeys(self.object_list))
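        # e.g. list(dict.fromkeys(["ZTF19aaklqod", "ZTF19aaklqod", "ZTF20abcdefg"]))
        # -> ["ZTF19aaklqod", "ZTF20abcdefg"]; dict.fromkeys de-duplicates while
        # preserving the original order (hypothetical names).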
def update_database_with_given_radec(self):
"""
Updates the MongoDB entry of the first entry in the object
list with -radec passed to pipeline class
"""
name = self.object_list[0]
database.update_database(
name,
{
"name": name,
"ra": self.ra,
"dec": self.dec,
"jdmin": self.jdmin,
"jdmax": self.jdmax,
"entries": 0,
"coords_per_filter": [np.nan, np.nan, np.nan],
},
)
def get_position_and_timerange(self):
"""
Check for entry in Mongo database and update it via AMPEL or Marshal
"""
self.logger.info("\nChecking database.")
progress_bar = ProgressBar(len(self.object_list))
needs_external_database = []
if self.update_enforce:
self.logger.info("\nForced update of alert data data from AMPEL/Fritz.")
query = database.read_database(self.object_list, ["_id", "entries", "ra"])
for index, name in enumerate(self.object_list):
if (
query["entries"][index] == None
or query["entries"][index] < 10
or self.update_enforce
or np.isnan(query["ra"][index])
):
needs_external_database.append(name)
progress_bar.update(index)
progress_bar.update(len(self.object_list))
if not self.ampel:
self.logger.info("\nConnecting to Marshal (or AMPEL if Marshal is down).")
else:
self.logger.info("\nConnecting to AMPEL.")
from ztffps import connectors
marshal_failed = True
ampel_failed = True
if not self.ampel:
try:
connector = connectors.MarshalInfo(needs_external_database, nprocess=32)
marshal_failed = False
except (
ConnectionError,
requests.exceptions.ConnectionError,
ValueError,
):
marshal_failed = True
if marshal_failed or self.ampel:
# try:
connector = connectors.AmpelInfo(
ztf_names=needs_external_database, logger=self.logger
)
ampel_failed = False
# except:
# ampel_failed = True
if marshal_failed and ampel_failed:
self.logger.error(
"\nConnection to Marshal and AMPEL failed. Temporary outages for the\n"
"Marshal are frequent. Problems with AMPEL are most likely due to a \n"
"problem with your .ssh/config.\nProceeding with local database.\n"
"CAUTION: Data could be missing or not be up-to-date!!!"
)
if self.jdmin is None:
if self.daysago is None:
self.logger.info(
"\nNo 'daysago' given, full timerange since ZTF operations used."
)
else:
if self.daysuntil is None:
self.logger.info(
f"\nData from {self.daysago:.2f} days ago till today is used."
)
else:
self.logger.info(
f"\nData from {self.daysago:.2f} days ago till {self.daysuntil:.2f} days ago is used."
)
now = Time(time.time(), format="unix", scale="utc").jd
if not (marshal_failed and ampel_failed):
self.logger.info("\nUpdating local database.")
progress_bar = ProgressBar(len(connector.queryresult))
for index, result in enumerate(connector.queryresult):
if result is not None:
database.update_database(
result[0],
{
"_id": result[0],
"ra": result[1],
"dec": result[2],
"jdmin": self.jdmin,
"jdmax": self.jdmax,
"entries": result[3],
"lastobs": result[10],
"jdobs_alert": result[4],
"mag_alert": result[5],
"magerr_alert": result[6],
"maglim_alert": result[7],
"fid_alert": result[8],
"magzp_alert": result[11],
"magzp_err_alert": result[12],
"coords_per_filter": result[13],
},
)
progress_bar.update(index)
progress_bar.update(len(connector.queryresult))
def check_if_present_in_metadata(self):
"""
Check for which objects there are infos available
Delete from object-list if no info is available
"""
self.logger.info("\nChecking if alert data is present in the local database.")
query = database.read_database(self.object_list, ["name", "entries"])
not_found = []
del_indices = []
for index, entry in enumerate(query["entries"]):
if entry == None:
not_found.append(self.object_list[index])
del_indices.append(index)
if not_found:
for index in sorted(del_indices, reverse=True):
del self.object_list[index]
self.logger.info(
f"\nThese could not be found in meta database. Will not be downloaded or fit: {not_found}"
)
def download(self):
"""
Download the requested objects in self.object_list from
IPAC using ztfquery
"""
number_of_objects = len(self.object_list)
download_requested = []
query = database.read_database(self.object_list, ["ra", "dec", "last_download"])
last_download = query["last_download"]
# In case no_new_downloads option is passed (download_newest = False): Download only if it has never been downloaded before (useful for bulk downloads which repeatedly fail because IPAC is unstable) Else: try to download everything.
if self.download_newest is False:
for index, name in enumerate(self.object_list):
if last_download[index] is None:
download_requested.append(name)
else:
download_requested = self.object_list
# Check with IRSA how many images are present for each object. Only if this number is bigger than the local number of images, download will start.
download_needed = []
query = database.read_database(
download_requested, ["ra", "dec", "local_filecount"]
)
ras = query["ra"]
decs = query["dec"]
local_filecounts = query["local_filecount"]
from ztffps.connectors import get_irsa_filecount
self.logger.info(f"\nObtaining information on available images at IRSA.")
irsa_filecounts = get_irsa_filecount(
ztf_names=download_requested,
ras=ras,
decs=decs,
jdmin=self.jdmin,
jdmax=self.jdmax,
nprocess=16,
)
for index, name in enumerate(download_requested):
if local_filecounts[index] is None:
local_filecounts[index] = 0
if local_filecounts[index] < irsa_filecounts[name]:
download_needed.append(name)
self.logger.info(
f"\n{len(download_needed)} of {len(self.object_list)} objects have additional images available at IRSA.\nThese will be downloaded now."
)
for i, name in enumerate(download_needed):
query = database.read_database(
name, ["ra", "dec", "jdmin", "jdmax", "local_filecount"]
)
ra = query["ra"][0]
dec = query["dec"][0]
jdmin = self.jdmin
jdmax = self.jdmax
fp = forcephotometry.ForcePhotometry.from_coords(
ra=ra, dec=dec, jdmin=jdmin, jdmax=jdmax, name=name
)
self.logger.info(
f"\n{name} ({i+1} of {len(download_needed)}) Downloading data."
)
if not os.path.exists(
os.path.join(MARSHALDATA, "Cosmology_target_sources.csv")
):
fp.io.update_marshal()
fp.load_metadata()
if self.sciimg:
fp.io.download_data(
nprocess=32,
overwrite=False,
show_progress=True,
verbose=self.verbose,
ignore_warnings=True,
which=[
"scimrefdiffimg.fits.fz",
"diffimgpsf.fits",
"sciimg.fits",
],
)
else:
fp.io.download_data(
nprocess=32,
overwrite=False,
show_progress=True,
verbose=self.verbose,
ignore_warnings=True,
)
last_download = Time(time.time(), format="unix", scale="utc").jd
local_filecount = irsa_filecounts[name]
database.update_database(
name,
{
"lastdownload": last_download,
"local_filecount": local_filecount,
},
)
def check_if_psf_data_exists(self):
"""
Checks if a csv file containing PSF fit results
exists for each element in self.cleaned_object_list
"""
self.cleaned_object_list = []
for name in self.object_list:
try:
pd.read_csv(os.path.join(LOCALDATA, f"{name}.csv"))
self.cleaned_object_list.append(name)
except FileNotFoundError:
pass
def psffit(self, nprocess=None, force_refit=False):
"""
Perform the PSF fit using ztflc
"""
if nprocess is None:
nprocess = self.nprocess
query = database.read_database(
self.object_list,
[
"ra",
"dec",
"jdmin",
"jdmax",
"lastobs",
"lastfit",
"coords_per_filter",
"fitted_datapoints",
],
)
for i, name in enumerate(self.object_list):
objects_total = len(self.object_list)
ra = query["ra"][i]
dec = query["dec"][i]
jdmin = self.jdmin
jdmax = self.jdmax
lastobs = query["lastobs"][i]
lastfit = query["lastfit"][i]
coords_per_filter = query["coords_per_filter"][i]
fitted_datapoints = query["fitted_datapoints"][i]
# Check if there are different centroids for the
# different filters
# If a filter is missing, replace with total (all filters)
# median ra/dec
coords_per_filter[0] = np.nan_to_num(
x=coords_per_filter[0], nan=ra
).tolist()
coords_per_filter[1] = np.nan_to_num(
x=coords_per_filter[1], nan=dec
).tolist()
fp = forcephotometry.ForcePhotometry.from_coords(
ra=coords_per_filter[0],
dec=coords_per_filter[1],
jdmin=jdmin,
jdmax=jdmax,
name=name,
)
self.logger.info(f"\n{name} ({i+1} of {objects_total}) loading metadata.")
fp.load_metadata()
self.logger.info(f"\n{name} ({i+1} of {objects_total}) metadata loaded.")
self.logger.info(
f"\n{name} ({i+1} of {objects_total}) loading paths to files."
)
fp.load_filepathes(filecheck=self.filecheck)
self.logger.info(
f"\n{name} ({i+1} of {objects_total}) paths to files loaded."
)
# Check how many forced photometry datapoints
# there SHOULD exist for this object
number_of_fitted_datapoints_expected = len(fp.filepathes)
if fitted_datapoints is None:
fitted_datapoints = 0
# Compare to number of fitted datapoints from database
if number_of_fitted_datapoints_expected > fitted_datapoints or force_refit:
self.logger.info(f"\n{name} ({i+1} of {objects_total}): Fitting PSF.")
fp.run_forcefit(
verbose=self.verbose,
nprocess=nprocess,
store=True,
force_refit=force_refit,
no_badsub=False,
)
fp.store()
lastfit = Time(time.time(), format="unix", scale="utc").jd
database.update_database(
name,
{
"lastfit": lastfit,
"fitted_datapoints": number_of_fitted_datapoints_expected,
},
)
else:
self.logger.info(
f"\n{name} ({i+1} of {objects_total}) No new images to fit, skipping PSF fit."
)
def plot(self, nprocess=4, progress=True, plot_flux=False):
"""
Plots the lightcurve (uses PSF fitted datapoints if available and
checks for alert photometry otherwise)
"""
self.logger.info(f"\nPlotting")
object_count = len(self.object_list)
snt = [self.snt] * object_count
daysago = [self.daysago] * object_count
daysuntil = [self.daysuntil] * object_count
mag_range = [self.mag_range] * object_count
flux_range = [self.flux_range] * object_count
plot_flux = [plot_flux] * object_count
if progress:
progress_bar = ProgressBar(object_count)
else:
progress_bar = None
with multiprocessing.Pool(nprocess) as p:
for j, result in enumerate(
p.imap_unordered(
self._plot_multiprocessing_,
zip(
self.object_list,
snt,
daysago,
daysuntil,
mag_range,
flux_range,
plot_flux,
),
)
):
if progress_bar is not None:
progress_bar.update(j)
if progress_bar is not None:
progress_bar.update(object_count)
def global_filecheck(self):
"""
Check if each image downloaded from IPAC with ztfquery can be opened
"""
self.logger.info(
"Running filecheck. This can take several hours, depending on the size of your $ZTDFATA folder."
)
badfiles = ztfquery.io.run_full_filecheck(
erasebad=True, nprocess=self.nprocess, redownload=True
)
self.logger.info(f"BADFILES:\n{badfiles}")
@staticmethod
def _plot_multiprocessing_(args):
"""
Plots with multiprocessing
"""
name, snt, daysago, daysuntil, mag_range, flux_range, plot_flux = args
from ztffps.plot import plot_lightcurve
plot_lightcurve(
name,
snt=snt,
daysago=daysago,
daysuntil=daysuntil,
mag_range=mag_range,
flux_range=flux_range,
plot_flux=plot_flux,
)
print(f"\n{name} plotted")
def saltfit(self, snt=5, quality_checks=False, progress=True, alertfit=False):
"""
Perform a SALT fit for every object that has PSF-fit data available.
"""
self.check_if_psf_data_exists()
import sfdmap
from astropy.utils.console import ProgressBar
from saltfit import fit_salt
# Read info from the metadata database and update it with mwebv
query = database.read_database(self.cleaned_object_list, ["ra", "dec", "mwebv"])
dustmap = sfdmap.SFDMap()
objectcount = len(self.cleaned_object_list)
progress_bar = ProgressBar(objectcount)
self.logger.info(
"\nChecking if the mwebv Milky Way dust map value is present and compute it if not."
)
for index, name in enumerate(self.cleaned_object_list):
ra = query["ra"][index]
dec = query["dec"][index]
if query["mwebv"][index] is None:
mwebv = dustmap.ebv(
ra,
dec,
)
database.update_database(name, {"mwebv": mwebv})
progress_bar.update(index)
progress_bar.update(objectcount)
object_count = len(self.cleaned_object_list)
if progress:
progress_bar = ProgressBar(object_count)
else:
progress_bar = None
fitresults = []
fitted_models = []
fitresult_df = pd.DataFrame(
columns=[
"name",
"chisquare",
"ndof",
"red_chisq",
"z",
"t0",
"x0",
"x1",
"c",
"t0_err",
"x0_err",
"x1_err",
"c_err",
"peak_mag",
"peak_abs_mag",
"peak_abs_mag_for_comparison",
"peak_abs_mag_corrected",
"peak_abs_mag_corrected_error",
"z_spectro",
"z_precision",
"g_obs",
"r_obs",
"i_obs",
"nr_filters",
"obs_total",
]
)
for index, name in enumerate(self.cleaned_object_list):
if alertfit:
self.logger.info(f"\n{name} performing SALT fit for alert photometry.")
else:
self.logger.info(f"\n{name} performing SALT fit for forced photometry.")
try:
fitresult, fitted_model = fit_salt(
name=name,
snt=snt,
mwebv=database.read_database(name, ["mwebv"])["mwebv"][0],
quality_checks=quality_checks,
alertfit=alertfit,
)
if progress_bar is not None:
progress_bar.update(index)
fitresults.append(fitresult)
fitted_models.append(fitted_model)
except Exception:
self.logger.info(f"\n{name} Error while fitting.")
if progress_bar is not None:
progress_bar.update(index)
if progress_bar is not None:
progress_bar.update(object_count)
for fitresult in fitresults:
if fitresult is not None:
results = pd.Series(fitresult, index=fitresult_df.columns)
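# The source is truncated at this point. A plausible continuation (an assumption, not
# taken from the original code) would append each row to the fitresult_df defined above:
#     fitresult_df = fitresult_df.append(results, ignore_index=True)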
# Sentiment analysis of airline tweets
# Preprocess the tweets and perform stemming on their tokens
# Compute the sentiment of each tweet using TextBlob
# Visualizations: per-tweet sentiment, weekly mean sentiment, number of tweets per week,
# mean positive/negative polarity (column chart) and a bar chart of the top 10 positive words
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nltk
import warnings
import os
from textblob import TextBlob
from nltk.stem import PorterStemmer
warnings.filterwarnings("ignore", category=DeprecationWarning)
#os.chdir('Specify current directory')
brand='easyjet'
porter=PorterStemmer()
#preprocess the tweets
def preprocess_tweet(tweet):
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
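# Illustrative example (not from the source) of what the cleaning does:
#     preprocess_tweet("@easyJet the flight was great! http://t.co/xyz")
#     -> "the flight was great"
# i.e. @mentions, URLs and punctuation are removed and whitespace is collapsed.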
#obtain the sentiment of a tweet
def get_sentiment(tweet):
ana=TextBlob(tweet)
return(ana.sentiment)
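# Note: TextBlob's .sentiment is a namedtuple such as Sentiment(polarity=0.8, subjectivity=0.75)
# (values here are illustrative); polarity ranges from -1 (negative) to +1 (positive).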
#read easyjet.txt into a data frame
data=pd.read_csv('easyjet.txt', sep=';' , header=None)
data.drop(data.index[0],axis=0,inplace=True)
data.drop(data.columns[2],axis=1,inplace=True)
data.columns=['date','tweet']
print(preprocess_tweet(data.iloc[0]['tweet']))
data['sentiment']=np.nan
data['cleanT']=data['tweet'].apply(preprocess_tweet)
print(get_sentiment(data.iloc[0]['cleanT']))
data['sentiment']=data['cleanT'].map(get_sentiment)
data['tokens']=data['cleanT'].apply(lambda x:x.split())
data['stemmed']=data['tokens'].apply(lambda x: [porter.stem(i) for i in x])
data['cleanStemmedT']=data['stemmed'].apply(lambda x:' '.join(i for i in x))
data['polarity']=data['sentiment'].apply(lambda x: x.polarity)
print(data[['tweet','polarity']].head())
# Plot the sentiment (polarity) of each tweet
fig=plt.figure()
ax=plt.axes()
ax.plot(data['polarity'])
plt.xlabel('Tweets')
plt.ylabel('Polarity')
plt.show()
# Plot the mean sentiment per week
data['date'] = pd.to_datetime(data['date'])
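# The script is cut off here. A minimal sketch of the weekly aggregation announced above
# (an assumed continuation, not from the source) could be:
#     weekly = data.set_index('date')['polarity'].resample('W').mean()
#     weekly.plot()
#     plt.xlabel('Week')
#     plt.ylabel('Mean polarity')
#     plt.show()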
# -*- coding: utf-8 -*-
"""
Pipeline-GUI for Analysis with MNE-Python
@author: <NAME>
@email: <EMAIL>
@github: https://github.com/marsipu/mne_pipeline_hd
License: BSD (3-clause)
Written on top of MNE-Python
Copyright © 2011-2020, authors of MNE-Python (https://doi.org/10.3389/fnins.2013.00267)
inspired by <NAME>. (2018) (https://doi.org/10.3389/fnins.2018.00006)
"""
import inspect
import os
import shutil
from ast import literal_eval
from functools import partial
from importlib import util
from os import mkdir
from os.path import isdir, isfile, join
from pathlib import Path
from types import FunctionType
import pandas as pd
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (QButtonGroup, QComboBox, QDialog, QFileDialog, QFormLayout, QGroupBox,
QHBoxLayout, QLabel, QLineEdit, QListView, QListWidget, QListWidgetItem,
QMessageBox, QPushButton, QSizePolicy, QStyle, QTabWidget, QVBoxLayout, QGridLayout,
QProgressBar, QCheckBox)
from mne_pipeline_hd import QS
from mne_pipeline_hd.gui import parameter_widgets
from mne_pipeline_hd.gui.base_widgets import CheckDictList, CheckList, EditDict, EditList, SimpleDialog, SimpleList
from mne_pipeline_hd.gui.gui_utils import CodeEditor, ErrorDialog, center, get_exception_tuple, set_ratio_geometry, \
get_std_icon, MainConsoleWidget
from mne_pipeline_hd.gui.models import CustomFunctionModel, RunModel
from mne_pipeline_hd.pipeline_functions.function_utils import QRunController
class RunDialog(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.init_controller()
self.init_ui()
set_ratio_geometry(0.6, self)
self.show()
self.start()
def init_controller(self):
self.rc = QRunController(run_dialog=self, controller=self.mw.ct,
pool=self.mw.mp_pool)
def init_ui(self):
layout = QVBoxLayout()
view_layout = QGridLayout()
view_layout.addWidget(QLabel('Objects: '), 0, 0)
self.object_view = QListView()
self.object_model = RunModel(self.rc.all_objects, mode='object')
self.object_view.setModel(self.object_model)
self.object_view.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
view_layout.addWidget(self.object_view, 1, 0)
view_layout.addWidget(QLabel('Functions: '), 0, 1)
self.func_view = QListView()
self.func_model = RunModel(self.rc.current_all_funcs, mode='func')
self.func_view.setModel(self.func_model)
self.func_view.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
view_layout.addWidget(self.func_view, 1, 1)
view_layout.addWidget(QLabel('Errors: '), 0, 2)
self.error_widget = SimpleList(list())
self.error_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
# Connect Signal from error_widget to function to enable inspecting the errors
self.error_widget.currentChanged.connect(self.show_error)
view_layout.addWidget(self.error_widget, 1, 2)
layout.addLayout(view_layout)
self.console_widget = MainConsoleWidget()
layout.addWidget(self.console_widget)
self.pgbar = QProgressBar()
self.pgbar.setValue(0)
self.pgbar.setMaximum(len(self.rc.all_steps))
layout.addWidget(self.pgbar)
bt_layout = QHBoxLayout()
self.continue_bt = QPushButton('Continue')
self.continue_bt.setFont(QFont('AnyStyle', 14))
self.continue_bt.setIcon(get_std_icon('SP_MediaPlay'))
self.continue_bt.clicked.connect(self.start)
bt_layout.addWidget(self.continue_bt)
self.pause_bt = QPushButton('Pause')
self.pause_bt.setFont(QFont('AnyStyle', 14))
self.pause_bt.setIcon(get_std_icon('SP_MediaPause'))
self.pause_bt.clicked.connect(self.pause_funcs)
bt_layout.addWidget(self.pause_bt)
self.restart_bt = QPushButton('Restart')
self.restart_bt.setFont(QFont('AnyStyle', 14))
self.restart_bt.setIcon(get_std_icon('SP_BrowserReload'))
self.restart_bt.clicked.connect(self.restart)
bt_layout.addWidget(self.restart_bt)
if QS().value('use_qthread'):
self.reload_chbx = None
else:
self.reload_chbx = QCheckBox('Reload Modules')
bt_layout.addWidget(self.reload_chbx)
self.autoscroll_bt = QPushButton('Auto-Scroll')
self.autoscroll_bt.setCheckable(True)
self.autoscroll_bt.setChecked(True)
self.autoscroll_bt.setIcon(get_std_icon('SP_DialogOkButton'))
self.autoscroll_bt.clicked.connect(self.toggle_autoscroll)
bt_layout.addWidget(self.autoscroll_bt)
self.close_bt = QPushButton('Close')
self.close_bt.setFont(QFont('AnyStyle', 14))
self.close_bt.setIcon(get_std_icon('SP_MediaStop'))
self.close_bt.clicked.connect(self.close)
bt_layout.addWidget(self.close_bt)
layout.addLayout(bt_layout)
self.setLayout(layout)
def start(self):
# Set paused to false
self.rc.paused = False
# Enable/Disable Buttons
self.continue_bt.setEnabled(False)
self.pause_bt.setEnabled(True)
self.restart_bt.setEnabled(False)
self.close_bt.setEnabled(False)
self.rc.start()
def pause_funcs(self):
self.rc.paused = True
self.console_widget.write_html('<br><b>Finishing last function...</b><br>')
def restart(self):
# Reinitialize controller
self.init_controller()
if self.reload_chbx and self.reload_chbx.isChecked():
self.mw.init_mp_pool()
# Clear Console-Widget
self.console_widget.clear()
# Redo References to display-widgets
self.object_model._data = self.rc.all_objects
self.object_model.layoutChanged.emit()
self.func_model._data = self.rc.current_all_funcs
self.func_model.layoutChanged.emit()
self.error_widget.replace_data(list(self.rc.errors.keys()))
# Reset Progress-Bar
self.pgbar.setValue(0)
# Restart
self.start()
def toggle_autoscroll(self, state):
if state:
self.console_widget.set_autoscroll(True)
else:
self.console_widget.set_autoscroll(False)
def show_error(self, current, _):
self.console_widget.set_autoscroll(False)
self.autoscroll_bt.setChecked(False)
self.console_widget.scrollToAnchor(str(self.rc.errors[current][1]))
def closeEvent(self, event):
self.mw.pipeline_running = False
event.accept()
class EditGuiArgsDlg(QDialog):
def __init__(self, cf_dialog):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.gui_args = dict()
self.default_gui_args = dict()
if self.cf.current_parameter:
covered_params = ['data', 'param_name', 'param_alias', 'default', 'param_unit', 'description']
# Get possible default GUI-Args additional to those covered by the Main-GUI
gui_type = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_type']
if pd.notna(gui_type):
gui_handle = getattr(parameter_widgets, gui_type)
psig = inspect.signature(gui_handle).parameters
self.default_gui_args = {p: psig[p].default for p in psig if p not in covered_params}
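# (inspect.signature exposes the Gui-class constructor arguments; every keyword that the
# main dialog does not already cover keeps its declared default here)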
# Get current GUI-Args
loaded_gui_args = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_args']
if pd.notna(loaded_gui_args):
self.gui_args = literal_eval(loaded_gui_args)
else:
self.gui_args = dict()
# Fill in all possible Options, which are not already changed
for arg_key in [ak for ak in self.default_gui_args if ak not in self.gui_args]:
self.gui_args[arg_key] = self.default_gui_args[arg_key]
if len(self.gui_args) > 0:
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
layout.addWidget(EditDict(data=self.gui_args, ui_buttons=False))
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
def closeEvent(self, event):
# Remove all options which don't differ from the default
for arg_key in [ak for ak in self.gui_args if self.gui_args[ak] == self.default_gui_args[ak]]:
self.gui_args.pop(arg_key)
if len(self.gui_args) > 0:
self.cf.pguiargs_changed(self.gui_args)
event.accept()
class ChooseOptions(QDialog):
def __init__(self, cf_dialog, gui_type, options):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.gui_type = gui_type
self.options = options
self.init_ui()
# If open(), execution doesn't stop after the dialog
self.exec()
def init_ui(self):
layout = QVBoxLayout()
layout.addWidget(QLabel(f'For {self.gui_type}, you need to specify the options to choose from'))
layout.addWidget(EditList(data=self.options))
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
# ToDo:
# Bug1: After saving a new function, the parameters stay in the table-view.
# Bug2: When editing existing functions, their own parameters cannot be edited (they end up among the existing parameters).
# Bug3: When hitting Enter, the focus still lies on the AddFunc/EditFunc buttons, which can disrupt the setup.
class CustomFunctionImport(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.ct = main_win.ct
self.file_path = None
self.pkg_name = None
self.current_function = None
self.current_parameter = None
self.oblig_func = ['target', 'tab', 'group', 'matplotlib', 'mayavi']
self.oblig_params = ['default', 'gui_type']
self.exst_functions = list(self.ct.pd_funcs.index)
self.exst_parameters = ['mw', 'pr', 'meeg', 'fsmri', 'group']
self.exst_parameters += list(self.ct.settings.keys())
self.exst_parameters += list(QS().childKeys())
self.exst_parameters += list(self.ct.pr.parameters[self.ct.pr.p_preset].keys())
self.param_exst_dict = dict()
self.code_editor = None
self.code_dict = dict()
# Get available parameter-guis
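# (i.e. every attribute of parameter_widgets whose name contains 'Gui',
# excluding the imported QtGui module itself)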
self.available_param_guis = [pg for pg in dir(parameter_widgets) if 'Gui' in pg and pg != 'QtGui']
self.add_pd_funcs = pd.DataFrame(columns=['alias', 'target', 'tab', 'group', 'matplotlib',
'mayavi', 'dependencies', 'module', 'func_args', 'ready'])
self.add_pd_params = pd.DataFrame(columns=['alias', 'group', 'default', 'unit', 'description', 'gui_type',
'gui_args', 'functions', 'ready'])
self.yes_icon = get_std_icon('SP_DialogApplyButton')
self.no_icon = get_std_icon('SP_DialogCancelButton')
self.setWindowTitle('Custom-Functions-Setup')
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
# Import Button and Combobox
add_bt_layout = QHBoxLayout()
addfn_bt = QPushButton('Load Function/s')
addfn_bt.setFont(QFont(QS().value('app_font'), 12))
addfn_bt.clicked.connect(self.get_functions)
add_bt_layout.addWidget(addfn_bt)
editfn_bt = QPushButton('Edit Function/s')
editfn_bt.setFont(QFont(QS().value('app_font'), 12))
editfn_bt.clicked.connect(self.edit_functions)
add_bt_layout.addWidget(editfn_bt)
layout.addLayout(add_bt_layout)
# Function-ComboBox
func_cmbx_layout = QHBoxLayout()
self.func_cmbx = QComboBox()
self.func_cmbx.currentTextChanged.connect(self.func_item_selected)
func_cmbx_layout.addWidget(self.func_cmbx)
self.func_chkl = QLabel()
self.func_chkl.setPixmap(self.no_icon.pixmap(16, 16))
self.func_chkl.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
func_cmbx_layout.addWidget(self.func_chkl)
layout.addLayout(func_cmbx_layout)
# Hint for obligatory items
# There may be a better way to center the labels instead of with the space-labels
obl_hint_layout = QHBoxLayout()
space_label1 = QLabel('')
obl_hint_layout.addWidget(space_label1)
obl_hint_label1 = QLabel()
obl_hint_label1.setPixmap(self.no_icon.pixmap(16, 16))
obl_hint_label1.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label1)
obl_hint_label2 = QLabel()
obl_hint_label2.setPixmap(get_std_icon('SP_ArrowForward').pixmap(16, 16))
obl_hint_label2.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label2)
obl_hint_label3 = QLabel()
obl_hint_label3.setPixmap(self.yes_icon.pixmap(16, 16))
obl_hint_label3.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label3)
obl_hint_label4 = QLabel('(= The items marked are obligatory)')
obl_hint_label4.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label4)
space_label2 = QLabel('')
obl_hint_layout.addWidget(space_label2)
layout.addLayout(obl_hint_layout)
setup_layout = QHBoxLayout()
# The Function-Setup-Groupbox
func_setup_gbox = QGroupBox('Function-Setup')
func_setup_gbox.setAlignment(Qt.AlignHCenter)
func_setup_formlayout = QFormLayout()
self.falias_le = QLineEdit()
self.falias_le.setToolTip("Set a name if you want something other than the function's name")
self.falias_le.textEdited.connect(self.falias_changed)
func_setup_formlayout.addRow('Alias', self.falias_le)
target_layout = QHBoxLayout()
self.target_cmbx = QComboBox()
self.target_cmbx.setToolTip('Set the target on which the function shall operate')
self.target_cmbx.setEditable(False)
self.target_cmbx.activated.connect(self.target_cmbx_changed)
target_layout.addWidget(self.target_cmbx)
self.target_chkl = QLabel()
target_layout.addWidget(self.target_chkl)
func_setup_formlayout.addRow('Target', target_layout)
tab_layout = QHBoxLayout()
self.tab_cmbx = QComboBox()
self.tab_cmbx.setToolTip('Choose the Tab for the function (Compute/Plot/...)')
self.tab_cmbx.setEditable(True)
self.tab_cmbx.activated.connect(self.tab_cmbx_changed)
self.tab_cmbx.editTextChanged.connect(self.tab_cmbx_edited)
tab_layout.addWidget(self.tab_cmbx)
self.tab_chkl = QLabel()
tab_layout.addWidget(self.tab_chkl)
func_setup_formlayout.addRow('Tab', tab_layout)
group_layout = QHBoxLayout()
self.group_cmbx = QComboBox()
self.group_cmbx.setToolTip('Choose the function-group for the function or create a new one')
self.group_cmbx.setEditable(True)
self.group_cmbx.activated.connect(self.group_cmbx_changed)
self.group_cmbx.editTextChanged.connect(self.group_cmbx_edited)
group_layout.addWidget(self.group_cmbx)
self.group_chkl = QLabel()
group_layout.addWidget(self.group_chkl)
func_setup_formlayout.addRow('Group', group_layout)
mtpl_layout = QHBoxLayout()
self.mtpl_bts = QButtonGroup(self)
self.mtpl_yesbt = QPushButton('Yes')
self.mtpl_yesbt.setCheckable(True)
self.mtpl_nobt = QPushButton('No')
self.mtpl_nobt.setCheckable(True)
self.mtpl_void = QPushButton('')
self.mtpl_void.setCheckable(True)
self.mtpl_bts.addButton(self.mtpl_yesbt)
self.mtpl_bts.addButton(self.mtpl_nobt)
self.mtpl_bts.addButton(self.mtpl_void)
mtpl_layout.addWidget(self.mtpl_yesbt)
mtpl_layout.addWidget(self.mtpl_nobt)
self.mtpl_yesbt.setToolTip('Choose, if the function contains an interactive Matplotlib-Plot')
self.mtpl_nobt.setToolTip('Choose, if the function contains no interactive Matplotlib-Plot')
self.mtpl_bts.buttonToggled.connect(self.mtpl_changed)
self.mtpl_chkl = QLabel()
mtpl_layout.addWidget(self.mtpl_chkl)
func_setup_formlayout.addRow('Matplotlib?', mtpl_layout)
myv_layout = QHBoxLayout()
self.myv_bts = QButtonGroup(self)
self.myv_yesbt = QPushButton('Yes')
self.myv_yesbt.setCheckable(True)
self.myv_nobt = QPushButton('No')
self.myv_nobt.setCheckable(True)
self.myv_void = QPushButton('')
self.myv_void.setCheckable(True)
self.myv_bts.addButton(self.myv_yesbt)
self.myv_bts.addButton(self.myv_nobt)
self.myv_bts.addButton(self.myv_void)
myv_layout.addWidget(self.myv_yesbt)
myv_layout.addWidget(self.myv_nobt)
self.myv_yesbt.setToolTip('Choose, if the function contains a Pyvista/Mayavi-Plot')
self.myv_nobt.setToolTip('Choose, if the function contains no Pyvista/Mayavi-Plot')
self.myv_bts.buttonToggled.connect(self.myv_changed)
self.myv_chkl = QLabel()
myv_layout.addWidget(self.myv_chkl)
func_setup_formlayout.addRow('Pyvista/Mayavi?', myv_layout)
self.dpd_bt = QPushButton('Set Dependencies')
self.dpd_bt.setToolTip('Set the functions that must be activated before or the files that must be present '
'for this function to work')
self.dpd_bt.clicked.connect(partial(SelectDependencies, self))
func_setup_formlayout.addRow('Dependencies', self.dpd_bt)
func_setup_gbox.setLayout(func_setup_formlayout)
setup_layout.addWidget(func_setup_gbox)
# The Parameter-Setup-Group-Box
self.param_setup_gbox = QGroupBox('Parameter-Setup')
self.param_setup_gbox.setAlignment(Qt.AlignHCenter)
param_setup_layout = QVBoxLayout()
self.exstparam_l = QLabel()
self.exstparam_l.setWordWrap(True)
self.exstparam_l.hide()
param_setup_layout.addWidget(self.exstparam_l)
self.param_view = QListView()
self.param_model = CustomFunctionModel(self.add_pd_params)
self.param_view.setModel(self.param_model)
self.param_view.selectionModel().currentChanged.connect(self.param_item_selected)
param_setup_layout.addWidget(self.param_view)
param_setup_formlayout = QFormLayout()
self.palias_le = QLineEdit()
self.palias_le.setToolTip("Set a name if you want something other than the parameter's name")
self.palias_le.textEdited.connect(self.palias_changed)
param_setup_formlayout.addRow('Alias', self.palias_le)
default_layout = QHBoxLayout()
self.default_le = QLineEdit()
self.default_le.setToolTip('Set the default for the parameter (it has to fit the gui-type!)')
self.default_le.textEdited.connect(self.pdefault_changed)
default_layout.addWidget(self.default_le)
self.default_chkl = QLabel()
default_layout.addWidget(self.default_chkl)
param_setup_formlayout.addRow('Default', default_layout)
self.unit_le = QLineEdit()
self.unit_le.setToolTip('Set the unit for the parameter (optional)')
self.unit_le.textEdited.connect(self.punit_changed)
param_setup_formlayout.addRow('Unit', self.unit_le)
self.description_le = QLineEdit()
self.description_le.setToolTip('Short description of the parameter (optional)')
self.description_le.textEdited.connect(self.pdescription_changed)
param_setup_formlayout.addRow('Description', self.description_le)
guitype_layout = QHBoxLayout()
self.guitype_cmbx = QComboBox()
self.guitype_cmbx.setToolTip('Choose the GUI from the available GUIs')
self.guitype_cmbx.activated.connect(self.guitype_cmbx_changed)
guitype_layout.addWidget(self.guitype_cmbx)
test_bt = QPushButton('Test')
test_bt.clicked.connect(self.show_param_gui)
guitype_layout.addWidget(test_bt)
self.guitype_chkl = QLabel()
guitype_layout.addWidget(self.guitype_chkl)
param_setup_formlayout.addRow('GUI-Type', guitype_layout)
self.guiargs_bt = QPushButton('Edit')
self.guiargs_bt.clicked.connect(partial(EditGuiArgsDlg, self))
self.guiargs_bt.setToolTip('Set Arguments for the GUI in a dict (optional)')
param_setup_formlayout.addRow('Additional Options', self.guiargs_bt)
param_setup_layout.addLayout(param_setup_formlayout)
self.param_setup_gbox.setLayout(param_setup_layout)
setup_layout.addWidget(self.param_setup_gbox)
layout.addLayout(setup_layout)
bt_layout = QHBoxLayout()
save_bt = QPushButton('Save')
save_bt.setFont(QFont(QS().value('app_font'), 16))
save_bt.clicked.connect(self.save_pkg)
bt_layout.addWidget(save_bt)
src_bt = QPushButton('Show Code')
src_bt.setFont(QFont(QS().value('app_font'), 16))
src_bt.clicked.connect(self.show_code)
bt_layout.addWidget(src_bt)
close_bt = QPushButton('Quit')
close_bt.setFont(QFont(QS().value('app_font'), 16))
close_bt.clicked.connect(self.close)
bt_layout.addWidget(close_bt)
layout.addLayout(bt_layout)
self.setLayout(layout)
self.populate_target_cmbx()
self.populate_tab_cmbx()
self.populate_group_cmbx()
self.populate_guitype_cmbx()
def update_func_cmbx(self):
self.func_cmbx.clear()
self.func_cmbx.insertItems(0, self.add_pd_funcs.index)
try:
current_index = list(self.add_pd_funcs.index).index(self.current_function)
except ValueError:
current_index = 0
self.func_cmbx.setCurrentIndex(current_index)
def clear_func_items(self):
self.falias_le.clear()
self.target_cmbx.setCurrentIndex(-1)
self.target_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.tab_cmbx.setCurrentIndex(-1)
self.tab_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.group_cmbx.setCurrentIndex(-1)
self.group_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.mtpl_yesbt.setChecked(False)
self.mtpl_nobt.setChecked(False)
self.mtpl_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.myv_yesbt.setChecked(False)
self.myv_nobt.setChecked(False)
self.myv_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
def clear_param_items(self):
self.update_param_view()
self.palias_le.clear()
self.default_le.clear()
self.default_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.unit_le.clear()
self.guitype_cmbx.setCurrentIndex(-1)
self.guitype_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.param_setup_gbox.setEnabled(False)
def func_item_selected(self, text):
if text:
self.current_function = text
self.update_code_editor()
self.update_func_setup()
if any([self.current_function in str(x) for x in self.add_pd_params['functions']]):
self.param_setup_gbox.setEnabled(True)
self.update_param_view()
self.current_parameter = \
self.add_pd_params.loc[
[self.current_function in str(x) for x in self.add_pd_params['functions']]].index[0]
self.update_exst_param_label()
self.update_param_setup()
else:
self.update_exst_param_label()
# Clear existing entries
self.clear_param_items()
def param_item_selected(self, current):
self.current_parameter = self.param_model.getData(current)
self.update_param_setup()
self.update_code_editor()
def update_func_setup(self):
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'alias']):
self.falias_le.setText(self.add_pd_funcs.loc[self.current_function, 'alias'])
else:
self.falias_le.clear()
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'target']):
self.target_cmbx.setCurrentText(self.add_pd_funcs.loc[self.current_function, 'target'])
self.target_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.target_cmbx.setCurrentIndex(-1)
self.target_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'tab']):
self.tab_cmbx.setCurrentText(self.add_pd_funcs.loc[self.current_function, 'tab'])
self.tab_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.tab_cmbx.setCurrentIndex(-1)
self.tab_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'group']):
self.group_cmbx.setCurrentText(self.add_pd_funcs.loc[self.current_function, 'group'])
self.group_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.group_cmbx.setCurrentIndex(-1)
self.group_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'matplotlib']):
if self.add_pd_funcs.loc[self.current_function, 'matplotlib']:
self.mtpl_yesbt.setChecked(True)
else:
self.mtpl_nobt.setChecked(True)
self.mtpl_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.mtpl_void.setChecked(True)
self.mtpl_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_funcs.loc[self.current_function, 'mayavi']):
if self.add_pd_funcs.loc[self.current_function, 'mayavi']:
self.myv_yesbt.setChecked(True)
else:
self.myv_nobt.setChecked(True)
self.myv_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.myv_void.setChecked(True)
self.myv_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
def update_exst_param_label(self):
if self.current_function:
if len(self.param_exst_dict[self.current_function]) > 0:
self.exstparam_l.setText(f'Already existing Parameters: {self.param_exst_dict[self.current_function]}')
self.exstparam_l.show()
else:
self.exstparam_l.hide()
def update_param_setup(self):
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'alias']):
self.palias_le.setText(self.add_pd_params.loc[self.current_parameter, 'alias'])
else:
self.palias_le.clear()
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'default']):
self.default_le.setText(self.add_pd_params.loc[self.current_parameter, 'default'])
self.default_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
else:
self.default_le.clear()
self.default_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'unit']):
self.unit_le.setText(self.add_pd_params.loc[self.current_parameter, 'unit'])
else:
self.unit_le.clear()
if pd.notna(self.add_pd_params.loc[self.current_parameter, 'description']):
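# The source is truncated here. Following the pattern of the alias/default/unit fields
# above, the body presumably reads (assumption):
#     self.description_le.setText(self.add_pd_params.loc[self.current_parameter, 'description'])
# else:
#     self.description_le.clear()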
import pandas as pd
from pyecharts.charts import Bar, Pie, Geo, Liquid, Page
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
from pyecharts.globals import ChartType, SymbolType
# Process the CSV file
df = pd.read_csv("college_data.csv")
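# The script is truncated after loading the data. As a hedged illustration only (the column
# names "school" and "students" are hypothetical, not taken from college_data.csv), a basic
# pyecharts bar chart would look like:
#     bar = (
#         Bar()
#         .add_xaxis(df["school"].tolist())
#         .add_yaxis("students", df["students"].tolist())
#         .set_global_opts(title_opts=opts.TitleOpts(title="College data"))
#     )
#     bar.render("college_bar.html")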