| prompt (string, 19 – 1.03M chars) | completion (string, 4 – 2.12k chars) | api (string, 8 – 90 chars) |
|---|---|---|
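A minimal sketch of inspecting this prompt/completion/api table with pandas, assuming the rows have been exported to a local file named api_dataset.csv (a hypothetical filename; no export path is given here):

import pandas as pd
rows = pd.read_csv('api_dataset.csv')            # hypothetical local export of the table
print(rows[['prompt', 'completion', 'api']].head())
print(rows['api'].value_counts().head())         # most frequent target APIs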
import flask
from flask import request
import pandas as pd
import spacy
import nltk
import numpy as np
from sklearn.cluster import KMeans
import os
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
import gensim
from gensim import corpora
from sklearn import svm
from sklearn.linear_model import LogisticRegression
app = flask.Flask(__name__)
app.config["DEBUG"] = True
nltk.download('stopwords')
nlp = spacy.load('de_core_news_sm')
STOPWORDS = set(stopwords.words('german'))
def save_obj(obj, name ):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
@app.route('/api/load', methods=['GET'])
def load():
global documents
documents = load_obj('documents')
return "Database loaded!"
@app.route('/api/save', methods=['GET'])
def save():
save_obj(documents, 'documents')
return "Database saved!"
documents = {}
model = None
tfidf = None
if not os.path.exists('obj'):
os.makedirs('obj')
elif os.path.exists('obj/documents.pkl'):
print("Loading data and models")
load()
@app.route('/', methods=['GET'])
def home():
str = "<h1>Document Clustering and Classification Web API</h1>"
return str
@app.route('/api/add', methods=['POST'])
def api_add():
request_data = request.get_json()
id = request_data['id']
if id not in documents.keys():
documents[id] = {}
documents[id]['text'] = request_data['text']
documents[id]['tags'] = request_data['tags']
if "class" in request_data.keys():
documents[id]['class'] = request_data['class']
documents[id]['fixed'] = True
return "Document added!"
@app.route('/api/debug', methods=['GET'])
def api_debug():
return pd.DataFrame.from_dict(documents, orient='index').to_html()
@app.route('/api/delclasses', methods=['GET'])
def api_del_classes():
for key in documents.keys():
if "class" in documents[key].keys():
del documents[key]['class']
return "Deleted all classes!"
@app.route('/api/initpreprocess', methods=['GET'])
def initpreprocess():
i = 1
for key in documents.keys():
if 'tokens' not in documents[key].keys():
# Lemmatize
doc = nlp(documents[key]['text'])
result = []
for token in doc:
str_token = str(token)
if not (str_token.startswith("http://") or str_token.startswith("https://") or len(str_token.strip()) <= 1 or '\\n' in str_token or '\n' in str_token):
lemma = token.lemma_.lower()
if lemma not in STOPWORDS:
result.append(lemma)
result = result + documents[key]['tags']
documents[key]['tokens'] = result
i += 1
print("Processing document {} of {}".format(str(i), len(documents.keys())))
documents_df = pd.DataFrame.from_dict(documents, orient='index')
tokenized_text = documents_df['tokens'].to_numpy()
global tfidf
tfidf = TfidfVectorizer(tokenizer=very_special_tokenizer, lowercase=False, sublinear_tf=True)
X = tfidf.fit_transform(tokenized_text)
idx = 0
for key in documents.keys():
documents[key]['vector'] = X[idx]
idx += 1
return "Documents preprocessed!"
@app.route('/api/preprocess', methods=['GET'])
def preprocess():
i = 1
for key in documents.keys():
if 'tokens' not in documents[key].keys():
# Lemmatize
doc = nlp(documents[key]['text'])
result = []
for token in doc:
str_token = str(token)
if not (str_token.startswith("http://") or str_token.startswith("https://") or len(str_token.strip()) <= 1 or '\\n' in str_token or '\n' in str_token):
lemma = token.lemma_.lower()
if lemma not in STOPWORDS:
result.append(lemma)
result = result + documents[key]['tags']
documents[key]['tokens'] = result
i += 1
print("Processing document {} of {}".format(str(i), len(documents.keys())))
documents_df =
|
pd.DataFrame.from_dict(documents, orient='index')
|
pandas.DataFrame.from_dict
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## jupyter console
import pandas as pd
## read: !cat ./pydata-book/examples/ex1.csv
data1 = pd.read_csv('./pydata-book/examples/ex1.csv')
data1
data2 = pd.read_table('./pydata-book/examples/ex1.csv', sep=',')
data2
## read: !cat ./pydata-book/examples/ex2.csv
data3 = pd.read_csv('./pydata-book/examples/ex2.csv', header=None)
data3
data4 = pd.read_csv('./pydata-book/examples/ex2.csv',
names=['a', 'b', 'c', 'd', 'message'])
data4
data5 = pd.read_csv('./pydata-book/examples/ex2.csv',
names=['a', 'b', 'c', 'd', 'message'], index_col='message')
data5
## read: !cat ./pydata-book/examples/csv_mindex.csv
data6 = pd.read_csv('./pydata-book/examples/csv_mindex.csv',
index_col=['key1', 'key2'])
data6
## read: !cat ./pydata-book/examples/ex3.csv
data7 =
|
pd.read_table('./pydata-book/examples/ex3.csv', sep='\s+')
|
pandas.read_table
|
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
from fatetrack_connections import buildFeatureFrame, buildOffsetFrame, generateCandidates, generateLinks, DivSimScore, DivSetupScore, DivisionCanditates, UpdateConnectionsDiv, TranslationTable, SolveMinCostTable, ReviewCostTable
def TranslateConnections(ConnectionTable, TranslationTable, timepoint, preference = "Master_ID"):
subTranslationTable_0 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_0['slabel_t0'] = subTranslationTable_0['slabel']
subTranslationTable_1 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_1['slabel_t1'] = subTranslationTable_1['slabel']
merge_0 = pd.merge(ConnectionTable, subTranslationTable_0, on="slabel_t0")
merge = pd.merge(merge_0, subTranslationTable_1, on="slabel_t1")
pref = str(preference)
result = merge.loc[:,[pref+"_x",pref+"_y"]]
result = result.drop_duplicates()
result = result.dropna(thresh=1)
result = result.reset_index(drop=True)
result = result.rename(columns = {(pref+"_x") : (pref+"_"+str(timepoint)), (pref+"_y") : (pref+"_"+str(timepoint+1))})
return(result)
def RajTLG_wrap(filename_t0, filename_t1,timepoint,ConnectionTable,TranslationTable,path="./"):
frame0 = buildFeatureFrame(filename_t0,timepoint,pathtoimage=path);
frame1 = buildFeatureFrame(filename_t1,timepoint+1,pathtoimage=path);
frames = pd.concat([frame0,frame1])
frames["timepoint"] = frames["time"]
InfoDF = pd.merge(frames,TranslationTable, on=['label','timepoint'])
RajTLG_translation = TranslateConnections(ConnectionTable=ConnectionTable, TranslationTable=TranslationTable, timepoint=timepoint, preference="RajTLG_ID")
RajTLGFrame = pd.DataFrame()
if (timepoint == 0):
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-0"])
tmpParent = "NaN"
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-0"])
tmpParent = int(RajTLG_translation.loc[RajTLG_translation["RajTLG_ID"+"_"+str(timepoint+1)] == tmpID,
"RajTLG_ID"+"_"+str(timepoint)])
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
RajTLGFrame = RajTLGFrame.reset_index(drop=True)
RajTLGFrame = RajTLGFrame.rename(columns={0:"pointID", 1:"frameNumber",
2:"xCoord",3:"yCoord",4:"parentID"})
RajTLGFrame["annotation"] = "none"
#RajTLGFrame.to_csv(outfilename,index=False)
return(RajTLGFrame)
def MatchToGoldStd(FileCompare,FileGoldSTD):
GoldSTD = pd.read_csv(FileGoldSTD)
FateTrack = pd.read_csv(FileCompare)
GoldTranslationTable = pd.DataFrame()
for obj in range(FateTrack.shape[0]):
FateID = FateTrack.loc[obj,"pointID"]
frame = FateTrack.loc[obj,"frameNumber"]
xC = FateTrack.loc[obj,"xCoord"]
yC = FateTrack.loc[obj,"yCoord"]
tmpGold = GoldSTD.loc[GoldSTD["frameNumber"] == frame,]
tmpGold = tmpGold.reset_index(drop=True)
dist = np.array(np.sqrt((tmpGold["xCoord"]-xC)**2 + (tmpGold["yCoord"]-yC)**2))
GoldIndex = np.where(dist == dist.min())[0][0]
GoldID = tmpGold.loc[GoldIndex,"pointID"]
GoldTranslationTable = GoldTranslationTable.append(pd.DataFrame([GoldID,FateID]).T)
GoldTranslationTable = GoldTranslationTable.rename(columns={0:"GoldID",1:"FateID"})
return(GoldTranslationTable)
def CheckAccuracy(frame,FileCompare,FileGoldSTD,skip=0):
TranslateGold = MatchToGoldStd(FileCompare,FileGoldSTD)
GoldSTD = pd.read_csv(FileGoldSTD)
FateTrack = pd.read_csv(FileCompare)
FateTrack = FateTrack.loc[FateTrack["frameNumber"]==frame,]
FateTrack = FateTrack.reset_index(drop=True)
GoldSTD = GoldSTD.loc[GoldSTD["frameNumber"]==frame,]
GoldSTD = GoldSTD.reset_index(drop=True)
correct=0
incorrect=0
for obj in range(FateTrack.shape[0]):
FateID = FateTrack.loc[obj,"pointID"]
FateParent = FateTrack.loc[obj,"parentID"]
transGoldID = TranslateGold.loc[TranslateGold["FateID"]==FateID,"GoldID"].values[0] ;
transGoldParent = TranslateGold.loc[TranslateGold["FateID"]==FateParent,"GoldID"] ;
if not(transGoldParent.empty):
transGoldParent = transGoldParent.values[0]
actualGoldParent = GoldSTD.loc[GoldSTD["pointID"] == transGoldID,"parentID"]
if (not(actualGoldParent.empty | math.isnan(actualGoldParent.values[0]))):
actualGoldParent = int(actualGoldParent.values[0])
if(actualGoldParent == transGoldParent):
correct = correct+1
else:
incorrect = incorrect+1
results = pd.DataFrame([frame, skip, correct, incorrect]).T
results = results.rename(columns={0:"Frame",1:"Skip",2:"Correct",3:"Incorrect"})
return(results)
def AssembleAccMeasurements(FileCompare,FileGoldSTD,skip=0):
GoldSTD = pd.read_csv(FileGoldSTD)
maxFrame = np.max(GoldSTD["frameNumber"])
completeResults = pd.DataFrame()
for frame in (np.array(range(1,maxFrame))+1):
tmpFrame = CheckAccuracy(frame=frame,FileCompare=FileCompare,FileGoldSTD=FileGoldSTD,skip=skip)
completeResults = completeResults.append(tmpFrame)
completeResults = completeResults.reset_index(drop=True)
return(completeResults)
def redefineGold(FileGoldSTD, outfilename, skip = 1,startTime = 0):
GoldSTD = pd.read_csv(FileGoldSTD)
sub = startTime+1
maxFrame = np.max(GoldSTD['frameNumber'])
frames_to_keep = np.array(range(startTime+1,maxFrame+1,skip+1))
starter_frame = frames_to_keep[0]
other_frames = frames_to_keep[1:]
newGoldSTD = GoldSTD.loc[GoldSTD["frameNumber"].isin(other_frames),:]
newGoldSTD = newGoldSTD.reset_index(drop=True)
starterGold = GoldSTD.loc[GoldSTD["frameNumber"]==starter_frame,:]
starterGold = starterGold.reset_index(drop=True)
starterGold["parentID"] = "NaN"
pointsNew =
|
pd.concat([starterGold, newGoldSTD])
|
pandas.concat
|
#!/usr/bin/env python3
import unittest
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from datafold.dynfold.transform import (
TSCApplyLambdas,
TSCFeaturePreprocess,
TSCFiniteDifference,
TSCIdentity,
TSCPolynomialFeatures,
TSCPrincipalComponent,
TSCRadialBasis,
TSCTakensEmbedding,
TSCTransformerMixin,
)
from datafold.pcfold.kernels import *
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
def _all_tsc_transformers():
# only finds the ones that are imported (DMAP e.g. is not here)
print(TSCTransformerMixin.__subclasses__())
class TestTSCTransform(unittest.TestCase):
def _setUp_simple_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
self.simple_df = pd.DataFrame(np.random.rand(9, 2), index=idx, columns=col)
def _setUp_takens_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
# Requires non-random values
self.takens_df_short = pd.DataFrame(
np.arange(18).reshape([9, 2]), index=idx, columns=col
)
n_samples_timeseries = 100
idx = pd.MultiIndex.from_product(
[np.array([0, 1]), np.arange(n_samples_timeseries)]
)
self.takens_df_long = pd.DataFrame(
np.random.rand(n_samples_timeseries * 2, 2), index=idx, columns=col
)
def setUp(self) -> None:
self._setUp_simple_df()
self._setUp_takens_df()
def test_is_valid_sklearn_estimator(self):
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.estimator_checks import check_estimator
TEST_ESTIMATORS = (
TSCIdentity(),
TSCPrincipalComponent(),
TSCFeaturePreprocess(sklearn_transformer=MinMaxScaler()),
TSCFeaturePreprocess(sklearn_transformer=StandardScaler()),
TSCPolynomialFeatures(),
)
for test_estimator in TEST_ESTIMATORS:
for estimator, check in check_estimator(test_estimator, generate_only=True):
try:
check(estimator)
except Exception as e:
print(estimator)
print(check)
raise e
def test_identity0(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity()
pdtest.assert_frame_equal(_id.fit_transform(tsc), tsc)
pdtest.assert_frame_equal(_id.inverse_transform(tsc), tsc)
def test_identity1(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity(include_const=True)
tsc_plus_const = tsc.copy(deep=True)
tsc_plus_const["const"] = 1
pdtest.assert_frame_equal(_id.fit_transform(tsc.copy()), tsc_plus_const)
pdtest.assert_frame_equal(_id.inverse_transform(tsc_plus_const), tsc)
def test_identity2(self):
data = np.random.rand(5, 5)
data_wo_const = TSCIdentity(include_const=False).fit_transform(data)
data_plus_const = TSCIdentity(include_const=True).fit_transform(data)
nptest.assert_equal(data, data_wo_const)
nptest.assert_equal(data_plus_const, np.column_stack([data, np.ones(5)]))
def test_identity3(self):
data = TSCDataFrame(self.simple_df)
data_wo_const = TSCIdentity(
include_const=False, rename_features=True
).fit_transform(data)
data_with_const = TSCIdentity(
include_const=True, rename_features=True
).fit_transform(data)
data = data.add_suffix("_id")
pdtest.assert_index_equal(data.columns, data_wo_const.columns)
data["const"] = 1
pdtest.assert_index_equal(data.columns, data_with_const.columns)
def test_scale_min_max(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("min-max")
scaled_tsc = scale.fit_transform(tsc_df)
# sanity check:
nptest.assert_allclose(scaled_tsc.min().to_numpy(), np.zeros(2), atol=1e-16)
nptest.assert_allclose(scaled_tsc.max().to_numpy(), np.ones(2), atol=1e-16)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_scale_standard(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("standard")
scaled_tsc = scale.fit_transform(tsc_df)
nptest.assert_array_equal(
scaled_tsc.to_numpy(),
StandardScaler(with_mean=True, with_std=True).fit_transform(
tsc_df.to_numpy()
),
)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_sklearn_scaler(self):
tsc_df = TSCDataFrame(self.simple_df)
from sklearn.preprocessing import (
MaxAbsScaler,
PowerTransformer,
QuantileTransformer,
RobustScaler,
)
# each tuple has the class and a dictionary with the init-options
scaler = [
(MaxAbsScaler, dict()),
(PowerTransformer, dict(method="yeo-johnson")),
(PowerTransformer, dict(method="box-cox")),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="uniform"),
),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="normal"),
),
(RobustScaler, dict()),
]
for cls, kwargs in scaler:
scale = TSCFeaturePreprocess(sklearn_transformer=cls(**kwargs))
tsc_transformed = scale.fit_transform(tsc_df)
# Check the underlying array equals:
nptest.assert_array_equal(
cls(**kwargs).fit_transform(tsc_df.to_numpy()),
tsc_transformed.to_numpy(),
)
# check inverse transform is equal the original TSCDataFrame:
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(tsc_transformed))
def test_polynomial_feature_transform01(self):
from sklearn.preprocessing import PolynomialFeatures
tsc = TSCDataFrame(self.simple_df)
for degree in [2, 3, 4]:
for include_bias in [True, False]:
actual = TSCPolynomialFeatures(
degree=degree, include_bias=include_bias, include_first_order=True
).fit_transform(tsc)
expected = PolynomialFeatures(
degree=degree, include_bias=include_bias
).fit_transform(tsc.to_numpy())
nptest.assert_array_equal(actual.to_numpy(), expected)
def test_polynomial_feature_transform02(self):
tsc = TSCDataFrame(self.simple_df)
for include_first_order in [True, False]:
poly = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=include_first_order
).fit(tsc)
actual = poly.transform(tsc)
expected = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_frame_equal(actual, expected)
def test_polynomial_feature_transform03(self):
tsc = TSCDataFrame(self.simple_df)
actual = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["1", "A^2", "A B", "B^2"], name="feature"),
)
actual = TSCPolynomialFeatures(
degree=2, include_bias=False, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["A^2", "A B", "B^2"], name="feature"),
)
def test_apply_lambda_transform01(self):
# use lambda identity function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform02(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc.apply(np.square, axis=0, raw=True)
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform03(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x, np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
identity = tsc
identity.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
squared = tsc.apply(np.square, axis=0, raw=True)
squared.columns = pd.Index(
["A_lambda1", "B_lambda1"], name=TSCDataFrame.tsc_feature_col_name
)
expected = pd.concat([identity, squared], axis=1)
pdtest.assert_frame_equal(actual, expected)
def test_pca_transform(self):
tsc = TSCDataFrame(self.simple_df)
pca = TSCPrincipalComponent(n_components=1).fit(tsc)
data = pca.transform(tsc)
self.assertIsInstance(data, TSCDataFrame)
pca_sklearn = PCA(n_components=1).fit(tsc.to_numpy())
data_sklearn = pca_sklearn.transform(tsc)
nptest.assert_allclose(data, data_sklearn, atol=1e-15)
nptest.assert_array_equal(
pca.inverse_transform(data).to_numpy(),
pca_sklearn.inverse_transform(data_sklearn),
)
def test_takens_embedding0(self):
simple_df = self.takens_df_short.drop("B", axis=1)
tsc_df = TSCDataFrame(simple_df)
takens = TSCTakensEmbedding(
delays=1,
lag=0,
frequency=1,
)
actual = takens.fit_transform(tsc_df)
self.assertIsInstance(actual, TSCDataFrame)
# First test
actual_numerics = actual.to_numpy() # only compare the numeric values
expected = np.array(
[
[2.0, 0.0],
[6.0, 4.0],
[10.0, 8.0],
[14.0, 12.0],
[16.0, 14.0],
]
)
nptest.assert_equal(actual_numerics, expected)
# Second test
actual_inverse = takens.inverse_transform(actual)
pdtest.assert_frame_equal(tsc_df.drop([0, 17], level=1), actual_inverse)
def test_takens_embedding1(self):
# test kappa = 1
tsc_df = TSCDataFrame.from_single_timeseries(
pd.DataFrame(
np.column_stack([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]),
columns=["A", "B"],
dtype=float,
)
)
takens = TSCTakensEmbedding(lag=0, delays=5, frequency=1, kappa=1)
# embed to a single instance
actual = takens.fit_transform(tsc_df)
self.assertIsInstance(actual, TSCDataFrame)
self.assertTrue(actual.has_degenerate())
self.assertEqual(actual.n_timeseries, 1)
# First test
actual_numerics = actual.to_numpy() # only compare the numeric values
expected = np.array([[5, 4, 3, 2, 1, 0]], dtype=float) * np.exp(
-1.0 * np.array([0, 1, 2, 3, 4, 5])
)
expected = np.repeat(expected, 2, axis=1)
nptest.assert_equal(actual_numerics, expected)
# Second test
actual_inverse = takens.inverse_transform(actual)
expected = tsc_df.final_states(1)
pdtest.assert_frame_equal(actual_inverse, expected)
def test_takens_delay_indices(self):
tsc_short = TSCDataFrame(self.takens_df_short) # better check for errors
tsc_long = TSCDataFrame(self.takens_df_long)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=1, lag=0, frequency=1)
.fit(tsc_short)
.delay_indices_,
np.array([1]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=2, lag=0, frequency=1)
.fit(tsc_long)
.delay_indices_,
np.array([1, 2]),
)
with self.assertRaises(TSCException):
# Data too short
TSCTakensEmbedding(delays=5, lag=0, frequency=1).fit(tsc_short)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=1, lag=1, frequency=1)
.fit(tsc_long)
.delay_indices_,
np.array([2]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=5, lag=1, frequency=1)
.fit(tsc_long)
.delay_indices_,
np.array([2, 3, 4, 5, 6]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=2, lag=2, frequency=2)
.fit(tsc_long)
.delay_indices_,
np.array([3, 5]),
)
nptest.assert_array_equal(
TSCTakensEmbedding(delays=4, lag=2, frequency=2)
.fit(tsc_long)
.delay_indices_,
np.array([3, 5, 7, 9]),
)
with self.assertRaises(ValueError):
TSCTakensEmbedding(delays=1, lag=0, frequency=2).fit(tsc_short)
def test_rbf_1d(self):
func = lambda x: np.exp(x * np.cos(3 * np.pi * x)) - 1
x_vals = np.linspace(0, 1, 100)
y_vals = func(x_vals)
df = pd.DataFrame(y_vals, index=x_vals, columns=["qoi"])
tsc = TSCDataFrame.from_single_timeseries(df)
rbf = TSCRadialBasis(kernel=MultiquadricKernel())
rbf_coeff = rbf.fit_transform(tsc)
rbf_coeff_inverse = rbf.inverse_transform(rbf_coeff)
pdtest.assert_frame_equal(tsc, rbf_coeff_inverse, check_exact=False)
def test_rbf_2d(self):
func = lambda x: np.exp(x * np.cos(3 * np.pi * x)) - 1
x_vals = np.linspace(0, 1, 15)
y_vals = func(x_vals)
df = pd.DataFrame(np.column_stack([x_vals, y_vals]), columns=["qoi1", "qoi2"])
tsc = TSCDataFrame.from_single_timeseries(df)
rbf = TSCRadialBasis(kernel=MultiquadricKernel(epsilon=1.0))
rbf_coeff = rbf.fit_transform(tsc)
rbf_coeff_inverse = rbf.inverse_transform(rbf_coeff)
pdtest.assert_frame_equal(tsc, rbf_coeff_inverse, check_exact=False)
def test_rbf_centers(self):
func = lambda x: np.exp(x * np.cos(3 * np.pi * x)) - 1
x_vals = np.linspace(0, 1, 15)
y_vals = func(x_vals)
df = pd.DataFrame(np.column_stack([x_vals, y_vals]), columns=["qoi1", "qoi2"])
tsc = TSCDataFrame.from_single_timeseries(df)
# Use centers at locations other than the data points. These can be selected by
# an optimization routine (such as k-means) or placed randomly in the phase space.
x_vals_centers = np.linspace(0, 1, 10)
y_vals_centers = func(x_vals_centers)
centers = np.column_stack([x_vals_centers, y_vals_centers])
centers =
|
pd.DataFrame(centers, columns=tsc.columns)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Read_edf
========
Reads data from Elmiko DigiTrack. It combines the time of the first EEG sample from the XML header (the .EVX file from DigiTrack)
with the sampling-rate information (from the .1 file from DigiTrack) to build timestamps for the EEG signal. The EEG signal needs to be
exported to .edf from DigiTrack before it can be parsed here.
Timestamps from the experiment log file are used to cut slices from the EEG around events. The EEG and the events need to be saved against
the same clock, so it is best to run the experiment and the recording on the same machine.
"""
import pandas as pd
import xml.etree.ElementTree as etree
import pyedflib
import numpy as np
from datetime import datetime
import random
import struct
import pyseries.Preprocessing.ArtifactRemoval as ar
#TODO Make an organized/relative paths way of maintaining database
#path = '/Users/user/Desktop/Nagrania/rest/Rysiek_03_06/'
def Combine_EDF_XML(path,freq_min = 0, freq_max = 70):
"""Extracts EEG channels data from edf and creates a new channel with timestamps.
Returns
-------
signal_dict: dict
stores EEG timeseries and timestamps
"""
signal_dict = Read_EDF(path + "sygnal.edf")
for chan_name, sig in signal_dict.items():
#signal_dict[chan_name] = ar.band_pass(sig, freq_min,freq_max)
signal_dict[chan_name] = sig
log = pd.read_csv(path + 'unity_log.csv',parse_dates = True, index_col = 0, skiprows = 1, skipfooter = 1, engine='python')
signal_dict['events'] = log
#Get the timestamp based on the info from the exact_timestamp field in the .1 file
e_ts = exact_timestamp(path, GetMaxLength(signal_dict))
#TODO decide which timestamp is correct
signal_dict['timestamp'] = e_ts
return signal_dict
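# Usage sketch (hypothetical recording directory; `exact_timestamp` is defined
# elsewhere in this package and is required for the call to succeed):
#
#   rec = Combine_EDF_XML('/Users/user/Desktop/Nagrania/rest/Rysiek_03_06/')
#   eeg_o1 = rec['O1']               # one EEG channel, if present in the .edf
#   events = rec['events']           # unity_log.csv with the experiment events
#   timestamps = rec['timestamp']    # one timestamp per EEG sample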
def GetMaxLength(_dict):
maks=max(_dict, key=lambda k: len(_dict[k]))
return len(_dict[maks])
#path = '/Users/user/Desktop/Resty/Ewa_resting_state.edf'
def Read_EDF(path):
"""Read .edf exported from digitrack and converts them to a dictionary.
Parameters
----------
path:str
directory of .edf
Returns
-------
signal_dict: dict(np.array)
Keys are channel names
"""
f = pyedflib.EdfReader(path)
#n = f.signals_in_file
signal_labels = f.getSignalLabels()
signal_dict = {}
print('Channels:')
for idx, name in enumerate(signal_labels):
print(name.decode("utf-8"))
signal_dict[name.decode("utf-8")] = f.readSignal(idx)
f._close()
return signal_dict
def Read_XML(path):
# import xml.etree.cElementTree as ET
"""Read the header for the signal from .EVX.
Returns
-------
df: DataFrame
Contains timestamp marking first EEG sample
"""
with open(path, mode='r',encoding='utf-8') as xml_file:
xml_tree = etree.parse(xml_file)
root = xml_tree.getroot()
#Get only the relevant fields
for child_of_root in root:
if(child_of_root.attrib['strId'] == 'Technical_ExamStart'):
time_event = child_of_root.find('event')
#Timestamp in unix time
u_time = time_event.attrib['time']
#Timestamp in DateTime
dt_time = time_event.find('info').attrib['time']
#store this information in a dataframe in a datetime/timestamp format
df = pd.DataFrame()
#HACK changing timezone by manually adding two hours
#TODO make sure the timestamps can be compared between tz-naive and tz-aware (UTC) formats
df['UNIXTIME'] =
|
pd.to_datetime([u_time], unit='us')
|
pandas.to_datetime
|
#!/usr/bin/env python
# coding: utf-8
# In[10]:
#https://github.com/pomber/covid19
import numpy as np
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
import datetime
# In[11]:
df = pd.read_csv('../data/brazil_corona19_data.csv', sep=',')
df['date'] = df['date'].astype('datetime64[ns]')
today = str(df.date.max().date())
tomorrow = str(df.date.max().date() + datetime.timedelta(days=1))
dayAfterTomorrow = str(df.date.max().date() + datetime.timedelta(days=2))
yesterday = str(df.date.max().date() - datetime.timedelta(days=1))
df[df['state']=='SP'].tail()
# In[12]:
states = df['state'].unique()
states
# ----------------------------
# ### Predicting cases and deaths for the selected states
# In[13]:
# states selected for prediction
predictedStates = ['PI', 'CE', 'AM', 'RJ', 'SP', 'PR',]
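# A minimal per-state sketch of the forecasting step described above. It assumes
# the frame has a daily 'cases' column (only 'date' and 'state' are visible in
# this excerpt), and `forecast_cases` is a hypothetical helper, not notebook code.
def forecast_cases(state, periods=2):
    d = df[df['state'] == state][['date', 'cases']].rename(
        columns={'date': 'ds', 'cases': 'y'})
    m = Prophet(daily_seasonality=False)
    m.fit(d)
    future = m.make_future_dataframe(periods=periods)
    forecast = m.predict(future)
    return forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(periods)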
# In[14]:
df_prediction =
|
pd.DataFrame(columns=['state','ds', 'case_day', 'death_day', 'cases', 'deaths'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from pandas import compat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.indexing import is_list_like
from pandas.core.arrays.categorical import _factorize_from_iterable
class Smarties:
def __init__(self, main_lookup=None):
self.main_lookup=main_lookup
return None
def transform(self, df):
result_lookup = self.main_lookup
try:
df = df[result_lookup['normal']]
except KeyError:
list1 = result_lookup['normal']
list2 = df.columns
for i in list1:
if i in list2:
print('ok',i)
else:
print('missing!',i)
raise Exception('You are missing a column key, should be:' + str(result_lookup['normal']))
encoding_lookup = result_lookup['encoding']
with_dummies = [df.drop(encoding_lookup.keys(), axis=1)] #drop columns to encode
for key in encoding_lookup:
values = df[key].values
#Check to see if encoding took place
number_of_cols = len(encoding_lookup[key])
number_of_rows = df.shape[0]
dummy_mat = np.zeros((number_of_rows, number_of_cols), dtype=np.uint8)
for row in range(number_of_rows):
indices = [i for i, s in enumerate(encoding_lookup[key]) if key + '_' + str(values[row]) == s]
if len(indices) > 0:
dummy_mat[row][indices[0]] = 1
with_dummies.append(DataFrame(dummy_mat, index=df.index, columns=encoding_lookup[key]))
return pd.concat(with_dummies, axis=1)
def fit_transform(self, data, y=None, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
"""
#from pandas.core.reshape.concat import concat
from itertools import cycle
if 'DataFrame' not in str(type(data)): #convert series to dataframe
data = data.to_frame()
main_lookup={}
main_lookup['normal'] = data.columns
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
if is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
#print(with_dummies)
encoding_lookup={}
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = self._get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first)
encoding_lookup[col]=dummy.columns
with_dummies.append(dummy)
main_lookup['encoding'] = encoding_lookup
result = pd.concat(with_dummies, axis=1)
else:
result = self._get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse, drop_first=drop_first)
self.main_lookup = main_lookup #save class variables
return result #, dummy, columns_to_encode, main_lookup
def _get_dummies_1d(self, data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return
|
DataFrame(index=index)
|
pandas.core.frame.DataFrame
|
"""
Load data to database
"""
import os
import pandas as pd
def apply_adjustment(df, adj_date, adj_value,
adj_type='mul',date_col='date',
cols=['open','high', 'low', 'close']):
"""
Apply adjustment to a given stock
df
dataframe of the given stock
adj_date
date from which the adjustment is
to be made
adj_value
value to be adjusted
adj_type
method of adjustment **mul/sub**
mul means multiply all the values
such as splits and bonuses
sub means subtract the values
such as dividends
date_col
date column on which the adjustment
is to be applied
cols
columns to which the adjustment is to
be made
Notes
-----
1) You can use negative values to add to the
stock value by using **adj_type=sub**
2) Adjustment is applied prior to all dates
in the dataframe
3) In case your dataframe has date or
symbol as indexes, reset them
"""
df = df.set_index(date_col).sort_index()
values_on_adj_date = df.loc[adj_date, cols].copy()
if adj_type == "mul":
adjusted_values = (df.loc[:adj_date, cols] * adj_value).round(2)
elif adj_type == "sub":
adjusted_values = (df.loc[:adj_date, cols] - adj_value).round(2)
else:
raise ValueError('adj_type should be either mul or sub')
df.loc[:adj_date, cols] = adjusted_values
df.loc[adj_date, cols] = values_on_adj_date
return df.reset_index()
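def _example_split_adjustment():
    # Hedged usage sketch (toy prices, not real data): halve all OHLC values
    # recorded before a 2-for-1 split taking effect on 2020-01-03.
    prices = pd.DataFrame({
        'date': pd.date_range('2020-01-01', periods=5, freq='D'),
        'open': [100.0, 101.0, 52.0, 53.0, 54.0],
        'high': [102.0, 103.0, 54.0, 55.0, 56.0],
        'low': [98.0, 99.0, 50.0, 51.0, 52.0],
        'close': [101.0, 102.0, 53.0, 54.0, 55.0],
    })
    return apply_adjustment(prices, adj_date='2020-01-03', adj_value=0.5)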
class DataLoader(object):
"""
Data Loader class
"""
def __init__(self, directory, mode='HDF', engine=None,
tablename=None):
"""
Initialize parameters
directory
directory to search files
mode
HDF/SQL - should be explicitly specified
engine
filename in case of HDF
SQL Alchemy connection string in case of engine
tablename
table where data is to be written
parse dates
list of columns to be parsed as date
"""
if mode not in ['SQL', 'HDF']:
raise TypeError('No mode specified; should be HDF or SQL')
self.directory = directory
self.mode = mode
self.engine = engine
self.tablename = tablename
def _initialize_HDF_file(self):
import hashlib
hash = hashlib.sha1().hexdigest()
with
|
pd.HDFStore(self.engine)
|
pandas.HDFStore
|
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import time
import multiprocessing as mp
start_time=time.time()
def deepl(location1,location2):
data=pd.read_csv(location1)
data_columns=data.columns
xtrain = data[data_columns[data_columns != 'typeoffraud']]
ytrain=data['typeoffraud']
data1=pd.read_csv(location2)
data1_columns=data1.columns
xtest = data1[data1_columns[data1_columns != 'typeoffraud']]
ytest=data1['typeoffraud']
xtrain_norm = (xtrain - xtrain.mean()) / xtrain.std()
xtest_norm = (xtest - xtest.mean()) / xtest.std()
n_cols = xtrain_norm.shape[1]
ytrain=to_categorical(ytrain)
ytest=to_categorical(ytest)
num_classes=ytrain.shape[1]
def classification_model():
# create model
model = Sequential()
model.add(Dense(100,activation='relu', input_shape=(n_cols,)))
model.add(Dense(100, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
# build the model
model = classification_model()
# fit the model
model.fit(xtrain_norm, ytrain, validation_data=(xtest_norm, ytest), epochs=10, verbose=1)
# evaluate the model
# test_loss,test_acc=model.evaluate(xtest_norm, ytest)
test_labels_p=model.predict(xtest_norm)
test_labels_p=np.argmax(test_labels_p,axis=1)
rel=list(zip(test_labels_p))
pp=pd.DataFrame(data=rel,columns=['label'])
pp.to_csv('label.csv',index=False)
################################################################################################################
def maketags(location2,location3):
e=pd.read_csv(location2)
tags=[]
ids=[]
tags1=[]
ids1=[]
for i,l in enumerate(e['typeoffraud']):
if l==1 or l==2 or l==3:
ids.append(e.iloc[i,1])
tags.append(e.iloc[i,4])
if l==4 or l==5 or l==6:
ids1.append(e.iloc[i,1])
tags1.append(e.iloc[i,4])
rel=list(zip(ids,tags))
pp=
|
pd.DataFrame(data=rel,columns=['ids','tags'])
|
pandas.DataFrame
|
import gc
import json
from pathlib import Path
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from scipy.sparse import load_npz
from dataset import MultiDataset, MultiTestDataset
from dataset import MultiClassDataset, MultiClassTestDataset
from dataset import Regression_MultiDataset, Regression_MultiTestDataset
def scatter_path_array(path_data, size, rank):
all_lst = []
for row, item in path_data.iterrows():
path, num = item['path'], int(item['res_num'])
all_lst.extend([[path, i, row] for i in range(num)])
all_lst = np.array(all_lst, dtype=object)
all_lst = np.random.permutation(all_lst)
all_lst = all_lst[int(len(all_lst) / size) * rank:int(len(all_lst) / size) * (rank + 1):]
return all_lst[:, 0], all_lst[:, 1], all_lst[:, 2]
class Dataproc():
def __init__(self, size, rank, config):
self.config = config
label_name = self.config['label'][0]
voxel_path = Path(self.config['voxel_path'])
label_path = Path(self.config['label_path'])
# size = 32
train_data_df = pd.read_csv(self.config['train_data'], index_col=0)
path, resid, protein_id = scatter_path_array(train_data_df, size, rank)
train_df = pd.DataFrame({'path': path, 'i': resid}, index=protein_id)
train_groupby = train_df.groupby('path')
def load_train(path, group):
voxel = load_npz(voxel_path / path)
label = np.load(label_path / path)[label_name]
return [[voxel[index], float(label[index])] for index in group['i']]
sub = Parallel(n_jobs=14)([delayed(load_train)(path, group) for path, group in train_groupby])
train_voxel = [y[0] for x in sub for y in x]
train_label = [y[1] for x in sub for y in x]
del train_df, train_groupby, sub
gc.collect()
test_data_df =
|
pd.read_csv(self.config['test_data'], index_col=0)
|
pandas.read_csv
|
# AUTOGENERATED! DO NOT EDIT! File to edit: queries.ipynb (unless otherwise specified).
__all__ = ['optimize_floats', 'optimize_ints', 'optimize_objects', 'df_optimize', 'connect_db', 'update_radcom',
'update_stel', 'update_mosaico', 'update_base', 'read_stel', 'read_radcom', 'read_mosaico', 'read_base']
# Cell
import requests
from decimal import *
from typing import *
from gazpacho import Soup
from rich.progress import track
from pathlib import Path
from unidecode import unidecode
import pandas as pd
import pandas_read_xml as pdx
import pyodbc
import re
import xml.etree.ElementTree as et
from zipfile import ZipFile
import collections
from fastcore.utils import listify
from fastcore.foundation import L
from fastcore.test import *
from .constants import *
from pyarrow import ArrowInvalid
getcontext().prec = 5
# Cell
def optimize_floats(df: pd.DataFrame, exclude = None) -> pd.DataFrame:
floats = df.select_dtypes(include=["float64"]).columns.tolist()
floats = [c for c in floats if c not in listify(exclude)]
df[floats] = df[floats].apply(pd.to_numeric, downcast="float")
return df
def optimize_ints(df: pd.DataFrame, exclude=None) -> pd.DataFrame:
ints = df.select_dtypes(include=["int64"]).columns.tolist()
ints = [c for c in ints if c not in listify(exclude)]
df[ints] = df[ints].apply(pd.to_numeric, downcast="integer")
return df
def optimize_objects(df: pd.DataFrame, datetime_features: List[str], exclude=None) -> pd.DataFrame:
for col in df.select_dtypes(include=["object"]).columns.tolist():
if col not in datetime_features:
if col in listify(exclude): continue
num_unique_values = len(df[col].unique())
num_total_values = len(df[col])
if float(num_unique_values) / num_total_values < 0.5:
dtype = "category"
else:
dtype = "string"
df[col] = df[col].astype(dtype)
else:
df[col] = pd.to_datetime(df[col]).dt.date
return df
def df_optimize(df: pd.DataFrame, datetime_features: List[str] = [], exclude = None):
return optimize_floats(optimize_ints(optimize_objects(df, datetime_features, exclude), exclude), exclude)
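# Cell
def _example_df_optimize():
    # Hedged sketch on a made-up frame: integers and floats are downcast, the
    # low-cardinality 'uf' column becomes categorical and 'data' is parsed as a date.
    toy = pd.DataFrame({
        'id': list(range(1000)),
        'freq': [101.1, 105.5] * 500,
        'uf': ['SP', 'RJ'] * 500,
        'data': ['2021-01-01'] * 1000,
    })
    return df_optimize(toy, datetime_features=['data'])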
# Cell
def connect_db():
"""Conecta ao Banco ANATELBDRO01 e retorna o 'cursor' (iterador) do Banco pronto para fazer iterações"""
conn = pyodbc.connect(
"Driver={ODBC Driver 17 for SQL Server};"
"Server=ANATELBDRO01;"
"Database=SITARWEB;"
"Trusted_Connection=yes;"
"MultipleActiveResultSets=True;",
timeout=TIMEOUT,
)
return conn
# Internal Cell
def row2dict(row):
"""Receives a json row and return the dictionary from it"""
return {k: v for k, v in row.items()}
def dict2cols(df, reject=()):
"""Recebe um dataframe com dicionários nas células e extrai os dicionários como colunas
Opcionalmente ignora e exclue as colunas em reject
"""
for column in df.columns:
if column in reject:
df.drop(column, axis=1, inplace=True)
continue
if type(df[column].iloc[0]) == collections.OrderedDict:
try:
new_df = pd.DataFrame(df[column].apply(row2dict).tolist())
df = pd.concat([df, new_df], axis=1)
df.drop(column, axis=1, inplace=True)
except AttributeError:
continue
return df
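def _example_dict2cols():
    # Hedged sketch: a frame whose 'info' cells hold OrderedDicts is expanded
    # into one column per key ('uf' and 'municipio' are made-up keys).
    toy = pd.DataFrame({'id': [1, 2],
                        'info': [collections.OrderedDict(uf='SP', municipio='Santos'),
                                 collections.OrderedDict(uf='RJ', municipio='Niteroi')]})
    return dict2cols(toy)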
def parse_plano_basico(row, cols=COL_PB):
"""Receives a json row and filter the column in `cols`"""
return {k: row[k] for k in cols}
def scrape_dataframe(id_list):
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df =
|
pandas.DataFrame(data)
|
pandas.DataFrame
|
from datetime import datetime,timedelta
import ntpath
import pytz
import logging
import re
import pandas as pd
import numpy as np
logger = logging.getLogger(__name__)
def is_month_complete(start,end):
if end.month == (end + timedelta(days=1)).month:
return False
if start.day == 1:
return True
else:
return False
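# For example: a period covering 2021-03-01 through 2021-03-31 is a complete
# month, while 2021-03-05 through 2021-03-31 or 2021-03-01 through 2021-03-30
# is not (the end must be the last day of its month and the start must be day 1).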
def _clean_grunddaten_from_sheet(sheets):
rechnung_grunddaten = {}
kad = sheets['Kunden-Absender Daten']
rechnung_grunddaten['rechnungsnummer'] = kad[kad['key'] == 'Rechnungsnummer:'].value.item()
rechnung_grunddaten['rahmenvertragsnummer'] = kad[kad['key'] == 'Rahmenvertragsnummer:'].value.item()
rechnung_grunddaten['umsatzsteuer'] = float(kad[kad['key'] == 'Umsatzsteuer:'].value.item()[:-2].replace(',','.'))
rechnung_grunddaten['abrechnungsperiode_start'] = pytz.utc.localize(datetime.strptime(kad[kad['key'] == 'Beginn der Abrechnungsperiode:'].value.item(), '%d.%m.%Y'))
rechnung_grunddaten['abrechnungsperiode_ende'] = pytz.utc.localize(datetime.strptime(kad[kad['key'] == 'Ende der Abrechnungsperiode:'].value.item(), '%d.%m.%Y'))
rechnung_grunddaten['rechnungsmonat_komplett'] = is_month_complete(rechnung_grunddaten['abrechnungsperiode_start'],rechnung_grunddaten['abrechnungsperiode_ende'])
rs = sheets['Rechnungssummen']
rechnung_grunddaten['rechnung_betrag_dtag_netto'] = float(rs[rs['text'] == 'Betrag Telekom Deutschland GmbH']['summen_betrag_netto'].item())
rechnung_grunddaten['rechnung_betrag_dtag_brutto'] = float(rs[rs['text'] == 'Betrag Telekom Deutschland GmbH']['brutto_betrag'].item())
rechnung_grunddaten['rechnung_betrag_drittanbieter_brutto'] = float(rs[rs['text'] == 'Genutzte Angebote']['betrag'].sum())
rechnung_grunddaten['rechnung_summe_netto'] = float(rs[rs['text'] == 'Rechnungsbetrag']['summen_betrag_netto'].item())
rechnung_grunddaten['rechnung_summe_brutto'] = float(rs[rs['text'] == 'Zu zahlender Betrag']['brutto_betrag'].item())
rp = sheets['Rechnungspositionen']
rechnung_grunddaten['rechnung_betrag_vda_brutto'] = rp[(rp['service'] == "VDA") & (rp['rechnungsbereich'] == "Telekom Deutschland GmbH")]['summen_nettobetrag'].sum()
zusatzangaben = rp[rp['rechnungsbereich'] == "Zusatzangaben zum Rechnungsbetrag"]
if not zusatzangaben['rechnungsposition'].empty:
regex = r'^([0-9,]+)% Vergünstigung auf\s(.*)$'
match = re.search(regex, zusatzangaben['rechnungsposition'].item())
rechnung_grunddaten[f"rechnung_zusatzangaben_auf_rechnungsbereich"] = match.group(2)
rechnung_grunddaten[f"rechnung_zusatzangaben_prozent"] = match.group(1)
rechnung_grunddaten = pd.DataFrame(rechnung_grunddaten, index=[0])
return rechnung_grunddaten
def _clean_summen_der_verguenstigungen(sheets):
# Compute the totals of the discounts:
# first assemble all the individual line items,
# then sum them up at the end of the function
rp_sheet = sheets['Rechnungspositionen']
rp = rp_sheet[(rp_sheet['service'] == "Telefonie")
& (rp_sheet['eur_netto'].notnull())
& (rp_sheet['summen_brutto_betraege'].isnull())
& (rp_sheet['andere_leistungen_eur_brutto'].isnull())]
df = pd.DataFrame()
regex = r'^[0-9,]+% auf\s?(Grundpreis\s(.*)|(TwinBill - Aufpreis))$'
tmp = rp['rechnungsposition'].str.extractall(regex).droplevel(-1)
df['kartennummer'] = rp['kartennummer']
df['rufnummer'] = rp['rufnummer']
df['verguenstigung_grundpreis_art'] = tmp[1].combine_first(tmp[2])
df['verguenstigung_grundpreis_art'].replace(['TwinBill - Aufpreis'], 'TwinBill Aufpreis', inplace=True)
df['verguenstigung_grundpreis_summe'] = pd.to_numeric(rp['eur_netto'], errors='coerce')
# Rows without "verguenstigung_grundpreis_art" are undiscounted base prices
# and must therefore be filtered out when computing the discounted base prices
df = df.dropna(axis=0)
df = df.groupby(['kartennummer','rufnummer']).sum()
return df
def _clean_berechne_echte_grundpreise_u_variable_kosten(sheets, df1):
rp_sheet = sheets['Rechnungspositionen']
# DTAG costs and the computed base prices including discounts;
# from these, the variable costs can then be derived
df2 = rp_sheet[(rp_sheet['service'] == "Telefonie")
& (rp_sheet['summen_nettobetrag'].notnull())
& (rp_sheet['kartennummer'].notna())]
df2 = df2.groupby(['kartennummer','kostenstelle','kostenstellennutzer','rufnummer','rechnungsbereich'], dropna=False).sum()
df2 = df2.reset_index()
df2 = df2.pivot(index=['kartennummer','kostenstelle','kostenstellennutzer','rufnummer'], columns=['rechnungsbereich'], values='summen_nettobetrag')
df2 = df2[['Grundpreise','Telekom Deutschland GmbH']]
df2 = df2.reset_index()
df2 = df2.set_index(['kartennummer','rufnummer'])
df = pd.concat((df1, df2), axis=1)
df = df.reset_index()
cols = ['Grundpreise','Telekom Deutschland GmbH','verguenstigung_grundpreis_summe']
df[cols] =df[cols].apply(pd.to_numeric, errors='coerce')
df[cols] =df[cols].fillna(0)
df['grundpreise_echt'] = df['Grundpreise']+df['verguenstigung_grundpreis_summe']
df['variable_kosten'] = df['Telekom Deutschland GmbH'] - df['grundpreise_echt']
df = df.drop(columns=['Grundpreise','verguenstigung_grundpreis_summe'])
return df
def _rechnungspositionen_komplett(sheets):
# Import the invoice line items in full, without any processing
rp = sheets['Rechnungspositionen']
cols = ['beginn_datum','ende_datum']
rp['beginn_datum'] =
|
pd.to_datetime(rp['beginn_datum'],utc=True, format='%d.%m.%Y')
|
pandas.to_datetime
|
# %% [markdown]
# # Comparing Binary and Linear Search
# %% [markdown]
# ## Configuration
# %% [markdown]
# ### Import
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from timeit import default_timer as timer
# %% [markdown]
# ### Random setup + Linear Search Confirmation
# %%
def int_check(prompt, flag = None):
while True:
try:
value = int(input(prompt))
except ValueError:
if flag is False:
value = np.random.randint(np.iinfo(np.int32).max)
else:
print("Check input")
continue
if value < 0:
print("Please input a number larger than 0")
continue
if flag is True and value < range_low:
print("Invalid top end")
continue
clear_output()
return value
range_low = int_check("Choose the bottom end of your randomness ( ≥ 0 ): ")
range_high = int_check("Choose the top end of your randomness ( ≥ 0 ): ", True)
random_size = int_check("Choose the size of the array ( ≥ 0 ): ")
seed_info = int_check("Choose your seed ( ≥ 0 ): ", False) # If ValueError then a random seed between 0 and top limit of int32 would be selected
np.random.seed(seed_info)
arr = np.random.randint(low = range_low, high = range_high, size = random_size)
exec_unsort = input("CONFIRMING LINEAR search on UNSORTED array (yes): ")
exec_sort = input("CONFIRMING LINEAR search on SORTED array (yes): ")
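# %%
# Hypothetical sketch (not part of the original notebook): one way the linear
# search confirmed above could be run and timed with the imported default_timer.
def linear_search(a, target):
    for i, v in enumerate(a):
        if v == target:
            return i
    return -1

if exec_unsort == "yes" and len(arr) > 0:
    t0 = timer()
    linear_search(arr, arr[-1])
    print("Linear search on the unsorted array took", timer() - t0, "seconds")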
# %% [markdown]
# ### Duplication handling
# %%
unique_arr_unsorted =
|
pd.unique(arr)
|
pandas.unique
|
import os
import sys
import logging
import pandas as pd
import numpy as np
from linker.plugins.base import AlgorithmProvider
from linker.core.union_find import UnionFind
from jellyfish import levenshtein_distance, jaro_winkler
logger = logging.getLogger(__name__)
class Levenshtein(AlgorithmProvider):
name = 'LEVENSHTEIN'
title = 'Levenshtein'
type = 'DTR'
args = ['max_edits']
def apply(self, s1, s2, max_edits=0):
def levenshtein_alg(x, max_edits=0):
try:
d = levenshtein_distance(x[0], x[1])
return 1 if d <= max_edits else 0
except TypeError as err:
logger.error(
'Error in calculating Levenshtein edit distance: {}'
.format(err))
strings = pd.concat([s1, s2], axis=1, ignore_index=True)
strings = strings.replace(np.nan, '', regex=True)
return strings.apply(levenshtein_alg, axis=1, max_edits=max_edits)
class JaroWinkler(AlgorithmProvider):
name = 'JARO_WINKLER'
title = 'Jaro-Winkler'
type = 'DTR'
args = ['threshold']
def apply(self, s1, s2, threshold=1.0):
def jaro_winkler_alg(x, threshold=1.0):
try:
t = jaro_winkler(x[0], x[1])
return 1 if t >= threshold else 0
except TypeError as err:
logger.error(
'Error in calculating Jaro-Winkler similarity: {}'
.format(err))
strings = pd.concat([s1, s2], axis=1, ignore_index=True)
strings = strings.replace(np.nan, '', regex=True)
return strings.apply(jaro_winkler_alg, axis=1, threshold=threshold)
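def _example_string_comparators():
    # Hypothetical sketch: score two small, made-up name columns with both providers.
    s1 = pd.Series(['JON', 'MARTHA'])
    s2 = pd.Series(['JOHN', 'MARHTA'])
    lev = Levenshtein().apply(s1, s2, max_edits=1)   # 1 where edit distance <= 1
    jw = JaroWinkler().apply(s1, s2, threshold=0.9)  # 1 where Jaro-Winkler similarity >= 0.9
    return lev, jw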
class SynonymTable(AlgorithmProvider):
"""
Creates disjoint sets of synonym names first, such that all synonyms end up in the same set.
Then it compares two given series of names and checks, for every pair of names in s1 and s2,
whether the names are the same (synonyms).
:param s1: Pandas Series, first input sequence of names
:param s2: Pandas Series, second input sequence of names
:return: For each pair of names, returns 1 if the names are synonyms and 0 otherwise.
"""
name = 'SYNONYMS'
title = 'Synonym Names'
type = 'DTR'
args = []
synonym_file = "nicknames.csv"
names_index = None
name_sets = None
@staticmethod
def create_synonyms():
'''
Creates the disjoint sets of names using the given nicknames file.
'''
file_path = os.path.join(os.path.dirname(__file__), SynonymTable.synonym_file)
nicknames = pd.read_csv(file_path)
nicknames['nameA'] = nicknames['nameA'].str.upper()
nicknames['nameB'] = nicknames['nameB'].str.upper()
names = pd.concat([nicknames['nameA'], nicknames['nameB']]).drop_duplicates()
names.index = list(range(len(names)))
names_index = pd.Series(list(range(len(names))), index=names.values)
name_set = UnionFind(len(names))
for name_x, name_y in nicknames[['nameA', 'nameB']].values:
name_set.union(names_index[name_x], names_index[name_y])
SynonymTable.name_sets = name_set
SynonymTable.names_index = names_index
@staticmethod
def synonym(name_x, name_y):
'''
Checks if name_x and name_y are synonyms.
:param name_x: First input name
:param name_y: Second input name.
:return: 1 if name_x and name_y are synonyms, 0 otherwise.
'''
if SynonymTable.name_sets is None:
SynonymTable.create_synonyms()
if pd.isnull(name_x):
name_x = ''
if pd.isnull(name_y):
name_y = ''
name_x = name_x.upper()
name_y = name_y.upper()
if name_x == '' and name_y == '':
return 1
if name_x not in SynonymTable.names_index or name_y not in SynonymTable.names_index:
return 0
return 1 if SynonymTable.name_sets.linked(SynonymTable.names_index[name_x],
SynonymTable.names_index[name_y]) else 0
def __init__(self):
if SynonymTable.name_sets is None:
SynonymTable.create_synonyms()
def apply(self, s1, s2):
names =
|
pd.concat([s1, s2], axis=1, ignore_index=True)
|
pandas.concat
|
import utils_ECAD as AD_algos
import matplotlib.patches as mpatches
import matplotlib.pyplot as pl
from pyod.models.pca import PCA
from pyod.models.ocsvm import OCSVM
from pyod.models.iforest import IForest
from pyod.models.hbos import HBOS
from pyod.models.knn import KNN # kNN detector
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import neighbors
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from PI_class_EnbPI import prediction_interval
import utils_EnbPI as util
from utils_EnbPI import plot_average_new, grouped_box_new, one_dimen_transform
from matplotlib.lines import Line2D # For legend handles
import statsmodels as sm
import calendar
import warnings
import matplotlib.pyplot as plt
from sklearn.linear_model import RidgeCV, LassoCV
from sklearn.ensemble import RandomForestRegressor
import itertools
import importlib
import time
import pandas as pd
import numpy as np
import os
import sys
import keras
warnings.filterwarnings("ignore")
'''This File contains code for reproducing all figures
in the paper (including those in the appendix).
The main difference is the number of additional experiments being done
and extra steps needed for network data in California Energy Data'''
'''A. Marginal Coverage Results'''
# Read data and initialize parameters
max_data_size = 10000
# data0--2 results are in Section 8.4
data0 = util.read_data(0, 'Data/green_house_data.csv', max_data_size)
data1 = util.read_data(1, 'Data/appliances_data.csv', max_data_size)
data2 = util.read_data(
2, 'Data/Beijing_air_Tiantan_data.csv', max_data_size)
dataSolar_Atl = util.read_data(3, 'Data/Solar_Atl_data.csv', max_data_size)
# Results in Sec 8.3
CA_cities = ['Fremont', 'Milpitas', 'Mountain_View', 'North_San_Jose',
'Palo_Alto', 'Redwood_City', 'San_Mateo', 'Santa_Clara',
'Sunnyvale']
for city in CA_cities:
globals()['data%s' % city] = read_CA_data(f'Data/{city}_data.csv')
stride = 1
miss_test_idx = []
alpha = 0.1
tot_trial = 10 # For CP methods that randomizes
np.random.seed(98765)
B = 30 # number of bootstrap samples
Data_name = ['green_house', 'appliances', 'Beijing_air',
'Solar_Atl', 'Palo_Alto', 'Wind_Austin']
Data_name_network = ['Palo_Alto']
response_ls = {'green_house': 15, 'appliances': 'Appliances',
'Beijing_air': 'PM2.5', 'Solar_Atl': 'DHI', 'Wind_Austin': 'MWH'}
response_ls_network = {'Palo_Alto': 'DHI'}
data_ind = {}
for i in range(len(Data_name)):
key = Data_name[i]
if i <= 2:
data_ind[key] = i
else:
data_ind[key] = key
data_ind_network = {'Palo_Alto': 'Palo_Alto'}
min_alpha = 0.0001
max_alpha = 10
ridge_cv = RidgeCV(alphas=np.linspace(min_alpha, max_alpha, 10))
random_forest = RandomForestRegressor(n_estimators=10, criterion='mse',
bootstrap=False, max_depth=2, n_jobs=-1)
def big_transform(CA_cities, current_city, one_dim, train_size):
    # Merge the per-city data and return [X_train, X_predict, Y_train, Y_predict].
    # The feature matrix is either multivariate (direct concatenation of each city's X_t)
    # or univariate (each city's Y_t series is transformed first and THEN the transformed
    # series are concatenated). A short usage sketch follows the function.
big_X_train = []
big_X_predict = []
for city in CA_cities:
data = eval(f'data{city}') # Pandas DataFrame
data_x = data.loc[:, data.columns != 'DHI']
data_y = data['DHI']
data_x_numpy = data_x.to_numpy() # Convert to numpy
data_y_numpy = data_y.to_numpy() # Convert to numpy
X_train = data_x_numpy[:train_size, :]
X_predict = data_x_numpy[train_size:, :]
Y_train_del = data_y_numpy[:train_size]
Y_predict_del = data_y_numpy[train_size:]
if city == current_city:
Y_train = Y_train_del
Y_predict = Y_predict_del
if one_dim:
X_train, X_predict, Y_train_del, Y_predict_del = one_dimen_transform(
Y_train_del, Y_predict_del, d=20)
big_X_train.append(X_train)
big_X_predict.append(X_predict)
if city == current_city:
Y_train = Y_train_del
else:
big_X_train.append(X_train)
big_X_predict.append(X_predict)
X_train = np.hstack(big_X_train)
X_predict = np.hstack(big_X_predict)
return([X_train, X_predict, Y_train, Y_predict])
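# Usage sketch (assumes the per-city frames loaded above, e.g. dataPalo_Alto, are in scope;
# the train_size value is purely illustrative): stack all cities' features for one target city.
# X_train, X_predict, Y_train, Y_predict = big_transform(
#     CA_cities, 'Palo_Alto', one_dim=False, train_size=2000)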
tot_trial = 10
rnn = False
# True for Palo Alto only, as it is a network. So need to run the procedure TWICE
energy_data = True
''' A.1 Coverage over 1-\alpha (e.g. Figure 1)'''
alpha_ls = np.linspace(0.05, 0.25, 5)
methods = ['Ensemble']
for one_dim in [True, False]:
for data_name in Data_name:
data = eval(f'data{data_ind[data_name]}') # Pandas DataFrame
data_x = data.loc[:, data.columns != response_ls[data_name]]
data_y = data[response_ls[data_name]]
data_x_numpy = data_x.to_numpy() # Convert to numpy
data_y_numpy = data_y.to_numpy() # Convert to numpy
total_data_points = data_x_numpy.shape[0]
train_size = int(0.2 * total_data_points)
results = pd.DataFrame(columns=['itrial', 'dataname', 'muh_fun',
'method', 'alpha', 'coverage', 'width'])
results_ts = pd.DataFrame(columns=['itrial', 'dataname',
'method', 'alpha', 'coverage', 'width'])
for itrial in range(tot_trial):
np.random.seed(98765 + itrial)
for alpha in alpha_ls:
# Note, this is necessary because a model may "remember the past"
nnet = util.keras_mod()
rnnet = util.keras_rnn()
print(f'At trial # {itrial} and alpha={alpha}')
print(f'For {data_name}')
if energy_data:
X_train, X_predict, Y_train, Y_predict = big_transform(
Data_name, data_name, one_dim, train_size)
d = 20
else:
X_train = data_x_numpy[:train_size, :]
X_predict = data_x_numpy[train_size:, :]
Y_train = data_y_numpy[:train_size]
Y_predict = data_y_numpy[train_size:]
d = 20 # for 1-d memory depth
if one_dim:
X_train, X_predict, Y_train, Y_predict = one_dimen_transform(
Y_train, Y_predict, d=d)
ridge_results = prediction_interval(
ridge_cv, X_train, X_predict, Y_train, Y_predict)
rf_results = prediction_interval(
random_forest, X_train, X_predict, Y_train, Y_predict)
nn_results = prediction_interval(
nnet, X_train, X_predict, Y_train, Y_predict)
if rnn:
T, k = X_train.shape
T1 = X_predict.shape[0]
X_train = X_train.reshape((T, 1, k))
X_predict = X_predict.reshape((T1, 1, k))
rnn_results = prediction_interval(
rnnet, X_train, X_predict, Y_train, Y_predict)
if itrial == 0:
# For ARIMA, only run once
result_ts = ridge_results.run_experiments(
alpha, B, stride, data_name, itrial, miss_test_idx, methods=methods, none_CP=True)
result_ts.rename(
columns={'train_size': 'alpha'}, inplace=True)
if one_dim:
result_ts['alpha'].replace(
train_size - d, alpha, inplace=True)
else:
result_ts['alpha'].replace(
train_size, alpha, inplace=True)
                    results_ts = pd.concat([results_ts, result_ts])
# %% [markdown]
# # Models and Ensembling Methods
# %% [markdown]
# ## Import dependencies
import numpy
from gensim.models import word2vec
from gensim.models import KeyedVectors
import pandas
from nltk import WordPunctTokenizer
from sklearn.preprocessing import label_binarize
import sqlite3
from sklearn.multiclass import OneVsRestClassifier
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn import svm
from itertools import cycle
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.metrics import precision_score, recall_score, roc_auc_score
from sklearn.metrics import multilabel_confusion_matrix, confusion_matrix
from sklearn.metrics import make_scorer
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import GridSearchCV
from mlxtend.plotting import plot_learning_curves
import lime
import lime.lime_tabular
# %% [markdown]
# ## Define Constants
W2V_FEATURE_SIZE = 300
N_CLASSES = 4
RANDOM_STATE = 123
N_FOLDS = 5
# %% [markdown]
# ## Read in the data
# %% [markdown]
# ### Load raw train and test data
# %% [markdown]
# #### Load in the data from the database
# %%
dbconn = sqlite3.connect('./data/cleanedtraintest_v2.db')
train_data_df = pandas.read_sql_query(
'SELECT category, content_cleaned FROM train_data', dbconn)
test_data_df = pandas.read_sql_query(
    'SELECT category, content_cleaned FROM test_data', dbconn)
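# %%
# Quick sanity check (illustrative addition, not in the original notebook): confirm both
# frames loaded from SQLite with the expected 'category' and 'content_cleaned' columns.
print(train_data_df.shape, test_data_df.shape)
print(train_data_df['category'].value_counts())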
import argparse
import sys
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import metrics
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import curve_fit
from collections import Counter
import pickle
import pdb
parser = argparse.ArgumentParser(description = '''Visualize and analyze the DockQ scores.''')
#Bench4
parser.add_argument('--bench4_dockq_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 AF in csv.')
parser.add_argument('--bench4_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 from RF in csv.')
parser.add_argument('--plDDT_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#parser.add_argument('--pconsdock_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
#parser.add_argument('--pconsdock_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
parser.add_argument('--bench4_kingdom', nargs=1, type= str, default=sys.stdin, help = 'Path to kingdoms for bench4 in csv.')
parser.add_argument('--dssp_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp annotations for bench4 in csv.')
parser.add_argument('--afdefault_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
#Marks positive
parser.add_argument('--marks_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set RF in csv.')
parser.add_argument('--marks_dockq_AF_bb', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF back bone atoms in csv.')
parser.add_argument('--marks_dockq_AF_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF all atoms in csv.')
parser.add_argument('--marks_dockq_GRAMM', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set GRAMM in csv.')
parser.add_argument('--marks_dockq_TMfull', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set TMdock in csv.')
parser.add_argument('--marks_dockq_TMint', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set interface TMdock in csv.')
parser.add_argument('--marks_dockq_mdockpp', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set MdockPP in csv.')
parser.add_argument('--plDDT_marks_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--plDDT_marks_fused', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--dssp_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp metrics in csv.')
parser.add_argument('--ifstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to if metrics in csv.')
parser.add_argument('--aln_scores_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to aln scores in csv.')
parser.add_argument('--oxstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to statistics over organisms in csv.')
parser.add_argument('--afdefault_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
parser.add_argument('--af_chain_overlap_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to chain overlap for AF a3m in csv.')
#Marks negative
parser.add_argument('--plDDT_marks_negative_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#Negatome
parser.add_argument('--plDDT_negatome_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#New set
parser.add_argument('--newset_dockq_AF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for new set AF in csv.')
parser.add_argument('--plDDT_newset', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv for newset.')
#Output directory
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include / at the end.')
################FUNCTIONS#################
def dockq_box(bench4_dockq, outdir):
'''Plot a boxplot of the dockq score for the different modes
'''
#Plot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
modes = bench4_dockq.columns[1:]
all_modes = []
all_scores = []
all_msas = []
all_model_options = []
accuracies = {}
for mode in modes:
#Frac correct and avg score
fraq_correct = np.argwhere(bench4_dockq[mode].values>=0.23).shape[0]/len(bench4_dockq)
accuracies[mode]=fraq_correct
av = np.average(bench4_dockq[mode].values)
print(mode, np.round(fraq_correct,3),np.round(av,3))
#Save scores
all_scores.extend([*bench4_dockq[mode].values])
mode = '_'.join(mode.split('_')[4:])
mode = mode.split('_')
msa = mode[0]
model = '_'.join(mode[1:-1])
option = mode[-1]
#save
all_modes.extend([msa+'\n'+model+'\n'+option]*len(bench4_dockq))
all_msas.extend([msa]*len(bench4_dockq))
all_model_options.extend([model+' '+option]*len(bench4_dockq))
def correlate_scores(bench4_dockq, outdir):
'''Correlate the scores for all different modeling strategies
'''
modes = ['DockQ_dockqstats_bench4_af2_hhblits_model_1_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_rec10']
corr_matrix = np.zeros((len(modes),len(modes)))
for i in range(len(modes)):
scores_i = bench4_dockq[modes[i]].values
for j in range(i+1,len(modes)):
scores_j = bench4_dockq[modes[j]].values
#Correlate
R,p = pearsonr(scores_i,scores_j)
corr_matrix[i,j]=np.round(R,2)
corr_matrix[j,i]=np.round(R,2)
print(modes)
print(corr_matrix)
#Create df
corr_df = pd.DataFrame()
modes = ['_'.join(x.split('_')[4:]) for x in modes]
corr_df['Comparison'] = modes
for i in range(len(modes)):
corr_df[modes[i]]=corr_matrix[i,:]
corr_df.to_csv(outdir+'model_correlations.csv')
def fetch_missing_dockq(marks_dockq_AF_bb,marks_dockq_AF_aa):
'''Fetch missing DockQ scores
'''
ids = ['_'.join(x.split('-')) for x in marks_dockq_AF_aa.complex_id.values]
    #Get missing scores
missing = marks_dockq_AF_bb[~marks_dockq_AF_bb.complex_id.isin(ids)]
ids = [x[:6]+'-'+x[7:] for x in missing.complex_id.values]
missing['complex_id']=ids
marks_dockq_AF_aa = pd.concat([marks_dockq_AF_aa,missing[marks_dockq_AF_aa.columns]])
return marks_dockq_AF_aa
def pdockq(if_plddt_contacts, dockq_scores, outdir):
#pdockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
#Create RA
x_ra = []
y_ra = []
y_std = []
y_av_err = []
step = 20
for t in np.arange(0,max(if_plddt_contacts)-step,step):
inds = np.argwhere((if_plddt_contacts>=t)&(if_plddt_contacts<t+step))[:,0]
x_ra.append(t+step/2)
y_ra.append(np.average(dockq_scores[inds]))
y_std.append(np.std(dockq_scores[inds]))
y_av_err.append(np.average(np.absolute(dockq_scores[inds]-y_ra[-1])))
#Do a simple sigmoid fit
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
xdata = if_plddt_contacts[np.argsort(if_plddt_contacts)]
ydata = dockq_scores[np.argsort(if_plddt_contacts)]
    p0 = [max(ydata), np.median(xdata),1,min(ydata)] # this is a mandatory initial guess
popt, pcov = curve_fit(sigmoid, xdata, ydata,p0, method='dogbox')
y = sigmoid(xdata, *popt)
plt.plot(xdata,y,color='r',label='Sigmoidal fit')
#Calc error
print('Sigmoid params:',*popt)
plt.scatter(if_plddt_contacts,dockq_scores,s=1)
#plt.plot(x_ra,y_ra,label='Running average', color='tab:blue')
#plt.fill_between(x_ra,np.array(y_ra)-np.array(y_av_err),np.array(y_ra)+np.array(y_av_err),color='tab:blue',alpha=0.25, label='Average error')
plt.title('pDockQ')
plt.xlabel('IF plDDT⋅log(IF contacts)')
plt.ylabel('DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pDockQ.svg',format='svg',dpi=300)
plt.close()
print('Average error for sigmoidal fit:',np.average(np.absolute(y-ydata)))
print('L=',np.round(popt[0],3),'x0=',np.round(popt[1],3) ,'k=',np.round(popt[2],3), 'b=',np.round(popt[3],3))
return popt
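#Worked sketch (added here for clarity, not from the original script): given the fitted
#parameters popt = (L, x0, k, b) returned above, pDockQ for a single model is the sigmoid
#of average interface plDDT * log10(number of interface contacts + 1).
def pdockq_from_popt(if_plddt_av, n_if_contacts, popt):
    L, x0, k, b = popt
    x = if_plddt_av*np.log10(n_if_contacts+1)
    return L/(1+np.exp(-k*(x-x0)))+b
#Example call with purely illustrative inputs (85 average interface plDDT, 120 contacts):
#pdockq_from_popt(85.0, 120, popt)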
def ROC_pred_marks(marks_dockq_AF, plDDT_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
predicted structures
'''
#Merge dfs
plDDT_marks['complex_id']=plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on=['complex_id'],how='inner')
#Get min of chains
single_chain_plddt = np.min(merged[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
merged['min_chain_plddt_av_1'] = single_chain_plddt
#Analyze ROC as a function of
plDDT_metrics = ['if_plddt_av', 'min_chain_plddt_av',
'plddt_av', 'num_atoms_in_interface', 'num_res_in_interface']
plDDT_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
run='1'
dockq_scores = merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run'+run].values
correct = np.zeros(len(dockq_scores))
correct[np.argwhere(dockq_scores>=0.23)]=1
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
    colors = {0:'darkblue',1:'magenta',2:'orange',3:'darkgreen',4:'tab:blue',5:'tab:olive',6:'k'} #5 and 6 are unused; the original 'tab:yellow'/'tab:black' are not valid matplotlib colors
for i in range(len(plDDT_metrics)):
plDDT_metric_vals = merged[plDDT_metrics[i]+'_'+run].values
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, plDDT_metric_vals, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
label = plDDT_metrics[i]
plt.plot(fpr, tpr, label = plDDT_nice_names[label]+': AUC = %0.2f' % roc_auc,color=colors[i])
#Add log(if contacts)*if_plddt_av
if_plddt_contacts = merged['if_plddt_av_1'].values*np.log10(merged['num_atoms_in_interface_1'].values+1)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, if_plddt_contacts, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Get pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
sigmoid_params = pdockq(if_plddt_contacts, dockq_scores, outdir)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, sigmoid(if_plddt_contacts,*sigmoid_params), pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('ROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_marks.svg',format='svg',dpi=300)
plt.close()
#pDockQ vs DockQ
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(sigmoid(if_plddt_contacts,*sigmoid_params),dockq_scores,s=1)
plt.title('pDockQ vs DockQ')
plt.xlabel('pDockQ')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pdockq_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#plot if plddt vs log contacts and color by dockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['num_atoms_in_interface_1'].values+1, merged['if_plddt_av_1'].values,c=dockq_scores,s=2)
cbar = plt.colorbar()
cbar.set_label('DockQ')
plt.xscale('log')
plt.ylim([40,100])
plt.title('Interface contacts, plDDT and DockQ')
plt.xlabel('Interface contacts')
plt.ylabel('Average interface plDDT')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'if_conctacts_vs_plddt.svg',format='svg',dpi=300)
plt.close()
return sigmoid_params
def score_marks_5runs_paired_af(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run3','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('Test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using av plDDT*log(if_contacts) in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('AUC using the same ranking', roc_auc)
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=3,color='tab:blue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=3,color='mediumseagreen',label='Min')
plt.title('Model ranking on the test set')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_marks_5runs.svg',format='svg',dpi=300)
plt.close()
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_af2']=first_ranked_scores
marks_dockq_AF['top_ranked_pDockQ']=first_ranked_separators
marks_dockq_AF['top_ranked_model_run_af2']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_af2_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_af2_marks_reduced.csv')
return marks_dockq_AF
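#Tiny synthetic illustration (not from the original script) of the per-complex ranking used
#above: for each complex, pick the run with the highest pDockQ separator and report that
#run's DockQ as the first-ranked score.
#sep = np.array([[0.2, 0.5, 0.4], [0.7, 0.1, 0.3]])
#dq = np.array([[0.10, 0.60, 0.55], [0.80, 0.05, 0.20]])
#best = np.argmax(sep, axis=1)
#dq[np.arange(len(dq)), best]  # -> array([0.6, 0.8])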
def score_marks_5runs_paired_fused(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run3','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('FUSED test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using if_plddt_av and num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('FUSED AUC using the same ranking', roc_auc)
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_fused']=first_ranked_scores
marks_dockq_AF['top_ranked_model_run_fused']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_fused_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_fused_marks_reduced.csv')
return marks_dockq_AF
def marks_box(marks_dockq_AF, marks_dockq_GRAMM, marks_dockq_mdockpp, marks_dockq_TMfull, marks_dockq_TMint, marks_dockq_RF,outdir):
'''Box df of Marks set
'''
marks_dockq_TMint = marks_dockq_TMint.dropna()
marks_dockq_TMfull = marks_dockq_TMfull.dropna()
#Get data
rf_scores = marks_dockq_RF.DockQ_dockqstats_marks_RF.values
gramm_scores = marks_dockq_GRAMM[1].values
mdockpp_scores = marks_dockq_mdockpp.DockQ.values
TMfull_scores = marks_dockq_TMfull.dockq.values
TMint_scores = marks_dockq_TMint.dockq.values
paired_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values
af2_std_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values
run1_both_scores= marks_dockq_AF.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values
run1_fused_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1.values
top_paired_af_scores = marks_dockq_AF.top_ranked_model_DockQ_af2.values
top_paired_fused_scores = marks_dockq_AF.top_ranked_model_DockQ_fused.values
data1 = [rf_scores, gramm_scores, mdockpp_scores, TMint_scores, af2_std_scores, paired_scores, top_paired_af_scores, top_paired_fused_scores]
data2 = [run1_both_scores, run1_fused_scores, top_paired_af_scores,top_paired_fused_scores]
all_data = [data1,data2]
xlabels1 = ['RF','GRAMM', 'MDockPP', 'TMdock\nInterfaces', 'AF2', 'Paired', 'AF2+Paired\ntop ranked','Block+Paired\ntop ranked']
xlabels2 = ['AF2+Paired', 'Block+Paired', 'AF2+Paired\ntop ranked', 'Block+Paired\ntop ranked']
all_xlabels = [xlabels1, xlabels2]
#Color
colors = sns.husl_palette(len(xlabels1)+2)
all_colors = [colors[:len(xlabels1)],colors[-len(xlabels2):]]
for i in range(len(all_data)):
#Boxplot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
data = all_data[i] #Get data and xlabel variation
xlabels = all_xlabels[i]
colors = all_colors[i]
#Success rates
srs = []
for j in range(len(data)):
sr = np.argwhere(data[j]>=0.23).shape[0]/len(data[j])
median = np.median(data[j])
print(xlabels[j],'sr:',np.round(sr,3),len(data[j]),median)
#xlabels[j]+='\nSR: '+str(np.round(100*sr,1))+'%'
#xlabels[j]+='\nM: '+str(np.round(median,3))
# Creating plot
#ax.violinplot(data)
bp = ax.boxplot(data, patch_artist = True, notch=True, showfliers=False)
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.75)
# changing color and linewidth of
# medians
for median in bp['medians']:
median.set(color ='k',linewidth = 3)
# #Add swarm
# for i in range(len(data)):
# # Add some random "jitter" to the x-axis
# x = np.random.normal(i, 0.04, size=len(data[i]))
# plt.plot(x+1, data[i], 'r.', alpha=0.2)
# changing color and linewidth of
# whiskers
for whisker in bp['whiskers']:
whisker.set(color ='grey',
linewidth = 1)
# changing color and linewidth of
# caps
for cap in bp['caps']:
cap.set(color ='grey',
linewidth = 1)
plt.title('DockQ scores for the test set',fontsize=20)
plt.xticks(np.arange(1,len(xlabels)+1),xlabels,fontsize=12)
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'DockQ_box_test'+str(i)+'.svg',format='svg',dpi=300)
plt.close()
def AF_vs_RF_marks(marks_dockq_RF,marks_dockq_AF, outdir):
'''Compare the scores for RF vs AF
'''
merged = pd.merge(marks_dockq_RF,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks RF and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ_dockqstats_marks_RF'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('RF vs AF2 performance on the test set')
plt.xlabel('RF DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'RF_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_RF = np.argwhere(merged['DockQ_dockqstats_marks_RF'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate RF:',num_correct_RF,'out of',num_total,'|',np.round(100*num_correct_RF/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where RF outperforms AF
scores = merged[['DockQ_dockqstats_marks_RF','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
rf_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(rf_pos,axis=1)
print('RF outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(rf_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(rf_pos))
def AF_vs_GRAMM_marks(marks_dockq_GRAMM, marks_dockq_AF, outdir):
'''Compare the scores for GRAMM vs AF
'''
marks_dockq_GRAMM = marks_dockq_GRAMM.rename(columns={1: 'DockQ GRAMM'})
marks_dockq_GRAMM['complex_id'] = ['_'.join(x.split('-')) for x in marks_dockq_GRAMM[0]]
merged = pd.merge(marks_dockq_GRAMM,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks GRAMM and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ GRAMM'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('GRAMM vs AF2 performance on the test set')
plt.xlabel('GRAMM DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'GRAMM_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_GRAMM = np.argwhere(merged['DockQ GRAMM'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate GRAMM:',num_correct_GRAMM,'out of',num_total,'|',np.round(100*num_correct_GRAMM/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where GRAMM outperforms AF
scores = merged[['DockQ GRAMM','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
GRAMM_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(GRAMM_pos,axis=1)
print('GRAMM outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(GRAMM_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(GRAMM_pos))
def AF_vs_TMint_marks(marks_dockq_TMint, marks_dockq_AF, outdir):
    '''Compare the scores for TMint vs AF
'''
marks_dockq_TMint = marks_dockq_TMint.rename(columns={'dockq': 'DockQ TMint'})
merged = pd.merge(marks_dockq_TMint,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks TMint and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ TMint'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('TMint vs AF2 performance on the test set')
plt.xlabel('TMint DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'TMint_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_TMint = np.argwhere(merged['DockQ TMint'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate TMint:',num_correct_TMint,'out of',num_total,'|',np.round(100*num_correct_TMint/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
    #Get where TMint outperforms AF
scores = merged[['DockQ TMint','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
TMint_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(TMint_pos,axis=1)
print('TMint outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(TMint_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(TMint_pos))
def real_features_marks(marks_dockq_AF, dssp_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
real structures
'''
#Change DSSP df
dssp_marks['Helix']=dssp_marks.G+dssp_marks.H+dssp_marks.I
dssp_marks['Sheet']=dssp_marks.E+dssp_marks.B
    dssp_marks['Loop']=dssp_marks[' '].values #DSSP uses a blank (' ') code for loop/irregular residues
ss = dssp_marks[['Helix','Sheet','Loop']].values #0,1,2
dssp_marks['ss_class']=np.argmax(dssp_marks[['Helix','Sheet','Loop']].values,axis=1)
dssp_marks = dssp_marks[['id1','id2','ss_class']]
#Merge dfs
dssp_marks['complex_id']=dssp_marks.id1+'-'+dssp_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged_dssp = pd.merge(marks_dockq_AF,dssp_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(marks_dockq_AF,ifstats_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,aln_scores_marks,on=['complex_id'],how='inner')
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
merged_if = pd.merge(merged_if,AFneffs_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,topneffs_marks,on=['complex_id'],how='inner')
'''
G = 3-turn helix (310 helix). Min length 3 residues.
H = 4-turn helix (α helix). Minimum length 4 residues.
I = 5-turn helix (π helix). Minimum length 5 residues.
T = hydrogen bonded turn (3, 4 or 5 turn)
E = extended strand in parallel and/or anti-parallel β-sheet conformation. Min length 2 residues.
B = residue in isolated β-bridge (single pair β-sheet hydrogen bond formation)
S = bend (the only non-hydrogen-bond based assignment).
C = coil (residues which are not in any of the above conformations).
'''
print('Num complexes in DSSP feature analysis',len(merged_dssp))
#Plot success rate per ss class
ss_classes = {0:'Helix',1:'Sheet',2:'Loop'}
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for i in range(3):
sel = merged_dssp[merged_dssp.ss_class==i]
success=np.argwhere(sel.top_ranked_model_DockQ_af2.values>=0.23).shape[0]/len(sel)
print(ss_classes[i],'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.distplot(sel.top_ranked_model_DockQ_af2,label=ss_classes[i]+' : '+str(np.round(100*success,1))+' % successful',hist=False)
plt.title('DockQ and SS for the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_SS_marks.svg',format='svg',dpi=300)
plt.close()
#Plot feature vs DockQ
#Get min chain len
merged_if['smallest chain length'] = np.min(merged_if[['l1','l2']].values,axis=1)
#Get max chain len
merged_if['biggest chain length'] = np.max(merged_if[['l1','l2']].values,axis=1)
vars = ['num_if_contacts_total','smallest chain length', 'biggest chain length', 'aln_score', 'AFdefault_Neff', 'tophit_Neff']
nicer_names = {'num_if_contacts_total':'number of interface contacts','smallest chain length':'smallest chain length', 'biggest chain length':'biggest chain length',
'aln_score':'alignment score', 'AFdefault_Neff':'AF Neff', 'tophit_Neff':'Paired Neff'}
print('Num complexes in real feature analysis',len(merged_if))
#Plot each third and the distribution vs vars
for var in vars:
        fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
print (np.quantile(merged_if[var],0.5,axis=0))
l=[np.min(merged_if[var])]
l+=[np.quantile(merged_if[var],0.33,axis=0)]
l+=[np.quantile(merged_if[var],0.67,axis=0)]
l+=[np.max(merged_if[var])]
print (l)
j=0
for i in l[0:3]:
j+=1
#print ("test: ",i,j,l[j])
sel = merged_if.loc[ (merged_if[var] > i) & (merged_if[var] < l[j]) ]
success=np.argwhere(sel.top_ranked_model_DockQ_af2.values>=0.23).shape[0]/len(sel)
print(j,str(i)+" - "+ str(l[j])+":",'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.kdeplot(sel.top_ranked_model_DockQ_af2,label=str(round(i,0))+"-"+str(round(l[j],0))+' : '+str(np.round(100*success,1))+' % successful')
plt.title('DockQ and ' + nicer_names[var] + '\nfor the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_'+var+'_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_per_org(marks_dockq_AF, oxstats_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Analyze the dockq per organism
'''
#Merge
oxstats_marks['complex_id'] = oxstats_marks.id1+'-'+oxstats_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged = pd.merge(marks_dockq_AF,oxstats_marks,on='complex_id',how='left')
merged = pd.merge(merged,ifstats_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,aln_scores_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,AFneffs_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,topneffs_marks,on=['complex_id'],how='inner')
#Get min chain len
merged['smallest chain length'] = np.min(merged[['l1','l2']].values,axis=1)
#Get max chain len
merged['biggest chain length'] = np.max(merged[['l1','l2']].values,axis=1)
organisms = ['Homo sapiens','Saccharomyces cerevisiae', 'Escherichia coli']
vars = ['num_if_contacts_total','smallest chain length', 'biggest chain length', 'aln_score','AFdefault_Neff', 'tophit_Neff']
#Save
orgs = []
dockq_scores = []
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for org in organisms:
sel = merged[merged.Org1==org]
sel = sel[sel.Org2==org]
print('Number of complexes for',org,len(sel))
#Successs rate
sel_scores = sel.top_ranked_model_DockQ_af2.values
sr = np.argwhere(sel_scores>=0.23).shape[0]/len(sel_scores)
print('Success rate',sr)
#correlation
for var in vars:
R,p = spearmanr(sel[var].values,sel['top_ranked_model_DockQ_af2'].values)
print(var, np.round(R,2))
if org =='Saccharomyces cerevisiae':
org = 'S.cerevisiae'
if org =='Escherichia coli':
org = 'E.coli'
sns.distplot(sel_scores,label=org+' : '+str(np.round(sr*100,1))+' % successful',hist=False)
plt.title('DockQ per organism for the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_org_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_per_kingdom(marks_dockq_AF, oxstats_marks, AFneffs_marks, topneffs_marks, outdir):
    '''Analyze the dockq per kingdom
'''
#Merge
oxstats_marks['complex_id'] = oxstats_marks.id1+'-'+oxstats_marks.id2
#AFneffs_marks['complex_id']=['_'.join(x.split('-')) for x in AFneffs_marks.complex_id]
#topneffs_marks['complex_id']=['_'.join(x.split('-')) for x in topneffs_marks.complex_id]
merged = pd.merge(marks_dockq_AF,oxstats_marks,on='complex_id',how='left')
merged = pd.merge(merged,AFneffs_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,topneffs_marks,on=['complex_id'],how='inner')
kingdoms = ['E', 'B', 'A', 'V']
nice_labels = {'top_ranked_model_DockQ_af2':'DockQ', 'AFdefault_Neff':'AF Neff', 'tophit_Neff':'Paired Neff'}
for var in ['top_ranked_model_DockQ_af2', 'AFdefault_Neff', 'tophit_Neff']:
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for kd in kingdoms:
sel = merged[merged.kingdom1==kd]
sel = sel[sel.kingdom2==kd]
#Successs rate
sel_scores = sel[var].values
if var=='top_ranked_model_DockQ_af2':
sr = np.argwhere(sel_scores>=0.23).shape[0]/len(sel_scores)
print('Success rate for',kd,sr,len(sel_scores))
sns.distplot(sel_scores,label=kd+' : '+str(np.round(sr*100,1))+' % successful',hist=False)
else:
sns.distplot(sel_scores,label=kd,hist=False)
plt.title(nice_labels[var]+' per kingdom for the test set')
plt.xlabel(nice_labels[var])
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+var+'_per_kd_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_vs_aln_overlap(marks_dockq_AF, af_chain_overlap_marks, outdir):
'''Analyze the dockq vs chain overlap
'''
#Merge
cid = ['_'.join(x.split('-')) for x in af_chain_overlap_marks.complex_id.values]
af_chain_overlap_marks['complex_id']=cid
merged = pd.merge(marks_dockq_AF,af_chain_overlap_marks,on='complex_id',how='inner')
#Plot tertiles
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
l=[np.min(merged.Overlap)]
l+=[np.quantile(merged.Overlap,0.33,axis=0)]
l+=[np.quantile(merged.Overlap,0.67,axis=0)]
l+=[np.max(merged.Overlap)]
j=0
for i in l[0:3]:
j+=1
sel = merged.loc[ (merged['Overlap'] > i) & (merged['Overlap'] < l[j]) ]
success=np.argwhere(sel.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values>=0.23).shape[0]/len(sel)
print(j,str(i)+" - "+ str(l[j])+":",'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.kdeplot(sel.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10,label=str(round(i,2))+"-"+str(round(l[j],2))+' : '+str(np.round(100*success,1))+' % successful')
plt.title('DockQ vs chain overlap in AF2 msas')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'dockq_vs_overlap.svg',format='svg',dpi=300)
plt.close()
#Plot overlap distribution
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
sns.distplot(merged.Overlap)
plt.title('Chain overlap distribution in AF2 msas')
plt.xlabel('Overlap fraction')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'overlap_distr.svg',format='svg',dpi=300)
plt.close()
def score_newset_5runs(newset_dockq_AF, plDDT_newset, sigmoid_params, outdir):
    '''Analyze the variation in DockQ over 5 identical runs on the new dimer set and
    rank the models using metrics from the predicted structures
'''
#Merge dfs
plDDT_newset['complex_id'] = plDDT_newset.id1+'-'+plDDT_newset.id2
merged = pd.merge(newset_dockq_AF,plDDT_newset,on=['complex_id'],how='inner')
    #Get num atoms in interface
separator1 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[newset_dockq_AF.columns[1:]].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('New set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=10,color='darkblue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=10,color='mediumseagreen',label='Min')
plt.title('Model ranking from 5 runs on the new dimer set\n(both MSAs, model 1, 10 recycles)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_newset_5runs.svg',format='svg',dpi=300)
plt.close()
def dev_vs_test(marks_dockq_AF, oxstats_marks, ifstats_marks, aln_scores_marks, AFneffs_marks,
topneffs_marks, bench4_kingdom, dssp_bench4, AFneffs_bench4, topneffs_bench4, outdir):
'''Analyze the distributions of different features for the dev vs the test set
Neff
Kingdom
SS in interface
Number of interface contacts
Chain length (biggest and smallest)
'''
#Merge bench4
bench4_merged = pd.merge(bench4_kingdom,dssp_bench4,on=['id1','id2'],how='inner')
bench4_merged['complex_id'] = bench4_merged.PDB+'_u1-'+bench4_merged.PDB+'_u2'
bench4_merged = pd.merge(bench4_merged,AFneffs_bench4,on='complex_id',how='inner')
bench4_merged = pd.merge(bench4_merged,topneffs_bench4,on='complex_id',how='inner')
bench4_merged['min_chain_len'] = np.min(bench4_merged[['Sequence Length 1','Sequence Length 2']].values,axis=1)
bench4_merged['max_chain_len'] = np.max(bench4_merged[['Sequence Length 1','Sequence Length 2']].values,axis=1)
bench4_merged['sum_chain_len'] = np.sum(bench4_merged[['Sequence Length 1','Sequence Length 2']].values,axis=1)
bench4_kingdom['Kingdom'] = bench4_kingdom['Kingdom'].replace({' Bacteria ':'B', ' Eukaryota ':'E','Virus':'V'})
bench4_merged['if_fraction'] = np.divide(bench4_merged['num_if_contacts_total'],bench4_merged['sum_chain_len'])
#Merge Marks
marks_merged = pd.merge(oxstats_marks, ifstats_marks, on=['id1','id2'],how='inner')
marks_merged['complex_id'] = marks_merged.id1+'_'+marks_merged.id2
marks_merged = pd.merge(marks_merged,AFneffs_marks,on='complex_id',how='inner')
marks_merged = pd.merge(marks_merged,topneffs_marks,on='complex_id',how='inner')
marks_merged['min_chain_len'] = np.min(marks_merged[['l1','l2']].values,axis=1)
marks_merged['max_chain_len'] = np.max(marks_merged[['l1','l2']].values,axis=1)
marks_merged['sum_chain_len'] = np.sum(marks_merged[['l1','l2']].values,axis=1)
marks_merged['if_fraction'] = np.divide(marks_merged['num_if_contacts_total'],marks_merged['sum_chain_len'])
#Get kingdom fractions
kingdoms = ['E', 'B', 'A', 'V']
print('KD','Bench4','Marks')
for kd in kingdoms:
sel_bench4 = bench4_kingdom[bench4_kingdom.Kingdom==kd]
sel_marks = marks_merged[(marks_merged.kingdom1==kd)&(marks_merged.kingdom2==kd)]
print(kd,len(sel_bench4)/len(bench4_kingdom),len(sel_marks)/len(marks_merged))
#Plot vars
vars = ['num_if_contacts_total', 'min_chain_len','max_chain_len', 'sum_chain_len', 'AFdefault_Neff' ,'tophit_Neff','if_fraction']
for var in vars:
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
sns.distplot(bench4_merged[var],label='Dev. set',hist=True,kde=True,norm_hist=True)
        sns.distplot(marks_merged[var],label='Test set',hist=True,kde=True,norm_hist=True)
plt.legend()
plt.title('Dev. vs Test '+var)
plt.xlabel(var)
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'dev_vs_test_'+var+'.svg',format='svg',dpi=300)
plt.close()
def neg_vs_pos(plDDT_marks_af, plDDT_marks_negative_af, plDDT_negatome_af, sigmoid_params):
'''Compare the interfaces of positive and neg marks set + negatome
'''
#Filter out the homodimers from the negatome
keep_inds = []
for i in range(len(plDDT_negatome_af)):
row = plDDT_negatome_af.loc[i]
if row.id1!=row.id2:
keep_inds.append(i)
print('Num homodimers:',len(plDDT_negatome_af)-len(keep_inds))
plDDT_negatome_af = plDDT_negatome_af.loc[keep_inds]
#Get AUC using the different metrics
#Get min of chains
#Pos
single_chain_plddt = np.min(plDDT_marks_af[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
plDDT_marks_af['min_chain_plddt_av_1'] = single_chain_plddt
#Neg marks
single_chain_plddt = np.min(plDDT_marks_negative_af[['ch1_plddt_av', 'ch2_plddt_av']].values,axis=1)
plDDT_marks_negative_af['min_chain_plddt_av'] = single_chain_plddt
#Negatome
single_chain_plddt = np.min(plDDT_negatome_af[['ch1_plddt_av', 'ch2_plddt_av']].values,axis=1)
plDDT_negatome_af['min_chain_plddt_av'] = single_chain_plddt
#Analyze ROC as a function of
feature_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
colors = {'if_plddt_av':'darkblue','min_chain_plddt_av':'magenta','plddt_av':'orange',
'num_atoms_in_interface':'darkgreen','num_res_in_interface':'tab:blue', 'IF_cp':'cyan', 'pDockQ':'k'}
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for key in feature_nice_names:
pos_features = plDDT_marks_af[key+'_1'].values
neg_features = np.concatenate([plDDT_marks_negative_af[key].values, plDDT_negatome_af[key].values])
#ROC
correct = np.zeros(len(pos_features)+len(neg_features))
correct[:len(pos_features)]=1
all_features = np.concatenate([pos_features, neg_features])
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
#Plot ROC
plt.plot(fpr, tpr, label = feature_nice_names[key]+': AUC = %0.2f' % roc_auc, color=colors[key])
#TPRs
print(key,'TPR at FPR 1%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.01)[-1][0]]))
print(key,'TPR at FPR 5%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.05)[-1][0]]))
#Add log(if contacts)*if_plddt_av
pos_features_if_cp = plDDT_marks_af['if_plddt_av_1'].values*np.log10(plDDT_marks_af['num_atoms_in_interface_1'].values+1)
neg_features_marks_if_cp = plDDT_marks_negative_af['if_plddt_av'].values*np.log10(plDDT_marks_negative_af['num_atoms_in_interface'].values+1)
neg_features_negatome_if_cp = plDDT_negatome_af['if_plddt_av'].values*np.log10(plDDT_negatome_af['num_atoms_in_interface'].values+1)
neg_features_if_cp = np.concatenate([neg_features_marks_if_cp, neg_features_negatome_if_cp])
correct = np.zeros(len(pos_features_if_cp)+len(neg_features_if_cp))
correct[:len(pos_features_if_cp)]=1
all_features = np.concatenate([pos_features_if_cp, neg_features_if_cp])
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
#plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Do the same with pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
pos_features_pdockq = sigmoid(pos_features_if_cp, *sigmoid_params)
neg_features_pdockq = sigmoid(neg_features_if_cp, *sigmoid_params)
all_features = np.concatenate([pos_features_pdockq, neg_features_pdockq])
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
#TPRs
print('pDockQ TPR at FPR 1%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.01)[-1][0]]))
print('pDockQ TPR at FPR 5%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.05)[-1][0]]))
#Plot formatting
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('Identifying interacting proteins\nROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_pos_neg.svg',format='svg',dpi=300)
#Marks comparison
print('Only marks negative')
neg_features = sigmoid(neg_features_marks_if_cp, *sigmoid_params)
#ROC
correct = np.zeros(len(pos_features_pdockq)+len(neg_features))
correct[:len(pos_features)]=1
all_features = np.concatenate([pos_features_pdockq, neg_features])
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
t=0.01
print('Average interface pDockQ TPR at FPR '+str(t*100)+'%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2),'\nAUC:',roc_auc, 'FPR:',np.round(100*fpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2))
t=0.05
print('Average interface pDockQ TPR at FPR '+str(t*100)+'%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2),'\nAUC:',roc_auc, 'FPR:',np.round(100*fpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2))
#Plot distribution of separators
feature_nice_names = {'if_plddt_av':'IF_plDDT', 'num_atoms_in_interface':'IF_contacts',
'pDockQ':'pDockQ'}
xlims = {'if_plddt_av':[-20,120], 'num_atoms_in_interface':[-100,500],
'IF_cp':[0,250], 'pDockQ':[0,1]}
bins = {'if_plddt_av':20, 'num_atoms_in_interface':50,
'IF_cp':20, 'pDockQ':20}
matplotlib.rcParams.update({'font.size': 9})
for key in feature_nice_names:
fig,ax = plt.subplots(figsize=(6/2.54,6/2.54))
if key not in ['IF_cp','pDockQ']:
pos_features = plDDT_marks_af[key+'_1'].values
neg_features = np.concatenate([plDDT_marks_negative_af[key].values, plDDT_negatome_af[key].values])
if key=='IF_cp':
pos_features = pos_features_if_cp
neg_features = neg_features_if_cp
if key=='pDockQ':
pos_features = pos_features_pdockq
neg_features = neg_features_pdockq
plt.hist(pos_features,label='pos',color=colors[key],alpha=0.75,bins=bins[key],density=True)
plt.hist(neg_features,label='neg',color='gray',alpha=0.75,bins=bins[key],density=True)
plt.legend(fontsize=9)
#plt.title('Distribution of '+feature_nice_names[key],fontsize=9)
plt.xlim(xlims[key])
plt.xlabel(feature_nice_names[key])
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+key+'_pos_vs_neg_distr.svg',format='svg',dpi=300)
plt.close()
# #Create df of fpr vs tpr
# roc_df = pd.DataFrame()
# roc_df['FPR']=fpr
# roc_df['TPR']=tpr
# roc_df['Number of interface contacts'] = threshold
def ppv_vs_dockq_marks(marks_dockq_AF,ifstats_marks,outdir):
'''Analysis of the relationship between if stats and DockQ
'''
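#PPV here is the fraction of predicted interface contacts that are correct:
#PPV = num_accurate_if/num_if_contacts_total (computed below), and SR is the fraction
#of models with DockQ >= 0.23, the usual 'acceptable' DockQ cutoff used further down.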
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
merged_if = pd.merge(marks_dockq_AF,ifstats_marks,on=['complex_id'],how='inner')
#Calc PPV
merged_if['PPV'] = merged_if['num_accurate_if']/merged_if['num_if_contacts_total']
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged_if.PPV,merged_if.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1,
s=1,label='AF2+Paired',c='tab:blue')
plt.scatter(merged_if.PPV,merged_if.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10,
s=1,label='Paired',c='tab:orange')
#RA
ra_x = []
ra_y_paired = []
ra_y_both = []
sr_paired = []
sr_both = []
step = 0.05
for i in np.arange(0,0.5,step):
sel = merged_if[(merged_if.PPV>=i)&(merged_if.PPV<i+0.1)]
if len(sel)<1:
continue
ra_x.append(i+step/2)
#ra_y_paired.append(np.average(sel.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10))
#ra_y_both.append(np.average(sel.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1))
#SR
sr_paired.append(np.argwhere(sel.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values>=0.23).shape[0]/len(sel))
sr_both.append(np.argwhere(sel.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values>=0.23).shape[0]/len(sel))
#RA
# plt.plot(ra_x,ra_y_paired,label='RA Paired',c='tab:orange')
# plt.plot(ra_x,ra_y_both,label='RA AF2+Paired',c='tab:blue')
#SR
plt.plot(ra_x,sr_paired,label='SR Paired',c='tab:orange')
plt.plot(ra_x,sr_both,label='SR AF2+Paired',c='tab:blue')
plt.legend()
plt.title('Interface PPV vs DockQ')
plt.xlabel('Interface PPV')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ppv_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#################MAIN####################
#Parse args
args = parser.parse_args()
#Data
#bench4
bench4_dockq_aa = pd.read_csv(args.bench4_dockq_aa[0])
bench4_dockq_RF = pd.read_csv(args.bench4_dockq_RF[0])
plDDT_bench4 = pd.read_csv(args.plDDT_bench4[0])
#pconsdock_bench4 = pd.read_csv(args.pconsdock_bench4[0])
#pconsdock_marks = pd.read_csv(args.pconsdock_marks[0])
bench4_kingdom = pd.read_csv(args.bench4_kingdom[0])
dssp_bench4 = pd.read_csv(args.dssp_bench4[0])
AFneffs_bench4 = pd.read_csv(args.afdefault_neff_bench4[0])
topneffs_bench4 = pd.read_csv(args.tophits_neff_bench4[0])
#Marks positive
marks_dockq_RF = pd.read_csv(args.marks_dockq_RF[0])
marks_dockq_AF_bb = pd.read_csv(args.marks_dockq_AF_bb[0])
marks_dockq_AF_aa = pd.read_csv(args.marks_dockq_AF_aa[0])
marks_dockq_GRAMM = pd.read_csv(args.marks_dockq_GRAMM[0],header=None)
marks_dockq_TMfull = pd.read_csv(args.marks_dockq_TMfull[0])
marks_dockq_TMint = pd.read_csv(args.marks_dockq_TMint[0])
marks_dockq_mdockpp = pd.read_csv(args.marks_dockq_mdockpp[0])
plDDT_marks_af = pd.read_csv(args.plDDT_marks_af[0])
plDDT_marks_fused = pd.read_csv(args.plDDT_marks_fused[0])
dssp_marks = pd.read_csv(args.dssp_marks[0])
ifstats_marks =
|
pd.read_csv(args.ifstats_marks[0])
|
pandas.read_csv
|
"""The structure prediction module provides functionality in respect toe the prediction
and structure of a document.
"""
import pandas
from scipy.sparse import hstack
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_union, make_pipeline
from sklearn.preprocessing import LabelBinarizer, Normalizer
from ..argument_pipeline.base import Model
from ..nlp._utils import spacy_download
from ..nlp.transformers import WordSentimentCounter, DiscourseMatcher
from ..utils import logger
nlp = spacy_download()
__all__ = [
"StructurePredictor",
"StructureFeatures"
]
class StructurePredictor(Model):
def __init__(self, model_id=None):
if model_id is None:
model_id = "structure_predictor"
super().__init__(model_id=model_id)
@staticmethod
def default_train():
"""Default training method which supplies the default training set"""
from canary.corpora import load_essay_corpus
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
logger.debug("Resample")
ros = RandomOverSampler(random_state=0, sampling_strategy='not majority')
x, y = load_essay_corpus(purpose="relation_prediction")
x, y = ros.fit_resample(pandas.DataFrame(x),
|
pandas.DataFrame(y)
|
pandas.DataFrame
|
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if var not in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and 'WS' not in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
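# r2d = 45/arctan(1) = 180/pi converts radians to degrees; WS is the wind-speed
# magnitude and WD the wind direction in degrees derived from the U/V components.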
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M') # convert to a Timestamp so the hour offsets below work
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M') # convert to a Timestamp so the hour offsets below work
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='15min')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M') # convert to a Timestamp so the hour offsets below work
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date -
|
pd.DateOffset(hours=1)
|
pandas.DateOffset
|
import datetime
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_index_equal
import pandas_market_calendars as mcal
from pandas_market_calendars.exchange_calendar_nyse import NYSEExchangeCalendar
from tests.test_market_calendar import FakeCalendar, FakeBreakCalendar
def test_get_calendar():
assert isinstance(mcal.get_calendar('NYSE'), NYSEExchangeCalendar)
cal = mcal.get_calendar('NYSE', datetime.time(10, 0), datetime.time(14, 30))
assert isinstance(cal, NYSEExchangeCalendar)
assert cal.open_time == datetime.time(10, 0)
assert cal.close_time == datetime.time(14, 30)
# confirm that import works properly
_ = mcal.get_calendar('CME_Equity')
def test_get_calendar_names():
assert 'ASX' in mcal.get_calendar_names()
def test_date_range_exceptions():
cal = FakeCalendar(open_time= datetime.time(9), close_time= datetime.time(11, 30))
schedule = cal.schedule("2021-01-05", "2021-01-05")
### invalid closed argument
with pytest.raises(ValueError) as e:
mcal.date_range(schedule, "15min", closed= "righ")
assert e.exconly() == "ValueError: closed must be 'left', 'right', 'both' or None."
### invalid force_close argument
with pytest.raises(ValueError) as e:
mcal.date_range(schedule, "15min", force_close= "True")
assert e.exconly() == "ValueError: force_close must be True, False or None."
### close_time is before open_time
schedule = pd.DataFrame([["2020-01-01 12:00:00+00:00", "2020-01-01 11:00:00+00:00"]],
index= ["2020-01-01"], columns= ["market_open", "market_close"])
with pytest.raises(ValueError) as e:
mcal.date_range(schedule, "15min", closed="right", force_close= True)
assert e.exconly() == "ValueError: Schedule contains rows where market_close < market_open,"\
" please correct the schedule"
### Overlap -
### the end of the last bar goes over the next start time
bcal = FakeBreakCalendar()
bschedule = bcal.schedule("2021-01-05", "2021-01-05")
with pytest.raises(ValueError) as e1:
# this frequency overlaps
mcal.date_range(bschedule, "2H", closed= "right", force_close= None)
# this doesn't
mcal.date_range(bschedule, "1H", closed="right", force_close=None)
with pytest.raises(ValueError) as e2:
mcal.date_range(bschedule, "2H", closed= "both", force_close= None)
mcal.date_range(bschedule, "1H", closed="right", force_close=None)
with pytest.raises(ValueError) as e3:
mcal.date_range(bschedule, "2H", closed= None, force_close= None)
mcal.date_range(bschedule, "1H", closed="right", force_close=None)
for e in (e1, e2, e3):
assert e.exconly() == "ValueError: The chosen frequency will lead to overlaps in the calculated index. "\
"Either choose a higher frequency or avoid setting force_close to None "\
"when setting closed to 'right', 'both' or None."
try:
# should all be fine, since force_close cuts the overlapping interval
mcal.date_range(bschedule, "2H", closed="right", force_close=True)
with pytest.warns(UserWarning): # should also warn about lost sessions
mcal.date_range(bschedule, "2H", closed="right", force_close=False)
mcal.date_range(bschedule, "2H", closed="both", force_close=True)
mcal.date_range(bschedule, "2H", closed="both", force_close=False)
# closed = "left" should never be a problem since it won't go outside market hours anyway
mcal.date_range(bschedule, "2H", closed="left", force_close=True)
mcal.date_range(bschedule, "2H", closed="left", force_close=False)
mcal.date_range(bschedule, "2H", closed="left", force_close=None)
except ValueError as e:
pytest.fail(f"Unexpected Error: \n{e}")
def test_date_range_permutations():
# open_time = 9, close_time = 11.30, freq = "1H"
cal = FakeCalendar(open_time= datetime.time(9), close_time= datetime.time(11, 30))
schedule = cal.schedule("2021-01-05", "2021-01-05")
# result matching values for: closed force_close
# 9 10 11 left False/ left None/ both False/ None False
expected = pd.DatetimeIndex(
["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
"2021-01-05 03:00:00+00:00"], tz= "UTC")
actual = mcal.date_range(schedule, "1H", closed= "left", force_close= False)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed= "left", force_close= None)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed= "both", force_close= False)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed= None, force_close= False)
assert_index_equal(actual, expected)
# 9 10 11 11.30 left True/ both True/ None True
expected = pd.DatetimeIndex(
["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
"2021-01-05 03:00:00+00:00", "2021-01-05 03:30:00+00:00"], tz= "UTC")
actual = mcal.date_range(schedule, "1H", closed= "left", force_close= True)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed= "both", force_close= True)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed= None, force_close= True)
assert_index_equal(actual, expected)
# 10 11 right False
expected = pd.DatetimeIndex(
["2021-01-05 02:00:00+00:00", "2021-01-05 03:00:00+00:00"], tz="UTC")
actual = mcal.date_range(schedule, "1H", closed="right", force_close=False)
assert_index_equal(actual, expected)
# 10 11 11.30 right True
expected = pd.DatetimeIndex(
["2021-01-05 02:00:00+00:00", "2021-01-05 03:00:00+00:00",
"2021-01-05 03:30:00+00:00"], tz="UTC")
actual = mcal.date_range(schedule, "1H", closed="right", force_close=True)
assert_index_equal(actual, expected)
# 10 11 12 right None
expected = pd.DatetimeIndex(
["2021-01-05 02:00:00+00:00", "2021-01-05 03:00:00+00:00",
"2021-01-05 04:00:00+00:00"], tz="UTC")
actual = mcal.date_range(schedule, "1H", closed="right", force_close=None)
assert_index_equal(actual, expected)
# 9 10 11 12 both None/ None None
expected = pd.DatetimeIndex(
["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
"2021-01-05 03:00:00+00:00", "2021-01-05 04:00:00+00:00"], tz="UTC")
actual = mcal.date_range(schedule, "1H", closed="both", force_close=None)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed=None, force_close=None)
assert_index_equal(actual, expected)
def test_date_range_daily():
cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(12, 0))
# If closed='right' and force_close False for daily then the result is empty
expected = pd.DatetimeIndex([], tz='UTC')
schedule = cal.schedule('2015-12-31', '2016-01-06')
with pytest.warns(UserWarning):
actual = mcal.date_range(schedule, '1D', force_close=False, closed='right')
assert_index_equal(actual, expected)
# New years is holiday
expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
['2015-12-31 12:00', '2016-01-04 12:00', '2016-01-05 12:00', '2016-01-06 12:00']])
schedule = cal.schedule('2015-12-31', '2016-01-06')
actual = mcal.date_range(schedule, '1D')
assert_index_equal(actual, expected)
# July 3 is early close
expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
['2012-07-02 12:00', '2012-07-03 11:30', '2012-07-04 12:00']])
schedule = cal.schedule('2012-07-02', '2012-07-04')
actual = mcal.date_range(schedule, '1D')
assert_index_equal(actual, expected)
# Dec 14, 2016 is adhoc early close
expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
['2016-12-13 12:00', '2016-12-14 11:40', '2016-12-15 12:00']])
schedule = cal.schedule('2016-12-13', '2016-12-15')
actual = mcal.date_range(schedule, '1D')
assert_index_equal(actual, expected)
# July 3 is late open
expected = pd.DatetimeIndex([
|
pd.Timestamp(x, tz=cal.tz)
|
pandas.Timestamp
|
import json
import sys
import urllib.request
from typing import List, Tuple, Dict, Set
import numpy as np
import pandas as pd
import psycopg2
import xgboost as xgb
from bs4 import BeautifulSoup
from sklearn.model_selection import KFold
COLUMN_RATING = "Rating"
COLUMN_PREDICT = "Predict"
PROBLEM_SET_JSON_NAME = "./problem_set.json"
MODEL_DUMP_NAME = "./save_xgb_predicted_rating"
TMP_DATABASE = "tmp_submissions"
ITER_WIDTH = 3000
BLACK_LIST = {"KokiYmgch"}
def get_submissions(users: List[str], conn, table_name: str, train: bool) -> List[Tuple[str, str, str, int, float]]:
with conn.cursor() as cursor:
cursor.execute("DROP TABLE IF EXISTS {}".format(table_name))
conn.commit()
cursor.execute(
"CREATE TEMPORARY TABLE {} (user_id VARCHAR(255) NOT NULL, PRIMARY KEY (user_id))".format(table_name))
conn.commit()
cursor.executemany("INSERT INTO {} (user_id) VALUES (%s)".format(table_name), [(x,) for x in users])
conn.commit()
if train:
query = """
SELECT
s.problem_id,
s.user_id,
s.result,
a.problem_count,
p.point
FROM submissions AS s
INNER JOIN {} AS t ON s.user_id=t.user_id
LEFT JOIN (
SELECT max(s.epoch_second) AS max_second, s.user_id AS user_id FROM submissions AS s
LEFT JOIN problems AS p ON p.id=s.problem_id
LEFT JOIN contests AS c ON c.id=p.contest_id
WHERE c.rate_change != '×' AND s.epoch_second < c.start_epoch_second+c.duration_second
GROUP BY s.user_id
) AS m ON m.user_id=s.user_id
LEFT JOIN accepted_count AS a ON a.user_id=s.user_id
LEFT JOIN points AS p ON p.problem_id=s.problem_id
WHERE m.max_second > s.epoch_second
""".format(table_name)
else:
query = """
SELECT
s.problem_id,
s.user_id,
s.result,
a.problem_count,
p.point
FROM submissions AS s
INNER JOIN {} AS t ON s.user_id=t.user_id
LEFT JOIN accepted_count AS a ON a.user_id=s.user_id
LEFT JOIN points AS p ON p.problem_id=s.problem_id
""".format(table_name)
with conn.cursor() as cursor:
cursor.execute(query)
submissions = cursor.fetchall()
return submissions
def insert_to_df(df: pd.DataFrame, submissions: List[Tuple[str, str, str, int, float]]):
ac_set = set()
wa_set = set()
count_dict = {}
user_point_count: Dict[Tuple[str, float], int] = {}
problem_point = {}
for problem_id, user_id, result, count, point in submissions:
if result == "AC":
ac_set.add((user_id, problem_id))
if point:
problem_point[problem_id] = point
num = user_point_count.get((user_id, point), 0)
user_point_count[(user_id, point)] = num + 1
else:
wa_set.add((user_id, problem_id))
count_dict[user_id] = count
df["accepted_count"] = pd.Series(count_dict)
print("AC Set:", len(ac_set))
print("WA Set:", len(wa_set))
for user_id, problem_id in wa_set:
df.at[user_id, problem_id] = -1
for user_id, problem_id in ac_set:
df.at[user_id, problem_id] = 1
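# Heuristic below: if a user has at least 3 ACs at a given point value, treat every
# problem worth fewer points as solvable for that user and mark it as accepted (1).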
user_max_point: Dict[str, float] = {}
for (user_id, point), count in user_point_count.items():
if count >= 3:
current = user_max_point.get(user_id, 0)
user_max_point[user_id] = max(current, point)
for problem_id, point in problem_point.items():
for user_id, max_point in user_max_point.items():
if point < max_point:
df.at[user_id, problem_id] = 1
df.fillna(0, inplace=True)
def scrape_rating() -> List[Tuple[str, int]]:
users = []
min_count = 100
for count in range(1, 30):
url = "https://beta.atcoder.jp/ranking?desc=true&orderBy=competitions&page={}".format(count)
print(url)
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html, "lxml")
for tr in soup.findAll("tbody")[0].findAll("tr"):
tds = tr.findAll("td")
user_id = tds[1].findAll("a")[1].text
current = int(tds[3].text)
match_count = int(tds[5].text)
if user_id not in BLACK_LIST:
users.append((user_id, current))
min_count = min(min_count, match_count)
if min_count < 15:
break
return users
def train_model(model, problem_set: Set[str], conn):
# scrape user rating data
users = scrape_rating()
# generate train data
submissions = get_submissions([u[0] for u in users], conn, TMP_DATABASE, True)
user_set = set([s[1] for s in submissions])
for s in submissions:
problem_set.add(s[0])
df =
|
pd.DataFrame(columns=problem_set, index=user_set)
|
pandas.DataFrame
|
import json
import plotly
import plotly.graph_objs as go
import pandas as pd
from flask import flash
BOUNDS = {
"petpt::msalb_-1": [0, 1],
"petpt::srad_-1": [1, 20],
"petpt::tmax_-1": [-30, 60],
"petpt::tmin_-1": [-30, 60],
"petpt::xhlai_-1": [0, 20],
"petasce::doy_-1": [1, 365],
"petasce::meevp_-1": [0, 1],
"petasce::msalb_-1": [0, 1],
"petasce::srad_-1": [1, 30],
"petasce::tmax_-1": [-30, 60],
"petasce::tmin_-1": [-30, 60],
"petasce::xhlai_-1": [0, 20],
"petasce::tdew_-1": [-30, 60],
"petasce::windht_-1": [
0.1,
10,
], # HACK: has a hole in 0 < x < 1 for petasce__assign__wind2m_1
"petasce::windrun_-1": [0, 900],
"petasce::xlat_-1": [3, 12], # HACK: south sudan lats
"petasce::xelev_-1": [0, 6000],
"petasce::canht_-1": [0.001, 3],
}
PRESETS = {
"petpt::msalb_-1": 0.5,
"petpt::srad_-1": 10,
"petpt::tmax_-1": 20,
"petpt::tmin_-1": 10,
"petpt::xhlai_-1": 10,
"petasce::msalb_-1": 0.5,
"petasce::srad_-1": 15,
"petasce::tmax_-1": 10,
"petasce::tmin_-1": -10,
"petasce::xhlai_-1": 10,
}
COVERS = {
"petasce::canht_-1": 2,
"petasce::meevp_-1": "A",
"petasce::cht_0": 0.001,
"petasce::cn_4": 1600.0,
"petasce::cd_4": 0.38,
"petasce::rso_0": 0.062320,
"petasce::ea_0": 7007.82,
"petasce::wind2m_0": 3.5,
"petasce::psycon_0": 0.0665,
"petasce::wnd_0": 3.5,
}
def get_grfn_surface_plot(G, presets=PRESETS, num_samples=10):
try:
X, Y, Z, x_var, y_var = G.S2_surface((8, 6), BOUNDS, presets,
num_samples=num_samples)
z_data =
|
pd.DataFrame(Z, index=X, columns=Y)
|
pandas.DataFrame
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result =
|
read_hdf(path, "df")
|
pandas.io.pytables.read_hdf
|
from time import time
from typing import Tuple, Mapping, Optional, Sequence, TYPE_CHECKING
from itertools import product
import sys
import pytest
from scanpy import settings as s
from anndata import AnnData
from scanpy.datasets import blobs
import scanpy as sc
from pandas.testing import assert_frame_equal
import numpy as np
import pandas as pd
from squidpy.gr import ligrec
from squidpy.gr._ligrec import PermutationTest
from squidpy._constants._pkg_constants import Key
_CK = "leiden"
Interactions_t = Tuple[Sequence[str], Sequence[str]]
Complexes_t = Sequence[Tuple[str, str]]
class TestInvalidBehavior:
def test_not_adata(self):
with pytest.raises(TypeError, match=r"Expected `adata` to be of type `anndata.AnnData`"):
ligrec(None, _CK)
def test_adata_no_raw(self, adata: AnnData):
del adata.raw
with pytest.raises(AttributeError, match=r"No `.raw` attribute"):
ligrec(adata, _CK, use_raw=True)
def test_raw_has_different_n_obs(self, adata: AnnData):
adata.raw = blobs(n_observations=adata.n_obs + 1)
with pytest.raises(ValueError, match=rf"Expected `{adata.n_obs}` cells in `.raw`"):
ligrec(adata, _CK)
def test_invalid_cluster_key(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(KeyError, match=r"Cluster key `foobar` not found"):
ligrec(adata, cluster_key="foobar", interactions=interactions)
def test_cluster_key_is_not_categorical(self, adata: AnnData, interactions: Interactions_t):
adata.obs[_CK] = adata.obs[_CK].astype("string")
with pytest.raises(TypeError, match=rf"Expected `adata.obs\[{_CK!r}\]` to be `categorical`"):
ligrec(adata, _CK, interactions=interactions)
def test_only_1_cluster(self, adata: AnnData, interactions: Interactions_t):
adata.obs["foo"] = 1
adata.obs["foo"] = adata.obs["foo"].astype("category")
with pytest.raises(ValueError, match=r"Expected at least `2` clusters, found `1`."):
ligrec(adata, "foo", interactions=interactions)
def test_invalid_complex_policy(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid option `foobar` for `ComplexPolicy`."):
ligrec(adata, _CK, interactions=interactions, complex_policy="foobar")
def test_invalid_fdr_axis(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid option `foobar` for `CorrAxis`."):
ligrec(adata, _CK, interactions=interactions, corr_axis="foobar", corr_method="fdr_bh")
def test_too_few_permutations(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Expected `n_perms` to be positive"):
ligrec(adata, _CK, interactions=interactions, n_perms=0)
def test_invalid_interactions_type(self, adata: AnnData):
with pytest.raises(TypeError, match=r"Expected either a `pandas.DataFrame`"):
ligrec(adata, _CK, interactions=42)
def test_invalid_interactions_dict(self, adata: AnnData):
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions={"foo": ["foo"], "target": ["bar"]})
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions={"source": ["foo"], "bar": ["bar"]})
def test_invalid_interactions_dataframe(self, adata: AnnData, interactions: Interactions_t):
df =
|
pd.DataFrame(interactions, columns=["foo", "target"])
|
pandas.DataFrame
|
import tabula
import glob, os
import pandas as pd
def clean_saro(df):
unique_id_col = 'SARO NUMBER'
# remove unnecessary blank columns
df_clean = df.dropna(how='all', axis=1)
# fill forward unique_id_col
df_clean.loc[:, unique_id_col] = df_clean[unique_id_col].ffill(axis=0)
# replace NaN with empty string for all other columns
df_clean = df_clean.fillna("")
# remove all repeating header columns
df_clean = df_clean[df_clean[unique_id_col] != unique_id_col].reset_index(drop=True)
id_list_original_order = df_clean[unique_id_col].unique()
# set id col as index
df_clean = df_clean.set_index(unique_id_col)
# extract column names
cols = df_clean.columns
# merge all text data by id column
all_clean_series = []
for col in cols:
df_temp = df_clean.groupby(unique_id_col)[col].apply(lambda x: ' '.join(list(x)).strip())
all_clean_series.append(df_temp)
# combine all merged columns, joined on id column
df_final = pd.concat(all_clean_series, axis=1)
# reindex the final output based on original list order
df_final = df_final.reindex(id_list_original_order)
return df_final
def clean_nca(df):
# define NCA columns
id_col = 'NCA NUMBER'
supp_col = 'NCA TYPE'
unique_id_col = "NCA NUMBER_TYPE_unique"
unique_supp_col = 'NCA TYPE_unique'
# make unique supplementary column
df_clean = df.reset_index()
df_clean[unique_supp_col] = df_clean[[supp_col, 'index']].apply(lambda x: f"{x[supp_col]}|{x['index']}" if not pd.isna(x[supp_col]) else float('nan'), axis=1)
# forward fill key columns
df_clean[[id_col, unique_supp_col]] = df_clean[[id_col, unique_supp_col]].ffill(axis=0)
# make unique transaction identifier
df_clean[unique_id_col] = df_clean[[id_col, unique_supp_col]].apply(lambda x: f"{x[id_col]}|{x[unique_supp_col]}", axis=1)
# replace NaN with empty string for all other columns
df_clean = df_clean.fillna("")
# remove all repeating header columns
df_clean = df_clean[df_clean[id_col] != id_col].reset_index(drop=True)
id_list_original_order = df_clean[unique_id_col].unique()
# set id col as index
df_clean = df_clean.set_index(unique_id_col)
# extract column names
cols = df_clean.columns
remove_cols = ['index', id_col, supp_col, unique_supp_col]
cols = [col for col in cols if col not in remove_cols]
# merge all text data by id column
all_clean_series = []
for col in cols:
df_temp = df_clean.groupby(unique_id_col)[col].apply(lambda x: ' '.join(list(x)).strip())
all_clean_series.append(df_temp)
# combine all merged columns, joined on id column
df_merged =
|
pd.concat(all_clean_series, axis=1)
|
pandas.concat
|
import pandas as pd
import numpy as np
from skimage.io import MultiImage
from skimage.morphology import skeletonize
import maskslic as seg
import cv2
import matplotlib.pyplot as plt
from pathlib import Path
import warnings
VALID_SLIDE_EXTENSIONS = {'.tiff', '.mrmx', '.svs'}
# ~~~~~~~~~~~~ Helper functions ~~~~~~~~~~~~
def generateMetaDF(data_dir, meta_fn:str='train.csv'):
'''
Makes a pandas.DataFrame of paths out of a directory including slides. Drop the `train.csv` in `data_dir`
and the script will also merge any meta data from there on `image_id` key.
'''
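# Expected layout (inferred from the code below): data_dir holds slide files plus optional
# "<image_id>_mask" files with the same extensions; masks are matched back to slides on image_id.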
all_files = [path.resolve() for path in Path(data_dir).rglob("*.*")]
slide_paths = [path for path in all_files if path.suffix in VALID_SLIDE_EXTENSIONS]
if len(slide_paths)==0:
raise ValueError('No slides in `data_dir`=%s'%data_dir)
data_df = pd.DataFrame({'slide_path':slide_paths})
data_df['image_id'] = data_df.slide_path.apply(lambda x: x.stem)
slides = data_df[~data_df.image_id.str.contains("mask")]
masks = data_df[data_df.image_id.str.contains("mask")]
masks['image_id'] = masks.image_id.str.replace("_mask", "")
masks.columns = ['mask_path', 'image_id']
data_df = slides.merge(masks, on='image_id', how='left')
data_df['slide_path'] = data_df.slide_path.apply(lambda x: str(x) if not
|
pd.isna(x)
|
pandas.isna
|
import itertools
import pandas as pd
from src.util import split_train_test
class GridSearch:
def __init__(self, clf, grid, dim, n_classes):
self.clf = clf
self.grid = grid
self.dim = dim
self.n_classes = n_classes
self.results = None
def _make_opts_combinations(self):
"""
Take the grid and make all combinations of parameters from it.
:return: list of dicts
"""
for k, v in self.grid.items():
self.grid[k] = [(k, vv) for vv in v]
grid_list = [v for k, v in self.grid.items()]
input_combinations = list(itertools.product(*grid_list))
input_combinations_list_dict = []
required_fields = {'dim_in': self.dim, 'n_classes': self.n_classes + 1}
# make dicts for kwargs from combinations
for i, combination in enumerate(input_combinations):
opts = required_fields.copy()
comb_dict = {c[0]: c[1] for c in combination}
opts.update(comb_dict)
input_combinations_list_dict.append(opts)
return input_combinations_list_dict
def train_clf(self, opts, inputs, labels, i):
"""
Train clf with parameters.
:param opts: dict - parameters to init clf.
:param inputs: np.array of inputs. column oriented
:param labels: np.array of labels
:return:
"""
print('{i}. Starting validate model with parameters - {params}'.format(i=i, params=opts))
clf = self.clf(**opts)
train_inputs, train_labels, test_inputs, test_labels = split_train_test(inputs, labels)
trainCE, trainRE = clf.train(train_inputs, train_labels)
testCE, testRE = clf.test(test_inputs, test_labels)
print('{i}. Done'.format(i=i))
score = {
'acc': testCE,
'rmse': testRE
}
params = clf.get_params()
result = {
'score': score,
'params': params
}
print('{i} - results - {r}'.format(i=i, r=result))
return result
def do_search(self, inputs, labels):
"""
Do grid search through all parameters combinations.
:param inputs: np.array of inputs. column oriented
:param labels: np.array of labels
:return: results - list of dicts
{
'score': {'acc':x, 'rmse':y}
'params': {<model_params>}
}
"""
input_combinations_list_dict = self._make_opts_combinations()
print('Length of inputs combination - {l}'.format(l=len(input_combinations_list_dict)))
results = []
for i, input_combination in enumerate(input_combinations_list_dict):
result = self.train_clf(input_combination, inputs, labels, i)
results.append(result)
self.results = results
return results
def get_best_params(self):
"""
Search self.results for the entry with the lowest test classification error (stored under 'acc') and return its parameters.
:return:
"""
return min(self.results, key=lambda x: x['score']['acc'])
def save_report(self):
"""
Build the report for export as csv.
Headers are the model params plus acc and rmse.
"""
# flat results
report_data = []
for result in self.results:
flat = result['params'].copy()
flat.update(result['score'])
report_data.append(flat)
df =
|
pd.DataFrame(report_data)
|
pandas.DataFrame
|
import datetime as dt
import matplotlib.pyplot as plt
import lifetimes
import numpy as np
import os
import pandas as pd
import seaborn as sns
def numcard(x):
return x.nunique(), len(x)
def todateclean(x):
return pd.to_datetime(x, errors='coerce').dt.date.astype('datetime64')
"""
- info, shape, dtypes
- df.isnull().sum() #Check for null counts/ value_counts()
- Check for supposed imputed values (are there suspicious values of 0, like for Age. )
- change zeros to nans where appropriate
- Imputation of missing values
- handle stringified json
- df.dtypes # in case obj to (df.colname = df.colname.astype("category"))
- df['colname'] = pd.to_datetime(df['colname']).dt.date
- df.drop("colname", axis=1) # drop columns
- How balanced are the outcomes?
X = df.drop("diagnosis", axis=1) # just saying which axis again
Y = df["diagnosis"] # this is just a series now
col = X.columns # if we do type(col), it's an Index
X.isnull().sum() # this covers every column in the df.
def rangenorm(x):
return (x - x.mean())/(x.max() - x.min())
le = LabelEncoder()
le.fit(Y_norm)
"""
df = pd.read_csv("./ignoreland/onlineretail.csv")
df.info()
df.apply(lambda x: numcard(x))
datecols = ['InvoiceDate']
df.loc[:, datecols] = df.loc[:,datecols].apply(lambda x: todateclean(x))
dfnew = df[(df.Quantity>0) & (df.CustomerID.isnull()==False)]
dfnew['amt'] = dfnew['Quantity'] * dfnew['UnitPrice']
dfnew.describe()
from lifetimes.plotting import *
from lifetimes.utils import *
observation_period_end = '2011-12-09'
monetary_value_col = 'amt'
modeldata = summary_data_from_transaction_data(dfnew,
'CustomerID',
'InvoiceDate',
monetary_value_col=monetary_value_col,
observation_period_end=observation_period_end)
modeldata.head()
modeldata.info() # 4 floats.
# Eyeball distribution of frequency (calculated)
modeldata['frequency'].plot(kind='hist', bins=50)
print(modeldata['frequency'].describe())
print(modeldata['recency'].describe())
print(sum(modeldata['frequency'] == 0)/float(len(modeldata)))
##### Lec21
from lifetimes import BetaGeoFitter
# similar to lifelines
bgf = BetaGeoFitter(penalizer_coef=0.0) # no regularization param.
bgf.fit(modeldata['frequency'], modeldata['recency'], modeldata['T'])
print(bgf)
# See https://www.youtube.com/watch?v=guj2gVEEx4s and
# https://www.youtube.com/watch?v=gx6oHqpRgpY
## residual lifetime value is more useful construct
from lifetimes.plotting import plot_frequency_recency_matrix
plot_frequency_recency_matrix(bgf)
from lifetimes.plotting import plot_probability_alive_matrix
plot_probability_alive_matrix(bgf)
# lec 24:
# set an outer time boundary and predict cumulative purchases by that time
t = 10 # from now until now+t periods
modeldata['predicted_purchases'] = \
bgf.conditional_expected_number_of_purchases_up_to_time(t,
modeldata['frequency'],
modeldata['recency'],
modeldata['T'])
modeldata.sort_values(by='predicted_purchases').tail(5)
modeldata.sort_values(by='predicted_purchases').head(5)
# lec 25: validation of model
from lifetimes.plotting import plot_period_transactions
plot_period_transactions(bgf) # this plot shows very clearly the model performance
# in terms of transaction volume fit
# Lec 26: splitting into train and test (by time period)
summary_cal_holdout = calibration_and_holdout_data(df,
'CustomerID',
'InvoiceDate',
calibration_period_end='2011-06-08',
observation_period_end='2011-12-09')
summary_cal_holdout.head()
bgf.fit(summary_cal_holdout['frequency_cal'],
summary_cal_holdout['recency_cal'],
summary_cal_holdout['T_cal'])
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases
plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)
from lifetimes.plotting import plot_history_alive
days_since_birth = 365
fig = plt.figure(figsize=(12,8))
id = 14621 # choose a customer id
sp_trans = df.loc[df['CustomerID'] == id] # specific customer's covariates
plot_history_alive(bgf, days_since_birth, sp_trans, 'InvoiceDate')
# Lec28: Subsetting to customers who repurchase.
returning_customers_summary = modeldata[modeldata['frequency']>0]
returning_customers_summary.head()
returning_customers_summary.shape
# Lec 29: gamma-gamma model for LTV
# Note: good practice to confirm small/no apparent corr for frequency and mean trxn value
# Rev per trxn: predict total monetary value.
# The Beta param for the gamma model of total spend is itself assumed gamma distributed
# that is where the name comes from.
# the expectation of total spend for person i is calculated in empirical-Bayes fashion, as a weighted
# mean of the population average and the sample mean for person i.
# eq 5 in http://www.brucehardie.com/notes/025/gamma_gamma.pdf shows the arithmetic
# https://antonsruberts.github.io/lifetimes-CLV/ also great additional code.
# derivation here: http://www.brucehardie.com/notes/025/gamma_gamma.pdf
# Output of ggf fitter:
# p = the 'alpha' param in the gamma dist: E(Z|p, v) = p/v. Alpha adds upon convolution.
# q = the alpha param in the gamma dist of v -- v is gamma(q, gam) in the pop
# v = the 'beta' param in gamma dist. constant upon convolution.
# -- Note that v varies among customers (ie, is gamma distributed)
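# A minimal sketch (added for clarity) of the empirical-Bayes weighted mean described
# above, following eq. 5 of the Hardie note. Parameter names mirror ggf.summary (p, q, v);
# this is an illustration of the formula, not a re-implementation of lifetimes' internals.
def expected_avg_spend(p, q, v, x, mbar):
    """x = number of repeat purchases for a customer, mbar = that customer's observed mean spend."""
    population_mean = p * v / (q - 1)        # prior mean transaction value across customers
    weight = (p * x) / (p * x + q - 1)       # shrinkage weight toward the customer's own mean
    return (1 - weight) * population_mean + weight * mbar
# After fitting ggf below, e.g.: expected_avg_spend(p_here, q_here, v_here, x=5, mbar=400.0)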
from lifetimes import GammaGammaFitter
ggf = GammaGammaFitter(penalizer_coef=0.0)
ggf.fit(returning_customers_summary['frequency'],
returning_customers_summary['monetary_value'])
ggf.summary
ggf.conditional_expected_average_profit(modeldata['frequency'],
modeldata['monetary_value'])
# cond_exp_avg_profit => gives prediction of mean trxn value.
a0 = returning_customers_summary['monetary_value'].shape[0] # 2790 customers
# Total spend:
a1 = returning_customers_summary['monetary_value'].sum()
# Total time units (here, days) with purchase:
a2 = returning_customers_summary['frequency'].sum()
# Mean monetary value (over all purchase days), roughly equal to estimated v
returning_customers_summary['monetary_value'].mean()
ggf.summary
p_here = ggf.summary.iloc[0,0]
q_here = ggf.summary.iloc[1,0]
v_here = ggf.summary.iloc[2,0] # model says 486; empirical average is 477.
money_per_customer = a1/a0
###############
# review, per documentation:
bgf.summary
# r, alpha = shape, scale for gamma dist that represents sum (convolution) of purchase rates
# a = alpha param for beta dist of churn
# b = beta param for beta dist of churn
x = np.random.gamma(.784, 49.28,10000) # r, alpha, n
bgf.summary.loc["a",:][0]/ (bgf.summary.loc["b",:][0] + bgf.summary.loc["a",:][0])
###################################
# lec31: other models
dfnew.dtypes
dfnew_train = dfnew[dfnew.InvoiceDate < '2011-11-09']
dfnew_test = dfnew[dfnew.InvoiceDate >= '2011-11-09']
dfnew_test.shape
dfnew_train.shape
maxdate = dfnew_train.InvoiceDate.max()
mindate = dfnew_train.InvoiceDate.min()
dfnew_train['duration'] = (maxdate - dfnew_train.InvoiceDate)/np.timedelta64(1,'D')
dfsum1 = dfnew_train.groupby(['CustomerID'])['duration'].min().reset_index()
dfsum1.rename(columns = {'duration':'lasttime'}, inplace=True) # time from lasttime to now
dfsum2 = dfnew_train.groupby(['CustomerID'])['duration'].max().reset_index()
dfsum2.rename(columns = {'duration':'firsttime'}, inplace=True) # time from firsttime to now
dfnew_train['freq'] = 1
dfsum3 = dfnew_train.groupby(['CustomerID'])['freq'].sum().reset_index() # count of transactions by customer
dfnew_train['freq3m'] = 1
dfsum4 = dfnew_train[dfnew_train['duration'] < 91].groupby(['CustomerID'])['freq3m'].sum().reset_index()
# now let's merge the 3 customer-level datasets together.
# pd.concat uses indexes as the join keys, so join on CustomerID with pd.merge (via reduce) instead.
from functools import reduce
dfs = [dfsum1, dfsum2, dfsum3, dfsum4]
dfsum = reduce(lambda left, right: pd.merge(left, right, on=['CustomerID'], how='outer'), dfs)
dfsum.shape
[_ for _ in map(lambda x: x.shape, dfs)]
dfsum.head()
###################
other_data = pd.read_csv("./ignoreland/oth.csv")
other_data.head()
dfsum = pd.merge(dfsum, other_data, on=['CustomerID'], how='left')
dfnew_test['target'] = 1
dfsum_target = dfnew_test.groupby(['CustomerID'])['target'].sum().reset_index()
dfsum = pd.merge(dfsum, dfsum_target, on=['CustomerID'], how='left')
""" Testing the payout of the Currency Swap.
We test along 2 dimensions: |
1) exchange rate changes |
2) asset allocations |
"""
import numpy as np
import pandas as pd
import pytest
from src.financial_contracts.swap_contract import payout_currency_swap
@pytest.fixture
def default_data():
out = {}
out["final_exchange_rate"] = pd.Series(data=np.ones(3) + 0.1)
out["start_exchange_rate"] = 1
out["USD_asset_allocation"] = 0.5
out["leverage"] = 5
out["return_on_euro_deposits"] = 0
out["return_on_usd_deposits"] = 0
return out
""" test exchange rate changes """
def test_swap_no_exchange_rate_change(default_data):
data_no_change = default_data
data_no_change["final_exchange_rate"] = pd.Series(data=np.ones(3))
realized_payout = payout_currency_swap(**data_no_change)
expected_payout = pd.DataFrame(
{"EURlong payout": np.ones(3), "EURshort payout": np.ones(3)}
)
pd.testing.assert_frame_equal(realized_payout, expected_payout, atol=0.0001)
""" the unintuitive design that payout of 1 is generated
comes from the fact that rights to participate in this lottery
have to be bought for a price of 1 per unit
"""
def test_swap_small_exchange_rate_change_usd_only(default_data):
data_small_change_usd_only = default_data
data_small_change_usd_only["USD_asset_allocation"] = 1
realized_payout = payout_currency_swap(**data_small_change_usd_only)
expected_payout = pd.DataFrame(
{"EURlong payout": np.ones(3) + 0.5, "EURshort payout": np.ones(3) - 0.50}
)
pd.testing.assert_frame_equal(realized_payout, expected_payout, atol=0.0001)
def test_swap_small_exchange_rate_change_euro_rise(default_data):
realized_payout = payout_currency_swap(**default_data)
expected_payout = pd.DataFrame(
{
"EURlong payout": np.ones(3) + 0.5,
"EURshort payout": np.ones(3) - 0.5 + 0.1,
}
)
pd.testing.assert_frame_equal(realized_payout, expected_payout, atol=0.0001)
def test_swap_small_exchange_rate_change_euro_fall(default_data):
default_data["final_exchange_rate"] = pd.Series(data=np.ones(3) - 0.1)
realized_payout = payout_currency_swap(**default_data)
expected_payout = pd.DataFrame(
{
"EURlong payout": np.ones(3) - 0.5,
"EURshort payout": np.ones(3) + 0.5 - 0.1,
}
)
pd.testing.assert_frame_equal(realized_payout, expected_payout, atol=0.0001)
""" test asset allocations """
def test_swap_total_payout_usd_50(default_data):
default_data["final_exchange_rate"] =
|
pd.Series(data=[0.9, 1, 1.1])
|
pandas.Series
|
import copy
import warnings
from typing import List
import numpy as np
import pandas as pd
import scipy
import simdkalman
from numpy.fft import irfft, rfft, rfftfreq
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
from tqdm import tqdm
from src.postprocess.metric import calc_haversine
warnings.filterwarnings("ignore")
def apply_kf_smoothing(df: pd.DataFrame) -> pd.DataFrame:
"""
from https://www.kaggle.com/emaerthin/demonstration-of-the-kalman-filter
"""
def _get_kalman_filter() -> simdkalman.KalmanFilter:
T = 1.0
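# Assumed state layout, inferred from the transition matrix below:
# [latDeg, lngDeg, lat_velocity, lng_velocity, lat_acceleration, lng_acceleration],
# i.e. a constant-acceleration model with a fixed time step T between observations.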
state_transition = np.array(
[
[1, 0, T, 0, 0.5 * T ** 2, 0],
[0, 1, 0, T, 0, 0.5 * T ** 2],
[0, 0, 1, 0, T, 0],
[0, 0, 0, 1, 0, T],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
]
)
process_noise = (
np.diag([1e-5, 1e-5, 5e-6, 5e-6, 1e-6, 1e-6]) + np.ones((6, 6)) * 1e-9
)
observation_model = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]])
observation_noise = np.diag([5e-5, 5e-5]) + np.ones((2, 2)) * 1e-9
kf = simdkalman.KalmanFilter(
state_transition=state_transition,
process_noise=process_noise,
observation_model=observation_model,
observation_noise=observation_noise,
)
return kf
kf_ = _get_kalman_filter()
unique_paths = df[["collectionName", "phoneName"]].drop_duplicates().to_numpy()
for collection, phone in tqdm(unique_paths):
cond = np.logical_and(
df["collectionName"] == collection, df["phoneName"] == phone
)
data = df[cond][["latDeg", "lngDeg"]].to_numpy()
data = data.reshape(1, len(data), 2)
smoothed = kf_.smooth(data)
df.loc[cond, "latDeg"] = smoothed.states.mean[0, :, 0]
df.loc[cond, "lngDeg"] = smoothed.states.mean[0, :, 1]
return df
def filter_outlier(df: pd.DataFrame, one_direction: bool = False) -> pd.DataFrame:
"""
https://www.kaggle.com/dehokanta/baseline-post-processing-by-outlier-correction
"""
df["dist_pre"] = 0
df["dist_pro"] = 0
df["latDeg_pre"] = df["latDeg"].shift(periods=1, fill_value=0)
df["lngDeg_pre"] = df["lngDeg"].shift(periods=1, fill_value=0)
df["latDeg_pro"] = df["latDeg"].shift(periods=-1, fill_value=0)
df["lngDeg_pro"] = df["lngDeg"].shift(periods=-1, fill_value=0)
df["dist_pre"] = calc_haversine(df.latDeg_pre, df.lngDeg_pre, df.latDeg, df.lngDeg)
df["dist_pro"] = calc_haversine(df.latDeg, df.lngDeg, df.latDeg_pro, df.lngDeg_pro)
# start, end fix
list_phone = df["phone"].unique()
for phone in list_phone:
ind_s = df[df["phone"] == phone].index[0]
ind_e = df[df["phone"] == phone].index[-1]
df.loc[ind_s, "dist_pre"] = 0
df.loc[ind_e, "dist_pro"] = 0
# outlier threshold: mean + 2 * std (a rough ~95th-percentile proxy)
pro_95 = df["dist_pro"].mean() + (df["dist_pro"].std() * 2)
pre_95 = df["dist_pre"].mean() + (df["dist_pre"].std() * 2)
# find outlier data
if one_direction:
targets = ["latDeg", "lngDeg"]
dfs = []
for phone, df_ in df.groupby("phone"):
pre_mask = df_["dist_pre"].to_numpy() > pre_95
pre_mask[:-1] += pre_mask[1:]
deg_preds_filtered = copy.deepcopy(df_.loc[~pre_mask][targets].to_numpy())
T_ref_filtered = copy.deepcopy(
df_.loc[~pre_mask]["millisSinceGpsEpoch"].to_numpy()
)
deg_preds = scipy.interpolate.interp1d(
T_ref_filtered,
deg_preds_filtered,
axis=0,
bounds_error=None,
fill_value="extrapolate",
assume_sorted=True,
)(df_["millisSinceGpsEpoch"].to_numpy())
df_.loc[:, targets] = deg_preds
dfs.append(df_)
df = pd.concat(dfs, axis=0)
else:
ind = df[(df["dist_pro"] > pro_95) & (df["dist_pre"] > pre_95)][
["dist_pre", "dist_pro"]
].index
# smoothing
for i in ind:
df.loc[i, "latDeg"] = (
df.loc[i - 1, "latDeg"] + df.loc[i + 1, "latDeg"]
) / 2
df.loc[i, "lngDeg"] = (
df.loc[i - 1, "lngDeg"] + df.loc[i + 1, "lngDeg"]
) / 2
return df
def filter_outlier_with_absloute(
df: pd.DataFrame, max_velocity: float = 45.0, max_acc: float = 10.0
) -> pd.DataFrame:
df["dist_pre"] = 0
df["dist_pro"] = 0
df["latDeg_pre"] = df["latDeg"].shift(periods=1, fill_value=0)
df["lngDeg_pre"] = df["lngDeg"].shift(periods=1, fill_value=0)
df["latDeg_pro"] = df["latDeg"].shift(periods=-1, fill_value=0)
df["lngDeg_pro"] = df["lngDeg"].shift(periods=-1, fill_value=0)
df["dist_pre"] = calc_haversine(df.latDeg_pre, df.lngDeg_pre, df.latDeg, df.lngDeg)
df["dist_pro"] = calc_haversine(df.latDeg, df.lngDeg, df.latDeg_pro, df.lngDeg_pro)
# start, end fix
list_phone = df["phone"].unique()
for phone in list_phone:
ind_s = df[df["phone"] == phone].index[0]
ind_e = df[df["phone"] == phone].index[-1]
df.loc[ind_s, "dist_pre"] = 0
df.loc[ind_e, "dist_pro"] = 0
# 95% tile
# pro_95 = df["dist_pro"].mean() + (df["dist_pro"].std() * 2)
# pre_95 = df["dist_pre"].mean() + (df["dist_pre"].std() * 2)
# find outlier data
ind = df[(df["dist_pro"] > max_velocity) & (df["dist_pre"] > max_velocity)][
["dist_pre", "dist_pro"]
].index
# smoothing
for i in ind:
df.loc[i, "latDeg"] = (df.loc[i - 1, "latDeg"] + df.loc[i + 1, "latDeg"]) / 2
df.loc[i, "lngDeg"] = (df.loc[i - 1, "lngDeg"] + df.loc[i + 1, "lngDeg"]) / 2
return df
def make_lerp_data(df: pd.DataFrame):
"""
Generate interpolated lat,lng values for
different phone times in the same collection.
from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
"""
org_columns = df.columns
# Generate a combination of time x collection x phone and
# combine it with the original data (generate records to be interpolated)
assert (
len(
df[
df.duplicated(
["collectionName", "millisSinceGpsEpoch", "phoneName"], keep=False
)
]
)
== 0
)
assert (
len(df[df.duplicated(["collectionName", "millisSinceGpsEpoch"], keep=False)])
> 0
), "there are multiple phone at the same obsevation"
time_list = df[["collectionName", "millisSinceGpsEpoch"]].drop_duplicates()
phone_list = df[["collectionName", "phoneName"]].drop_duplicates()
# assert len(phone_list == 73), "all folders for phones equal 73"
# each timestep row = # of unique phone
tmp = time_list.merge(phone_list, on="collectionName", how="outer")
# different phones, e.g. Pixel 4 and 4XLModded, have different timesteps,
# so there are lots of NaNs after the merge,
# and those are the targets to be interpolated from the other available data.
lerp_df = tmp.merge(
df, on=["collectionName", "millisSinceGpsEpoch", "phoneName"], how="left"
)
lerp_df["phone"] = lerp_df["collectionName"] + "_" + lerp_df["phoneName"]
lerp_df = lerp_df.sort_values(["phone", "millisSinceGpsEpoch"])
# linear interpolation
lerp_df["latDeg_prev"] = lerp_df["latDeg"].shift(1)
lerp_df["latDeg_next"] = lerp_df["latDeg"].shift(-1)
lerp_df["lngDeg_prev"] = lerp_df["lngDeg"].shift(1)
lerp_df["lngDeg_next"] = lerp_df["lngDeg"].shift(-1)
lerp_df["phone_prev"] = lerp_df["phone"].shift(1)
lerp_df["phone_next"] = lerp_df["phone"].shift(-1)
lerp_df["time_prev"] = lerp_df["millisSinceGpsEpoch"].shift(1)
lerp_df["time_next"] = lerp_df["millisSinceGpsEpoch"].shift(-1)
# Leave only records to be interpolated, nan & non_first, non_last data
lerp_df = lerp_df[
(lerp_df["latDeg"].isnull())
& (lerp_df["phone"] == lerp_df["phone_prev"])
& (lerp_df["phone"] == lerp_df["phone_next"])
].copy()
# calc lerp, velocity x delta(time)
lerp_df["latDeg"] = lerp_df["latDeg_prev"] + (
(lerp_df["latDeg_next"] - lerp_df["latDeg_prev"])
* (
(lerp_df["millisSinceGpsEpoch"] - lerp_df["time_prev"])
/ (lerp_df["time_next"] - lerp_df["time_prev"])
)
)
lerp_df["lngDeg"] = lerp_df["lngDeg_prev"] + (
(lerp_df["lngDeg_next"] - lerp_df["lngDeg_prev"])
* (
(lerp_df["millisSinceGpsEpoch"] - lerp_df["time_prev"])
/ (lerp_df["time_next"] - lerp_df["time_prev"])
)
)
# Leave only the data that has a complete set of previous and next data.
lerp_df = lerp_df[~lerp_df["latDeg"].isnull()]
return lerp_df[org_columns]
def calc_mean_pred(df: pd.DataFrame):
"""
Make a prediction based on the average of the predictions of phones
in the same collection.
from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
"""
lerp_df = make_lerp_data(df=df)
add_lerp = pd.concat([df, lerp_df])
# each time step == only one row, average over all phone latDeg,
# lngDeg at each time step
# eg. mean(original Deg Pixel4 and interpolated Deg 4XLModded with `make_lerp_data`)
mean_pred_result = (
add_lerp.groupby(["collectionName", "millisSinceGpsEpoch"])[
["latDeg", "lngDeg"]
]
.mean()
.reset_index()
)
base_cols = ["collectionName", "phoneName", "phone", "millisSinceGpsEpoch"]
try:
mean_pred_df = df[base_cols + ["latDeg_gt", "lngDeg_gt", "speedMps"]].copy()
except Exception:
mean_pred_df = df[base_cols].copy()
mean_pred_df = mean_pred_df.merge(
mean_pred_result[["collectionName", "millisSinceGpsEpoch", "latDeg", "lngDeg"]],
on=["collectionName", "millisSinceGpsEpoch"],
how="left",
)
return mean_pred_df
def get_removedevice(
input_df: pd.DataFrame, divece: str = "SamsungS20Ultra"
) -> pd.DataFrame:
"""
from
https://www.kaggle.com/columbia2131/device-eda-interpolate-by-removing-device-en-ja
"""
input_df["index"] = input_df.index
input_df = input_df.sort_values("millisSinceGpsEpoch")
input_df.index = input_df["millisSinceGpsEpoch"].values
output_df = pd.DataFrame()
import numpy as np
import pandas as pd
from pymbar import BAR as BAR_
from pymbar import MBAR as MBAR_
from alchemlyb.estimators import MBAR
from sklearn.base import BaseEstimator
import copy
import re
import itertools
import logging
logger = logging.getLogger(__name__)
class Estimators():
"""
Return Estimated binding free energy (dG).
Returns the dG between state A and state B using 3 different energy estimators:
Zwanzig, Thermodynamic Integration (TI), or Bennett Acceptance Ratio (BAR).
"""
def Zwanzig(dEs,steps):
"""
Return the estimated binding free energy using the Zwanzig estimator.
Computes the binding free energy (dG) from a molecular dynamics simulation
between state A and state B using the Zwanzig estimator.
Parameters
----------
dEs : Pandas Dataframe
contains the reduced potential (dE) between the states.
steps : integer
the number of steps to be included in the calculation; set to "None" if all steps are needed.
Returns
---------
Zwanzig_df : Pandas Dataframe
contains the binding free energy (dG) between the states.
Examples
--------
>>> Zwanzig(dEs,None)
>>> Zwanzig(dEs,1000)
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:steps]/0.592))))
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))-pd.DataFrame(dGR[1:][::-1]))/2
for i in range(len(list(dG_average_raw.values))):
dG_Average.append(np.sum(dG_average_raw.values[:i+1]))
Zwanzig_df = pd.DataFrame.from_dict({"Lambda":Lambdas,"dG_Forward":dGF,"SUM_dG_Forward":dGF_sum,"dG_Reverse":dGR[::-1],"SUM_dG_Reverse":dGR_sum[::-1],"dG_Average":dG_Average})
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import concurrent.futures
import functools
from typing import Callable, Dict, List, Optional, Set, Union, Any
from google.api_core import exceptions
from google.auth import credentials as auth_credentials
from google.protobuf import timestamp_pb2
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import pipeline_jobs
from google.cloud.aiplatform.compat.types import artifact as gca_artifact
from google.cloud.aiplatform.compat.types import execution as gca_execution
from google.cloud.aiplatform.compat.types import (
tensorboard_time_series as gca_tensorboard_time_series,
)
from google.cloud.aiplatform.metadata import artifact
from google.cloud.aiplatform.metadata import constants
from google.cloud.aiplatform.metadata import context
from google.cloud.aiplatform.metadata import execution
from google.cloud.aiplatform.metadata import experiment_resources
from google.cloud.aiplatform.metadata import metadata
from google.cloud.aiplatform.metadata import resource
from google.cloud.aiplatform.metadata import utils as metadata_utils
from google.cloud.aiplatform.tensorboard import tensorboard_resource
from google.cloud.aiplatform.utils import rest_utils
_LOGGER = base.Logger(__name__)
def _format_experiment_run_resource_id(experiment_name: str, run_name: str) -> str:
"""Formats the the experiment run resource id as a concatenation of experiment name and run name.
Args:
experiment_name (str): Name of the experiment which is it's resource id.
run_name (str): Name of the run.
Returns:
The resource id to be used with this run.
"""
return f"{experiment_name}-{run_name}"
def _v1_not_supported(method: Callable) -> Callable:
"""Helpers wrapper for backward compatibility. Raises when using an API not support for legacy runs."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if isinstance(self._metadata_node, execution.Execution):
raise NotImplementedError(
f"{self._run_name} is an Execution run created during Vertex Experiment Preview and does not support"
f" {method.__name__}. Please create a new Experiment run to use this method."
)
else:
return method(self, *args, **kwargs)
return wrapper
class ExperimentRun(
experiment_resources._ExperimentLoggable,
experiment_loggable_schemas=(
experiment_resources._ExperimentLoggableSchema(
title=constants.SYSTEM_EXPERIMENT_RUN, type=context._Context
),
# backwards compatibility with Preview Experiment runs
experiment_resources._ExperimentLoggableSchema(
title=constants.SYSTEM_RUN, type=execution.Execution
),
),
):
"""A Vertex AI Experiment run"""
def __init__(
self,
run_name: str,
experiment: Union[experiment_resources.Experiment, str],
*,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
):
"""
```
my_run = aiplatform.ExperimentRun('my-run', experiment='my-experiment')
```
Args:
run (str): Required. The name of this run.
experiment (Union[experiment_resources.Experiment, str]):
Required. The name or instance of this experiment.
project (str):
Optional. Project where this experiment run is located. Overrides project set in
aiplatform.init.
location (str):
Optional. Location where this experiment run is located. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to retrieve this experiment run. Overrides
credentials set in aiplatform.init.
"""
self._experiment = self._get_experiment(
experiment=experiment,
project=project,
location=location,
credentials=credentials,
)
self._run_name = run_name
run_id = _format_experiment_run_resource_id(
experiment_name=self._experiment.name, run_name=run_name
)
metadata_args = dict(
project=project,
location=location,
credentials=credentials,
)
def _get_context() -> context._Context:
with experiment_resources._SetLoggerLevel(resource):
run_context = context._Context(
**{**metadata_args, "resource_name": run_id}
)
if run_context.schema_title != constants.SYSTEM_EXPERIMENT_RUN:
raise ValueError(
f"Run {run_name} must be of type {constants.SYSTEM_EXPERIMENT_RUN}"
f" but is of type {run_context.schema_title}"
)
return run_context
try:
self._metadata_node = _get_context()
except exceptions.NotFound as context_not_found:
try:
# backward compatibility
self._v1_resolve_experiment_run(
{
**metadata_args,
"execution_name": run_id,
}
)
except exceptions.NotFound:
raise context_not_found
else:
self._backing_tensorboard_run = self._lookup_tensorboard_run_artifact()
# initially set to None. Will initially update from resource then track locally.
self._largest_step: Optional[int] = None
def _v1_resolve_experiment_run(self, metadata_args: Dict[str, Any]):
"""Resolves preview Experiment.
Args:
metadata_args (Dict[str, Any): Arguments to pass to Execution constructor.
"""
def _get_execution():
with experiment_resources._SetLoggerLevel(resource):
run_execution = execution.Execution(**metadata_args)
if run_execution.schema_title != constants.SYSTEM_RUN:
# note this will raise the context not found exception in the constructor
raise exceptions.NotFound("Experiment run not found.")
return run_execution
self._metadata_node = _get_execution()
self._metadata_metric_artifact = self._v1_get_metric_artifact()
def _v1_get_metric_artifact(self) -> artifact.Artifact:
"""Resolves metric artifact for backward compatibility.
Returns:
Instance of Artifact that represents this run's metric artifact.
"""
metadata_args = dict(
artifact_name=self._v1_format_artifact_name(self._metadata_node.name),
project=self.project,
location=self.location,
credentials=self.credentials,
)
with experiment_resources._SetLoggerLevel(resource):
metric_artifact = artifact.Artifact(**metadata_args)
if metric_artifact.schema_title != constants.SYSTEM_METRICS:
# note this will raise the context not found exception in the constructor
raise exceptions.NotFound("Experiment run not found.")
return metric_artifact
@staticmethod
def _v1_format_artifact_name(run_id: str) -> str:
"""Formats resource id of legacy metric artifact for this run."""
return f"{run_id}-metrics"
def _get_context(self) -> context._Context:
"""Returns this metadata context that represents this run.
Returns:
Context instance of this run.
"""
return self._metadata_node
@property
def resource_id(self) -> str:
"""The resource ID of this experiment run's Metadata context.
The resource ID is the final part of the resource name:
``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{resource ID}``
"""
return self._metadata_node.name
@property
def name(self) -> str:
"""This run's name used to identify this run within it's Experiment."""
return self._run_name
@property
def resource_name(self) -> str:
"""This run's Metadata context resource name.
In the format: ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``
"""
return self._metadata_node.resource_name
@property
def project(self) -> str:
"""The project that this experiment run is located in."""
return self._metadata_node.project
@property
def location(self) -> str:
"""The location that this experiment is located in."""
return self._metadata_node.location
@property
def credentials(self) -> auth_credentials.Credentials:
"""The credentials used to access this experiment run."""
return self._metadata_node.credentials
@property
def state(self) -> gca_execution.Execution.State:
"""The state of this run."""
if self._is_legacy_experiment_run():
return self._metadata_node.state
else:
return getattr(
gca_execution.Execution.State,
self._metadata_node.metadata[constants._STATE_KEY],
)
@staticmethod
def _get_experiment(
experiment: Optional[Union[experiment_resources.Experiment, str]] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> experiment_resources.Experiment:
"""Helper method ot get the experiment by name(str) or instance.
Args:
experiment(str):
Optional. The name of this experiment. Defaults to experiment set in aiplatform.init if not provided.
project (str):
Optional. Project where this experiment is located. Overrides project set in
aiplatform.init.
location (str):
Optional. Location where this experiment is located. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to retrieve this experiment. Overrides
credentials set in aiplatform.init.
Raises:
ValueError if experiment is None and experiment has not been set using aiplatform.init.
"""
experiment = experiment or initializer.global_config.experiment
if not experiment:
raise ValueError(
"experiment must be provided or experiment should be set using aiplatform.init"
)
if not isinstance(experiment, experiment_resources.Experiment):
experiment = experiment_resources.Experiment(
experiment_name=experiment,
project=project,
location=location,
credentials=credentials,
)
return experiment
def _is_backing_tensorboard_run_artifact(self, artifact: artifact.Artifact) -> bool:
"""Helper method to confirm tensorboard run metadata artifact is this run's tensorboard artifact.
Args:
artifact (artifact.Artifact): Required. Instance of metadata Artifact.
Returns:
bool whether the provided artifact is this run's TensorboardRun's artifact.
"""
return all(
[
artifact.metadata.get(constants._VERTEX_EXPERIMENT_TRACKING_LABEL),
artifact.name == self._tensorboard_run_id(self._metadata_node.name),
artifact.schema_title
== constants._TENSORBOARD_RUN_REFERENCE_ARTIFACT.schema_title,
]
)
def _is_legacy_experiment_run(self) -> bool:
"""Helper method that return True if this is a legacy experiment run."""
return isinstance(self._metadata_node, execution.Execution)
def update_state(self, state: gca_execution.Execution.State):
"""Update the state of this experiment run.
```
my_run = aiplatform.ExperimentRun('my-run', experiment='my-experiment')
my_run.update_state(state=aiplatform.gapic.Execution.State.COMPLETE)
```
Args:
state (aiplatform.gapic.Execution.State): State of this run.
"""
if self._is_legacy_experiment_run():
self._metadata_node.update(state=state)
else:
self._metadata_node.update(metadata={constants._STATE_KEY: state.name})
def _lookup_tensorboard_run_artifact(
self,
) -> Optional[experiment_resources._VertexResourceWithMetadata]:
"""Helpers method to resolve this run's TensorboardRun Artifact if it exists.
Returns:
Tuple of Tensorboard Run Artifact and TensorboardRun is it exists.
"""
with experiment_resources._SetLoggerLevel(resource):
try:
tensorboard_run_artifact = artifact.Artifact(
artifact_name=self._tensorboard_run_id(self._metadata_node.name),
project=self._metadata_node.project,
location=self._metadata_node.location,
credentials=self._metadata_node.credentials,
)
except exceptions.NotFound:
tensorboard_run_artifact = None
if tensorboard_run_artifact and self._is_backing_tensorboard_run_artifact(
tensorboard_run_artifact
):
return experiment_resources._VertexResourceWithMetadata(
resource=tensorboard_resource.TensorboardRun(
tensorboard_run_artifact.metadata[
constants.GCP_ARTIFACT_RESOURCE_NAME_KEY
]
),
metadata=tensorboard_run_artifact,
)
@classmethod
def list(
cls,
*,
experiment: Optional[Union[experiment_resources.Experiment, str]] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["ExperimentRun"]:
"""List the experiment runs for a given aiplatform.Experiment.
```
my_runs = aiplatform.ExperimentRun.list(experiment='my-experiment')
```
Args:
experiment (Union[aiplatform.Experiment, str]):
Optional. The experiment name or instance to list the experiment run from. If not provided,
will use the experiment set in aiplatform.init.
project (str):
Optional. Project where this experiment is located. Overrides project set in
aiplatform.init.
location (str):
Optional. Location where this experiment is located. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to retrieve this experiment. Overrides
credentials set in aiplatform.init.
Returns:
List of experiment runs.
"""
experiment = cls._get_experiment(
experiment=experiment,
project=project,
location=location,
credentials=credentials,
)
metadata_args = dict(
project=experiment._metadata_context.project,
location=experiment._metadata_context.location,
credentials=experiment._metadata_context.credentials,
)
filter_str = metadata_utils._make_filter_string(
schema_title=constants.SYSTEM_EXPERIMENT_RUN,
parent_contexts=[experiment.resource_name],
)
run_contexts = context._Context.list(filter=filter_str, **metadata_args)
filter_str = metadata_utils._make_filter_string(
schema_title=constants.SYSTEM_RUN, in_context=[experiment.resource_name]
)
run_executions = execution.Execution.list(filter=filter_str, **metadata_args)
def _initialize_experiment_run(context: context._Context) -> ExperimentRun:
this_experiment_run = cls.__new__(cls)
this_experiment_run._experiment = experiment
this_experiment_run._run_name = context.display_name
this_experiment_run._metadata_node = context
with experiment_resources._SetLoggerLevel(resource):
tb_run = this_experiment_run._lookup_tensorboard_run_artifact()
if tb_run:
this_experiment_run._backing_tensorboard_run = tb_run
else:
this_experiment_run._backing_tensorboard_run = None
this_experiment_run._largest_step = None
return this_experiment_run
def _initialize_v1_experiment_run(
execution: execution.Execution,
) -> ExperimentRun:
this_experiment_run = cls.__new__(cls)
this_experiment_run._experiment = experiment
this_experiment_run._run_name = execution.display_name
this_experiment_run._metadata_node = execution
this_experiment_run._metadata_metric_artifact = (
this_experiment_run._v1_get_metric_artifact()
)
return this_experiment_run
if run_contexts or run_executions:
with concurrent.futures.ThreadPoolExecutor(
max_workers=max([len(run_contexts), len(run_executions)])
) as executor:
submissions = [
executor.submit(_initialize_experiment_run, context)
for context in run_contexts
]
experiment_runs = [submission.result() for submission in submissions]
submissions = [
executor.submit(_initialize_v1_experiment_run, execution)
for execution in run_executions
]
for submission in submissions:
experiment_runs.append(submission.result())
return experiment_runs
else:
return []
@classmethod
def _query_experiment_row(
cls, node: Union[context._Context, execution.Execution]
) -> experiment_resources._ExperimentRow:
"""Retrieves the runs metric and parameters into an experiment run row.
Args:
node (Union[context._Context, execution.Execution]):
Required. Metadata node instance that represents this run.
Returns:
Experiment run row that represents this run.
"""
this_experiment_run = cls.__new__(cls)
this_experiment_run._metadata_node = node
row = experiment_resources._ExperimentRow(
experiment_run_type=node.schema_title,
name=node.display_name,
)
if isinstance(node, context._Context):
this_experiment_run._backing_tensorboard_run = (
this_experiment_run._lookup_tensorboard_run_artifact()
)
row.params = node.metadata[constants._PARAM_KEY]
row.metrics = node.metadata[constants._METRIC_KEY]
row.time_series_metrics = (
this_experiment_run._get_latest_time_series_metric_columns()
)
row.state = node.metadata[constants._STATE_KEY]
else:
this_experiment_run._metadata_metric_artifact = (
this_experiment_run._v1_get_metric_artifact()
)
row.params = node.metadata
row.metrics = this_experiment_run._metadata_metric_artifact.metadata
row.state = node.state.name
return row
def _get_logged_pipeline_runs(self) -> List[context._Context]:
"""Returns Pipeline Run contexts logged to this Experiment Run.
Returns:
List of Pipeline system.PipelineRun contexts.
"""
service_request_args = dict(
project=self._metadata_node.project,
location=self._metadata_node.location,
credentials=self._metadata_node.credentials,
)
filter_str = metadata_utils._make_filter_string(
schema_title=constants.SYSTEM_PIPELINE_RUN,
parent_contexts=[self._metadata_node.resource_name],
)
return context._Context.list(filter=filter_str, **service_request_args)
def _get_latest_time_series_metric_columns(self) -> Dict[str, Union[float, int]]:
"""Determines the latest step for each time series metric.
Returns:
Dictionary mapping time series metric key to the latest step of that metric.
"""
if self._backing_tensorboard_run:
time_series_metrics = (
self._backing_tensorboard_run.resource.read_time_series_data()
)
return {
display_name: data.values[-1].scalar.value
for display_name, data in time_series_metrics.items()
if data.value_type
== gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
}
return {}
def _log_pipeline_job(self, pipeline_job: pipeline_jobs.PipelineJob):
"""Associate this PipelineJob's Context to the current ExperimentRun Context as a child context.
Args:
pipeline_job (pipeline_jobs.PipelineJob):
Required. The PipelineJob to associate.
"""
pipeline_job_context = pipeline_job._get_context()
self._metadata_node.add_context_children([pipeline_job_context])
@_v1_not_supported
def log(
self,
*,
pipeline_job: Optional[pipeline_jobs.PipelineJob] = None,
):
"""Log a Vertex Resource to this experiment run.
```
my_run = aiplatform.ExperimentRun('my-run', experiment='my-experiment')
my_job = aiplatform.PipelineJob(...)
my_job.submit()
my_run.log(my_job)
```
Args:
pipeline_job (aiplatform.PipelineJob): Optional. A Vertex PipelineJob.
"""
if pipeline_job:
self._log_pipeline_job(pipeline_job=pipeline_job)
@staticmethod
def _validate_run_id(run_id: str):
"""Validates the run id
Args:
run_id(str): Required. The run id to validate.
Raises:
ValueError if run id is too long.
"""
if len(run_id) > 128:
raise ValueError(
f"Length of Experiment ID and Run ID cannot be greater than 128. "
f"{run_id} is of length {len(run_id)}"
)
@classmethod
def create(
cls,
run_name: str,
*,
experiment: Optional[Union[experiment_resources.Experiment, str]] = None,
tensorboard: Optional[Union[tensorboard_resource.Tensorboard, str]] = None,
state: gca_execution.Execution.State = gca_execution.Execution.State.RUNNING,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "ExperimentRun":
"""Creates a new experiment run in Vertex AI Experiments.
```
my_run = aiplatform.ExperimentRun.create('my-run', experiment='my-experiment')
```
Args:
run_name (str): Required. The name of this run.
experiment (Union[aiplatform.Experiment, str]):
Optional. The name or instance of the experiment to create this run under.
If not provided, will default to the experiment set in `aiplatform.init`.
tensorboard (Union[aiplatform.Tensorboard, str]):
Optional. The resource name or instance of Vertex Tensorboard to use as the backing
Tensorboard for time series metric logging. If not provided, will default to
the backing tensorboard of the parent experiment if set. Must be in same project and location
as this experiment run.
state (aiplatform.gapic.Execution.State):
Optional. The state of this run. Defaults to RUNNING.
project (str):
Optional. Project where this experiment will be created. Overrides project set in
aiplatform.init.
location (str):
Optional. Location where this experiment will be created. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to create this experiment. Overrides
credentials set in aiplatform.init.
Returns:
The newly created experiment run.
"""
experiment = cls._get_experiment(experiment)
run_id = _format_experiment_run_resource_id(
experiment_name=experiment.name, run_name=run_name
)
cls._validate_run_id(run_id)
def _create_context():
with experiment_resources._SetLoggerLevel(resource):
return context._Context._create(
resource_id=run_id,
display_name=run_name,
schema_title=constants.SYSTEM_EXPERIMENT_RUN,
schema_version=constants.SCHEMA_VERSIONS[
constants.SYSTEM_EXPERIMENT_RUN
],
metadata={
constants._PARAM_KEY: {},
constants._METRIC_KEY: {},
constants._STATE_KEY: state.name,
},
project=project,
location=location,
credentials=credentials,
)
metadata_context = _create_context()
if metadata_context is None:
raise RuntimeError(
f"Experiment Run with name {run_name} in {experiment.name} already exists."
)
experiment_run = cls.__new__(cls)
experiment_run._experiment = experiment
experiment_run._run_name = metadata_context.display_name
experiment_run._metadata_node = metadata_context
experiment_run._backing_tensorboard_run = None
experiment_run._largest_step = None
if tensorboard:
cls._assign_backing_tensorboard(
self=experiment_run, tensorboard=tensorboard
)
else:
cls._assign_to_experiment_backing_tensorboard(self=experiment_run)
experiment_run._associate_to_experiment(experiment)
return experiment_run
def _assign_to_experiment_backing_tensorboard(self):
"""Assigns parent Experiment backing tensorboard resource to this Experiment Run."""
backing_tensorboard_resource = (
self._experiment.get_backing_tensorboard_resource()
)
if backing_tensorboard_resource:
self.assign_backing_tensorboard(tensorboard=backing_tensorboard_resource)
@staticmethod
def _format_tensorboard_experiment_display_name(experiment_name: str) -> str:
"""Formats Tensorboard experiment name that backs this run.
Args:
experiment_name (str): Required. The name of the experiment.
Returns:
Formatted Tensorboard Experiment name
"""
# post fix helps distinguish from the Vertex Experiment in console
return f"{experiment_name} Backing Tensorboard Experiment"
def _assign_backing_tensorboard(
self, tensorboard: Union[tensorboard_resource.Tensorboard, str]
):
"""Assign tensorboard as the backing tensorboard to this run.
Args:
tensorboard (Union[tensorboard_resource.Tensorboard, str]):
Required. Tensorboard instance or resource name.
"""
if isinstance(tensorboard, str):
tensorboard = tensorboard_resource.Tensorboard(
tensorboard, credentials=self._metadata_node.credentials
)
tensorboard_resource_name_parts = tensorboard._parse_resource_name(
tensorboard.resource_name
)
tensorboard_experiment_resource_name = (
tensorboard_resource.TensorboardExperiment._format_resource_name(
experiment=self._experiment.name, **tensorboard_resource_name_parts
)
)
try:
tensorboard_experiment = tensorboard_resource.TensorboardExperiment(
tensorboard_experiment_resource_name,
credentials=tensorboard.credentials,
)
except exceptions.NotFound:
with experiment_resources._SetLoggerLevel(tensorboard_resource):
tensorboard_experiment = (
tensorboard_resource.TensorboardExperiment.create(
tensorboard_experiment_id=self._experiment.name,
display_name=self._format_tensorboard_experiment_display_name(
self._experiment.name
),
tensorboard_name=tensorboard.resource_name,
credentials=tensorboard.credentials,
)
)
tensorboard_experiment_name_parts = tensorboard_experiment._parse_resource_name(
tensorboard_experiment.resource_name
)
tensorboard_run_resource_name = (
tensorboard_resource.TensorboardRun._format_resource_name(
run=self._run_name, **tensorboard_experiment_name_parts
)
)
try:
tensorboard_run = tensorboard_resource.TensorboardRun(
tensorboard_run_resource_name
)
except exceptions.NotFound:
with experiment_resources._SetLoggerLevel(tensorboard_resource):
tensorboard_run = tensorboard_resource.TensorboardRun.create(
tensorboard_run_id=self._run_name,
tensorboard_experiment_name=tensorboard_experiment.resource_name,
credentials=tensorboard.credentials,
)
gcp_resource_url = rest_utils.make_gcp_resource_rest_url(tensorboard_run)
with experiment_resources._SetLoggerLevel(resource):
tensorboard_run_metadata_artifact = artifact.Artifact._create(
uri=gcp_resource_url,
resource_id=self._tensorboard_run_id(self._metadata_node.name),
metadata={
"resourceName": tensorboard_run.resource_name,
constants._VERTEX_EXPERIMENT_TRACKING_LABEL: True,
},
schema_title=constants._TENSORBOARD_RUN_REFERENCE_ARTIFACT.schema_title,
schema_version=constants._TENSORBOARD_RUN_REFERENCE_ARTIFACT.schema_version,
state=gca_artifact.Artifact.State.LIVE,
)
self._metadata_node.add_artifacts_and_executions(
artifact_resource_names=[tensorboard_run_metadata_artifact.resource_name]
)
self._backing_tensorboard_run = (
experiment_resources._VertexResourceWithMetadata(
resource=tensorboard_run, metadata=tensorboard_run_metadata_artifact
)
)
@staticmethod
def _tensorboard_run_id(run_id: str) -> str:
"""Helper method to format the tensorboard run artifact resource id for a run.
Args:
run_id: The resource id of the experiment run.
Returns:
Resource id for the associated tensorboard run artifact.
"""
return f"{run_id}-tb-run"
@_v1_not_supported
def assign_backing_tensorboard(
self, tensorboard: Union[tensorboard_resource.Tensorboard, str]
):
"""Assigns tensorboard as backing tensorboard to support timeseries metrics logging for this run.
Args:
tensorboard (Union[aiplatform.Tensorboard, str]):
Required. Tensorboard instance or resource name.
"""
backing_tensorboard = self._lookup_tensorboard_run_artifact()
if backing_tensorboard:
raise ValueError(
f"Experiment run {self._run_name} already associated to tensorboard resource {backing_tensorboard.resource.resource_name}"
)
self._assign_backing_tensorboard(tensorboard=tensorboard)
def _get_latest_time_series_step(self) -> int:
"""Gets latest time series step of all time series from Tensorboard resource.
Returns:
Latest step of all time series metrics.
"""
data = self._backing_tensorboard_run.resource.read_time_series_data()
return max(ts.values[-1].step if ts.values else 0 for ts in data.values())
@_v1_not_supported
def log_time_series_metrics(
self,
metrics: Dict[str, float],
step: Optional[int] = None,
wall_time: Optional[timestamp_pb2.Timestamp] = None,
):
"""Logs time series metrics to backing TensorboardRun of this Experiment Run.
```
run.log_time_series_metrics({'accuracy': 0.9}, step=10)
```
Args:
metrics (Dict[str, Union[str, float]]):
Required. Dictionary where keys are metric names and values are metric values.
step (int):
Optional. Step index of this data point within the run.
If not provided, the latest
step amongst all time series metrics already logged will be used.
wall_time (timestamp_pb2.Timestamp):
Optional. Wall clock timestamp when this data point is
generated by the end user.
If not provided, this will be generated based on the value from time.time()
Raises:
RuntimeError: If current experiment run doesn't have a backing Tensorboard resource.
"""
if not self._backing_tensorboard_run:
self._assign_to_experiment_backing_tensorboard()
if not self._backing_tensorboard_run:
raise RuntimeError(
"Please set this experiment run with backing tensorboard resource to use log_time_series_metrics."
)
self._soft_create_time_series(metric_keys=set(metrics.keys()))
if not step:
step = self._largest_step or self._get_latest_time_series_step()
step += 1
self._largest_step = step
self._backing_tensorboard_run.resource.write_tensorboard_scalar_data(
time_series_data=metrics, step=step, wall_time=wall_time
)
def _soft_create_time_series(self, metric_keys: Set[str]):
"""Creates TensorboardTimeSeries for the metric keys if one currently does not exist.
Args:
metric_keys (Set[str]): Keys of the metrics.
"""
if any(
key
not in self._backing_tensorboard_run.resource._time_series_display_name_to_id_mapping
for key in metric_keys
):
self._backing_tensorboard_run.resource._sync_time_series_display_name_to_id_mapping()
for key in metric_keys:
if (
key
not in self._backing_tensorboard_run.resource._time_series_display_name_to_id_mapping
):
with experiment_resources._SetLoggerLevel(tensorboard_resource):
self._backing_tensorboard_run.resource.create_tensorboard_time_series(
display_name=key
)
def log_params(self, params: Dict[str, Union[float, int, str]]):
"""Log single or multiple parameters with specified key value pairs.
Parameters with the same key will be overwritten.
```
my_run = aiplatform.ExperimentRun('my-run', experiment='my-experiment')
my_run.log_params({'learning_rate': 0.1, 'dropout_rate': 0.2})
```
Args:
params (Dict[str, Union[float, int, str]]):
Required. Parameter key/value pairs.
Raises:
TypeError: If key is not str or value is not float, int, or str.
"""
# query the latest run execution resource before logging.
for key, value in params.items():
if not isinstance(key, str):
raise TypeError(
f"{key} is of type {type(key).__name__} must of type str"
)
if not isinstance(value, (float, int, str)):
raise TypeError(
f"Value for key {key} is of type {type(value).__name__} but must be one of float, int, str"
)
if self._is_legacy_experiment_run():
self._metadata_node.update(metadata=params)
else:
self._metadata_node.update(metadata={constants._PARAM_KEY: params})
def log_metrics(self, metrics: Dict[str, Union[float, int, str]]):
"""Log single or multiple Metrics with specified key and value pairs.
Metrics with the same key will be overwritten.
```
my_run = aiplatform.ExperimentRun('my-run', experiment='my-experiment')
my_run.log_metrics({'accuracy': 0.9, 'recall': 0.8})
```
Args:
metrics (Dict[str, Union[float, int]]):
Required. Metrics key/value pairs.
Raises:
TypeError: If keys are not str or values are not float, int, or str.
"""
for key, value in metrics.items():
if not isinstance(key, str):
raise TypeError(
f"{key} is of type {type(key).__name__} must of type str"
)
if not isinstance(value, (float, int, str)):
raise TypeError(
f"Value for key {key} is of type {type(value).__name__} but must be one of float, int, str"
)
if self._is_legacy_experiment_run():
self._metadata_metric_artifact.update(metadata=metrics)
else:
# TODO: query the latest metrics artifact resource before logging.
self._metadata_node.update(metadata={constants._METRIC_KEY: metrics})
@_v1_not_supported
def get_time_series_data_frame(self) -> "pd.DataFrame": # noqa: F821
"""Returns all time series in this Run as a DataFrame.
Returns:
pd.DataFrame: Time series metrics in this Run as a Dataframe.
"""
try:
import pandas as pd
except ImportError:
raise ImportError(
"Pandas is not installed and is required to get dataframe as the return format. "
'Please install the SDK using "pip install google-cloud-aiplatform[metadata]"'
)
if not self._backing_tensorboard_run:
return pd.DataFrame({})
import numpy as np
import scipy as sp
from scipy import sparse
import pandas as pd
from skmisc.loess import loess
def preprocess(
data, cov=None, adj_prop=None, n_mean_bin=20, n_var_bin=20, n_chunk=None, copy=False
):
"""
Preprocess single-cell data for scDRS analysis.
1. Correct covariates by regressing out the covariates (including
a constant term) and adding back the original mean for each gene.
2. Compute gene-level and cell-level statistics for the
covariate-corrected data.
Information is stored in `data.uns["SCDRS_PARAM"]`. It operates in
implicit-covariate-correction mode when `data.X` is sparse and `cov`
is not `None`, to improve memory efficiency; it operates in normal mode
otherwise.
In normal mode, `data.X` is replaced by the covariate-corrected data.
In implicit-covariate-correction mode, the covariate correction information
is stored in `data.uns["SCDRS_PARAM"]` but is not explicitly applied to
`data.X`, so that `data.X` is always sparse. Subsequent computations on
the covariate-corrected data are based on the original data `data.X` and
the covariate correction information. Specifically,
CORRECTED_X = data.X + COV_MAT * COV_BETA + COV_GENE_MEAN
The `adj_prop` option is used for adjusting for cell group proportions,
where each cell is inversely weighted proportional to its corresponding
cell group size for computing expression mean and variance for genes.
For stability, the smallest group size is set to be at least 1% of the
largest group size.
Parameters
----------
data : anndata.AnnData
Single-cell data of shape (n_cell, n_gene). Assumed
to be size-factor-normalized and log1p-transformed.
cov : pandas.DataFrame, default=None
Covariates of shape (n_cell, n_cov). Should contain
a constant term and have values for at least 75% cells.
adj_prop : str, default=None
Cell group annotation (e.g., cell type) used for adjusting for cell group proportions.
`adj_prop` should be present in `data.obs.columns`.
n_mean_bin : int, default=20
Number of mean-expression bins for matching control genes.
n_var_bin : int, default=20
Number of expression-variance bins for matching control genes.
n_chunk : int, default=None
Number of chunks to split the data into when computing mean and variance
using _get_mean_var_implicit_cov_corr. If n_chunk is None, set to 5/sparsity.
copy : bool, default=False
Return a copy instead of writing to data.
Returns
-------
Overview:
`data.X` will be updated as the covariate-corrected data in normal mode
and will stay untouched in the implicit covariate correction mode.
Preprocessing information is stored in `data.uns["SCDRS_PARAM"]`.
FLAG_SPARSE : bool
If data.X is sparse.
FLAG_COV : bool
If covariate correction is performed.
COV_MAT : pandas.DataFrame
Covariate matrix of shape (n_cell, n_cov).
COV_BETA: pandas.DataFrame
Covariate effect sizes of shape (n_gene, n_cov).
COV_GENE_MEAN: pandas.Series
Gene-level mean expression.
GENE_STATS : pandas.DataFrame
Gene-level statistics of shape (n_gene, 7):
- "mean" : mean expression in log scale.
- "var" : expression variance in log scale.
- "var_tech" : technical variance in log scale.
- "ct_mean" : mean expression in original non-log scale.
- "ct_var" : expression variance in original non-log scale.
- "ct_var_tech" : technical variance in original non-log scale.
- "mean_var" : n_mean_bin * n_var_bin mean-variance bins
CELL_STATS : pandas.DataFrame
Cell-level statistics of shape (n_cell, 2):
- "mean" : mean expression in log scale.
- "var" : variance expression in log scale.
Notes
-----
Covariate regression:
adata.X = cov * beta + resid_X.
scDRS saves:
COV_MAT = cov, COV_BETA = (-beta), COV_GENE_MEAN = adata.X.mean(axis=0)
The scDRS covariate-corrected data:
CORRECTED_X = resid_X + GENE_MEAN = adata.X + COV_MAT * COV_BETA + COV_GENE_MEAN.
"""
adata = data.copy() if copy else data
n_cell, n_gene = adata.shape
# Parameters and flags
flag_sparse = sparse.issparse(adata.X)
flag_cov = cov is not None
flag_adj_prop = adj_prop is not None
adata.uns["SCDRS_PARAM"] = {
"FLAG_SPARSE": flag_sparse,
"FLAG_COV": flag_cov,
"FLAG_ADJ_PROP": flag_adj_prop,
}
# Update adata.X
if flag_sparse:
# Force sparse.csr_matrix for the sparse mode
if not isinstance(adata.X, sparse.csr_matrix):
adata.X = sparse.csr_matrix(adata.X)
else:
# Force np.ndarray for the dense mode
if not isinstance(adata.X, np.ndarray):
adata.X = np.array(adata.X)
# Covariate correction
if flag_cov:
# Check if cells in data and cov are consistent
assert (
len(set(cov.index) & set(adata.obs_names)) > 0.75 * n_cell
), "cov does not match the cells in data"
df_cov = pd.DataFrame(index=adata.obs_names)
df_cov = df_cov.join(cov)
df_cov.fillna(df_cov.mean(), inplace=True)
# Add const term if df_cov does not already have it (or a linear combination of it)
v_resid = reg_out(np.ones(n_cell), df_cov.values)
if (v_resid ** 2).mean() > 0.01:
df_cov["SCDRS_CONST"] = 1
# Gene mean: numpy.ndarray of shape (n_gene,)
v_gene_mean = np.array(adata.X.mean(axis=0)).flatten()
if flag_sparse:
# Sparse mode: save correction information
mat_beta = np.linalg.solve(
np.dot(df_cov.values.T, df_cov.values) / n_cell,
sparse.csr_matrix.dot(df_cov.values.T, adata.X) / n_cell,
)
adata.uns["SCDRS_PARAM"]["COV_MAT"] = df_cov
adata.uns["SCDRS_PARAM"]["COV_BETA"] = pd.DataFrame(
-mat_beta.T, index=adata.var_names, columns=df_cov.columns
)
adata.uns["SCDRS_PARAM"]["COV_GENE_MEAN"] = pd.Series(
v_gene_mean, index=adata.var_names
)
else:
# Dense mode: regress out covariate and add back mean
adata.X = reg_out(adata.X, df_cov.values)
adata.X += v_gene_mean
# # Note: this version (dense+cov) should produce the exact toydata results
# adata.var["mean"] = adata.X.mean(axis=0).T
# adata.X -= adata.var["mean"].values
# adata.X = reg_out(adata.X, df_cov[['SCDRS_CONST', 'covariate']].values)
# adata.X += adata.var["mean"]
# Precompute for each gene and mean&var for each cell
if flag_sparse and flag_cov:
implicit_cov_corr = True
if n_chunk is None:
n_chunk = 5 * adata.shape[0] * adata.shape[1] // adata.X.data.shape[0] + 1
else:
implicit_cov_corr = False
if n_chunk is None:
n_chunk = 20
if flag_adj_prop:
err_msg = "'adj_prop'=%s not in 'adata.obs.columns'" % adj_prop
assert adj_prop in adata.obs, err_msg
err_msg = (
"On average <10 cells per group, maybe `%s` is not categorical?" % adj_prop
)
assert adata.obs[adj_prop].unique().shape[0] < 0.1 * n_cell, err_msg
temp_df = adata.obs[[adj_prop]].copy()
temp_df["cell"] = 1
temp_df = temp_df.groupby(adj_prop).agg({"cell": len})
temp_df["cell"].clip(lower=int(0.01 * temp_df["cell"].max()), inplace=True)
temp_dic = {x: n_cell / temp_df.loc[x, "cell"] for x in temp_df.index}
cell_weight = np.array([temp_dic[x] for x in adata.obs[adj_prop]])
cell_weight = cell_weight / cell_weight.mean()
else:
cell_weight = None
df_gene, df_cell = compute_stats(
adata,
implicit_cov_corr=implicit_cov_corr,
cell_weight=cell_weight,
n_mean_bin=n_mean_bin,
n_var_bin=n_var_bin,
n_chunk=n_chunk,
)
adata.uns["SCDRS_PARAM"]["GENE_STATS"] = df_gene
adata.uns["SCDRS_PARAM"]["CELL_STATS"] = df_cell
return adata if copy else None
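# Hedged usage sketch (dataset, file and column names are assumptions, kept commented
# out so the module stays import-safe): run preprocessing with a covariate frame that
# contains a constant term, then inspect what was stored in adata.uns["SCDRS_PARAM"].
#
# import scanpy as sc
# adata = sc.read_h5ad("expr.h5ad")  # size-factor-normalized, log1p-transformed data
# df_cov = pd.DataFrame({"SCDRS_CONST": 1.0}, index=adata.obs_names)
# preprocess(adata, cov=df_cov, adj_prop="cell_type", n_chunk=None)
# adata.uns["SCDRS_PARAM"]["GENE_STATS"].head()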
def compute_stats(
adata,
implicit_cov_corr=False,
cell_weight=None,
n_mean_bin=20,
n_var_bin=20,
n_chunk=20,
):
"""
Compute gene-level and cell-level statistics used for scDRS analysis. `adata`
should be log-scale. It has two modes. In the normal mode, it computes
statistics for `adata.X`. In the implicit covariate correction mode, the
covariate correction has not been performed on `adata.X` but the corresponding
information is stored in `adata.uns["SCDRS_PARAM"]`. In this case, it computes
statistics for the covariate-corrected data
`transformed_X = adata.X + COV_MAT * COV_BETA + COV_GENE_MEAN`
Parameters
----------
adata : anndata.AnnData
Single-cell data of shape (n_cell, n_gene). Assumed to be log-scale.
implicit_cov_corr : bool, default=False
If True, compute statistics for the implicit corrected data
`adata.X + COV_MAT * COV_BETA + COV_GENE_MEAN`. Otherwise, compute
for the original data `adata.X`.
cell_weight : array_like, default=None
Cell weights of length `adata.shape[0]` for cells in `adata`,
used for computing weighted gene-level statistics.
n_mean_bin : int, default=20
Number of mean-expression bins for matching control genes.
n_var_bin : int, default=20
Number of expression-variance bins for matching control genes.
n_chunk : int, default=20
Number of chunks to split the data into when computing mean and variance
using _get_mean_var_implicit_cov_corr.
Returns
-------
df_gene : pandas.DataFrame
Gene-level statistics of shape (n_gene, 7):
- "mean" : mean expression in log scale.
- "var" : variance expression in log scale.
- "var_tech" : technical variance in log scale.
- "ct_mean" : mean expression in original non-log scale.
- "ct_var" : variance expression in original non-log scale.
- "ct_var_tech" : technical variance in original non-log scale.
- "mean_var" : n_mean_bin * n_var_bin mean-variance bins
df_cell : pandas.DataFrame
Cell-level statistics of shape (n_cell, 2):
- "mean" : mean expression in log scale.
- "var" : variance expression in log scale.
"""
if implicit_cov_corr:
assert (
"SCDRS_PARAM" in adata.uns
), "adata.uns['SCDRS_PARAM'] is not found, run `scdrs.pp.preprocess` before calling this function"
df_gene = pd.DataFrame(
index=adata.var_names,
columns=[
"mean",
"var",
"var_tech",
"ct_mean",
"ct_var",
"ct_var_tech",
"mean_var",
],
)
df_cell = pd.DataFrame(index=adata.obs_names, columns=["mean", "var"])
# Gene-level statistics
if not implicit_cov_corr:
# Normal mode
df_gene["mean"], df_gene["var"] = _get_mean_var(
adata.X, axis=0, weights=cell_weight
)
# Get the mean and var for the non-log-scale size-factor-normalized counts
# It is highly correlated to the non-size-factor-normalized counts
if sparse.issparse(adata.X): # sp sparse matrix
temp_X = adata.X.copy().expm1() # exp(X)-1 to get ct matrix from logct
else:
temp_X = np.expm1(adata.X) # numpy ndarray
df_gene["ct_mean"], df_gene["ct_var"] = _get_mean_var(
temp_X, axis=0, weights=cell_weight
)
del temp_X
else:
# Implicit covariate correction mode
df_gene["mean"], df_gene["var"] = _get_mean_var_implicit_cov_corr(
adata, axis=0, n_chunk=n_chunk, weights=cell_weight
)
df_gene["ct_mean"], df_gene["ct_var"] = _get_mean_var_implicit_cov_corr(
adata, transform_func=np.expm1, axis=0, n_chunk=n_chunk, weights=cell_weight
)
# Borrowed from scanpy _highly_variable_genes_seurat_v3
not_const = df_gene["ct_var"].values > 0
estimat_var = np.zeros(adata.shape[1], dtype=np.float64)
y = np.log10(df_gene["ct_var"].values[not_const])
x = np.log10(df_gene["ct_mean"].values[not_const])
model = loess(x, y, span=0.3, degree=2)
model.fit()
estimat_var[not_const] = model.outputs.fitted_values
df_gene["ct_var_tech"] = 10 ** estimat_var
# Recipe from Frost Nucleic Acids Research 2020
df_gene["var_tech"] = df_gene["var"] * df_gene["ct_var_tech"] / df_gene["ct_var"]
df_gene.loc[df_gene["var_tech"].isna(), "var_tech"] = 0
# Add n_mean_bin*n_var_bin mean_var bins
n_bin_max = np.floor(np.sqrt(adata.shape[1] / 10)).astype(int)
if (n_mean_bin > n_bin_max) | (n_var_bin > n_bin_max):
n_mean_bin, n_var_bin = n_bin_max, n_bin_max
print(
"Too few genes for 20*20 bins, setting n_mean_bin=n_var_bin=%d"
% (n_bin_max)
)
    v_mean_bin = pd.qcut(df_gene["mean"], n_mean_bin, labels=False, duplicates="drop")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import calendar
import datetime
import cairo
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from itertools import product
#==================================================================================================================
def add_months(sourcedate, months):
"""Función que permite sumar o restar 'months' meses a una fecha 'sourcedate' determinada.
El formato de 'sourcedate' es de la forma datetime.date(año, mes, dia)."""
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day, calendar.monthrange(year,month)[1])
return datetime.date(year, month, day)
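# Worked example (dates chosen only for illustration): add_months clamps the day to the target month,
# so add_months(datetime.date(2021, 1, 31), 1) returns datetime.date(2021, 2, 28).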
#==================================================================================================================
def datetime_to_integer(dt_time):
"""Función que permite cambiar el formato de una fecha 'dt_time' a un número entero.
El formato de 'dt_time' es datetime.date(año, mes, dia)"""
integer = 10000*dt_time.year + 100*dt_time.month + dt_time.day
return integer
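# Worked example: datetime_to_integer(datetime.date(2021, 3, 5)) returns 20210305
# (10000*2021 + 100*3 + 5).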
#====================================================================================================================
def preprocesamiento(rfm):
df = rfm[rfm.ANTIGUEDAD >= 6]
#df = df.dropna(how='any',axis=0)
return df
#=====================================================================================================================
def distribucion_aperturas(df):
a4_dims = (15, 8)
df['APERTURA'] = df['APERTURA'].apply(lambda x: x[0:4])
df = df[['APERTURA', 'DNI']].groupby(['APERTURA']).count()
a4_dims = (15, 8)
fig, ax = plt.subplots(figsize=a4_dims)
ax.set_title('Distribución de aperturas de cuenta por año')
m = sns.barplot(ax = ax, y=df['DNI'], x=df.index)
m.set_xticklabels(rotation=45, labels=df.index)
ax.set(xlabel='Año de apertura', ylabel='Cantidad')
plt.show()
#====================================================================================================================
def histogramas(R_serie, F_serie, M_serie):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 8.27))
plt.subplots_adjust(wspace = 0.4)
fig.suptitle('Distribución de clientes según Recencia, Frecuencia y Monto')
ax1.hist(R_serie, bins = 50, range = [0,100] , facecolor = 'green', alpha = 0.75,
edgecolor = 'black', linewidth = 0.5 )
ax1.set(xlabel='Recencia (días)', ylabel = 'Cantidad')
ax1.tick_params(axis='both', labelrotation = 45)
ax2.hist(F_serie, bins = 50, range = [0,90] , facecolor = 'blue', alpha = 0.75,
edgecolor = 'black', linewidth = 0.5 )
ax2.set(xlabel='Frecuencia')
ax2.tick_params(axis='both', labelrotation = 45)
ax3.hist(M_serie, bins = 50, range = [0,150000] , facecolor = 'red', alpha = 0.75,
edgecolor = 'black', linewidth = 0.5 )
ax3.set(xlabel='Monto (Pesos)')
ax3.tick_params(axis='both', labelrotation = 45)
plt.show()
#==========================================================================================================================
def RScore(x,p,d):
"""Funcion para obtener el Recency score. x es cada registro de la serie rfm['RECENCIA'] y d[p] es la serie quantile['RECENCIA'] """
if x <= d[p][0.20]:
return 5
elif x <= d[p][0.4]:
return 4
elif x <= d[p][0.6]:
return 3
elif x <= d[p][0.8]:
return 2
else:
return 1
def FMScore(x,p,d):
"""Funcion para obtener el score para la frecuencia y para el monto"""
if x <= d[p][0.20]:
return 1
elif x <= d[p][0.4]:
return 2
elif x <= d[p][0.6]:
return 3
elif x <= d[p][0.8]:
return 4
else:
return 5
def quintil_rent(x,p,d):
"""Funcion para obtener la division por quintiles de la rentabilidad"""
if x <= d[p][0.20]:
return 'Q1'
elif x <= d[p][0.4]:
return 'Q2'
elif x <= d[p][0.6]:
return 'Q3'
elif x <= d[p][0.8]:
return 'Q4'
else:
return 'Q5'
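# Illustration of the scoring convention (the quantile values below are made up):
# if quantile['RECENCIA'] were {0.2: 10, 0.4: 25, 0.6: 60, 0.8: 120}, then
# RScore(7, 'RECENCIA', quantile) -> 5 (most recent) and RScore(200, 'RECENCIA', quantile) -> 1,
# while FMScore and quintil_rent score in the opposite direction (higher value, higher score/quintile).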
#=========================================================================================================================
def rfm_scoring(rfm, fecha_in, fecha_out):
"""Genera la división de las variables recencia, frecuencia y monto en quintiles. Además calcula un RFMscore con
formato string y un Total_score cuyo valor es la suma de los scores individuales. Finalmente, se guarda un .csv
listo para analizar. Como argumentos la función necesita 'rfm' que es el dataframe generado por la consulta SQL
y 'fecha_in' y 'fecha_out', strings de las fechas 'fi_int' y 'ff_int' para poder etiquetar el nombre del
dataframe guardado."""
quantile = rfm[['RECENCIA', 'MONTO', 'FRECUENCIA']].quantile(q=[0.2,0.4,0.6,0.8])
rfm['R_Quintil'] = rfm['RECENCIA'].apply(RScore,args=('RECENCIA',quantile))
rfm['F_Quintil'] = rfm['FRECUENCIA'].apply(FMScore, args=('FRECUENCIA',quantile))
rfm['M_Quintil'] = rfm['MONTO'].apply(FMScore, args=('MONTO',quantile))
rfm['RFMScore'] = rfm.R_Quintil.map(str) \
+ rfm.F_Quintil.map(str) \
+ rfm.M_Quintil.map(str)
rfm['Total_score'] = rfm['R_Quintil'] + rfm['F_Quintil'] + rfm['M_Quintil']
rfm.to_csv(f'rfm-final-{fecha_in}--{fecha_out}.csv', index = False)
return rfm
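# Usage sketch (the file-name dates are hypothetical): rfm = rfm_scoring(rfm, '20200101', '20200331')
# A customer scored R=5, F=4, M=5 ends up with RFMScore '545' and Total_score 14.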
#=======================================================================================================
def rentabilidad(fecha_in_1, fecha_out_1):
rfm = pd.read_csv(f'rfm-segmentos-{fecha_in_1}--{fecha_out_1}.csv')
q6 = float(rfm[['RENTABILIDAD']].mean() + 3 * rfm[['RENTABILIDAD']].std())
q0 = float(rfm[['RENTABILIDAD']].mean() - 3 * rfm[['RENTABILIDAD']].std())
    # The following dataframes contain q0 and q6 (the worst and best customers by profitability, respectively);
    # they are discarded for the analysis below.
#df_q6 = rfm[rfm['RENTABILIDAD'] > q6 ]
#df_q0 = rfm[rfm['RENTABILIDAD'] < q0 ]
    df_quintil = rfm[(rfm['RENTABILIDAD'] > q0) & (rfm['RENTABILIDAD'] < q6)]
quintiles = df_quintil[['RENTABILIDAD']].quantile(q=[0.2,0.4,0.6,0.8])
rfm['RENT_QUINTIL'] = rfm['RENTABILIDAD'].apply(quintil_rent, args=('RENTABILIDAD',quintiles))
df_quintil_segment = rfm[['RENT_QUINTIL', 'Segment']]
summary = pd.pivot_table(data=df_quintil_segment,
index='RENT_QUINTIL',columns='Segment',
aggfunc='size').apply(lambda x: (x/sum(x))*100,axis=0)
summary.to_csv(f'rentabilidad--{fecha_in_1}-{fecha_out_1}.csv')
fig, ((ax0, ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8, ax9)) = plt.subplots(2, 5, figsize=(15, 9))
plt.subplots_adjust(wspace = 0.5)
fig.suptitle('Distribución de rentabilidad para cada segmento RFM', fontsize=25)
sns.barplot(ax=ax0, y=summary['Campeones'], x=summary.index)
ax0.set(ylabel = 'Campeones (%)')
sns.barplot(ax=ax1, y=summary['Leales'], x=summary.index)
ax1.set(ylabel = 'Leales (%)')
sns.barplot(ax=ax2, y=summary['Potencialmente Leales'], x=summary.index)
ax2.set(ylabel = 'Potencialmente Leales (%)')
sns.barplot(ax=ax3, y=summary['Prometedores'], x=summary.index)
ax3.set(ylabel = 'Prometedores (%)')
sns.barplot(ax=ax4, y=summary['Reciente operativo'], x=summary.index)
ax4.set(ylabel = 'Reciente operativo (%)')
sns.barplot(ax=ax5, y=summary['No se pueden perder'], x=summary.index)
ax5.set(ylabel = 'No se pueden perder (%)')
sns.barplot(ax=ax6, y=summary['Necesitan Atencion'], x=summary.index)
ax6.set(ylabel = 'Necesitan Atencion (%)')
sns.barplot(ax=ax7, y=summary['En Riesgo'], x=summary.index)
ax7.set(ylabel = 'En Riesgo (%)')
sns.barplot(ax=ax8, y=summary['Cercanos a Hibernar'], x=summary.index)
ax8.set(ylabel = 'Cercanos a Hibernar (%)')
sns.barplot(ax=ax9, y=summary['Hibernando'], x=summary.index)
ax9.set(ylabel = 'Hibernando (%)')
return summary
#=======================================================================================================
def rentabilidad_acum(fecha_in_1, fecha_out_1, c):
rfm = pd.read_csv(f'rfm-segmentos-{fecha_in_1}--{fecha_out_1}.csv')
q6 = float(rfm[['RENTABILIDAD']].mean() + 3 * rfm[['RENTABILIDAD']].std())
q0 = float(rfm[['RENTABILIDAD']].mean() - 3 * rfm[['RENTABILIDAD']].std())
    df_percentil = rfm[(rfm['RENTABILIDAD'] > q0) & (rfm['RENTABILIDAD'] < q6)]
percentil = df_percentil[['RENTABILIDAD']].quantile(q=[round(c/100, 2) for c in range(1, 100 + 1)])
lista = []
for i in range(1, 100 + 1):
if i == 1:
lista.append(df_percentil[df_percentil['RENTABILIDAD']<=percentil['RENTABILIDAD'][round(i/100, 2)]]['RENTABILIDAD'].sum())
elif i != 101:
acum_1 = df_percentil[df_percentil['RENTABILIDAD']<=percentil['RENTABILIDAD'][round((i/100)-0.01, 2)]]['RENTABILIDAD'].sum()
acum_2 = df_percentil[df_percentil['RENTABILIDAD']<=percentil['RENTABILIDAD'][round(i/100, 2)]]['RENTABILIDAD'].sum()
lista.append(round(acum_2-acum_1, 2))
else:
pass
dic = {'Percentil': [round(x/100,2) for x in range(1, 100 + 1)], 'Rent_Acum': lista}
df_acum = pd.DataFrame(dic)
a = df_acum[['Percentil', 'Rent_Acum']].iloc[99-c:]
b = round(((a.Rent_Acum.sum())/(df_acum.Rent_Acum.sum()))*100, 2)
print('-------------------------------------------------------------------------------------------------')
print(f'El {c}% de los clientes más rentables explican el {b}% de la rentabilidad total')
print('-------------------------------------------------------------------------------------------------')
a4_dims = (15, 8)
fig, ax = plt.subplots(figsize=a4_dims)
fig.suptitle('Rentabilidad acumulada por cada percentil', fontsize=25)
sns.lineplot(x=df_acum.Percentil, y=df_acum.Rent_Acum, color='red', linewidth=2.5)
ax.set(ylabel = 'Rentabilidad Acumulada ($)', xlabel = 'Percentil')
plt.gca().invert_xaxis()
plt.show()
return df_acum
#=======================================================================================================
def rent_div_percentiles(fecha_in_1, fecha_out_1):
df_percentil = pd.read_csv(f'rfm-segmentos-{fecha_in_1}--{fecha_out_1}.csv')
percentiles = df_percentil[['RENTABILIDAD']].quantile(q = [round(c/100, 2) for c in range(1, 100 + 1)])
for i in range(1,100 + 1):
for j in df_percentil['RENTABILIDAD']:
if j <= percentiles['RENTABILIDAD'][i/100]:
df_percentil['P_RENT'] = f'Q{i}'
else:
pass
df_percentil.to_csv('percentil.csv', index = False)
return df_percentil
#=======================================================================================================
def histogramas2(rfm):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 9))
plt.subplots_adjust(wspace = 0.5)
fig.suptitle('Media de las variables RFM para cada valor de Score Total')
rfm.groupby('Total_score')['RECENCIA'].mean().plot(ax=ax1, kind ='bar', color = 'green')
ax1.set(xlabel = 'Score total' , ylabel = 'Recencia Promedio')
ax1.tick_params(axis='both', labelrotation = 45)
rfm.groupby('Total_score')['FRECUENCIA'].mean().plot(ax=ax2, kind ='bar', color = 'blue')
ax2.set(xlabel = 'Score total' ,ylabel ='Frecuencia Promedio')
ax2.tick_params(axis='both', labelrotation = 45)
rfm.groupby('Total_score')['MONTO'].mean().plot(ax=ax3, kind ='bar', color = 'red')
ax3.set(xlabel = 'Score total' ,ylabel = 'Monto Promedio')
ax3.tick_params(axis='both', labelrotation = 45)
plt.show()
#=========================================================================================================
def segmentar(rfm, fecha_in, fecha_out):
"""Cuenta cantidad de clientes segun el RFMscore y genera un barplot. Requiere como argumentos 'rfm' que es
el dataframe final obtenido por la función rfm_scoring y 'fecha_in' y 'fecha_out', strings de las fechas
'fi_int' y 'ff_int' para poder etiquetar el nombre del dataframe guardado"""
lista = [str(i) for i in range(1,6)]
perm = [p for p in product(lista, repeat=3)]
tuple_to_string = [''.join(i) for i in perm]
dict_aux = {}
for k in tuple_to_string:
dict_aux[k] =[]
for i in dict_aux.keys():
dict_aux[i] = rfm[rfm['RFMScore']==i]['DNI']
    cant = {}  # to build a dataframe with RFMScore, number of merchants and percentage
for cat in dict_aux.keys():
cant[cat] = len(dict_aux[cat])
data = pd.DataFrame(list(cant.items()))
data = data.rename(columns={0: "RFMScore", 1: "cantidad"})
data['%'] = round(data['cantidad']/data['cantidad'].sum() * 100, 2)
    # plot barplots with the distribution of RFMScores
a4_dims = (15, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.barplot(ax=ax, y=data['%'], x=data['RFMScore'])
ax.set_xticklabels(rotation=90, labels=data['RFMScore'])
plt.show()
return data
#=============================================================================================================
def label_segmentos(rfm, fecha_in, fecha_out):
"""Función que segmenta los clientes según su Frecuencia y Recencia en 10 grupos. Además se crea un diccionario
con los dni's en cada categoría en el intervalo de tiempo analizado. Finalmente genera un barplot con la fracción
de clientes en cada categoría. Requiere como argumentos 'rfm' que es el dataframe final obtenido por la función
rfm_scoring y 'fecha_in' y 'fecha_out', strings de las fechas 'fi_int' y 'ff_int' para poder etiquetar el nombre
del dataframe guardado. """
segt_map = {
r'[1-2][1-2]': 'Hibernando',
r'[1-2][3-4]': 'En Riesgo',
r'[1-2]5': 'No se pueden perder',
r'3[1-2]': 'Cercanos a Hibernar',
r'33': 'Necesitan Atencion',
r'[3-4][4-5]': 'Leales',
r'41': 'Prometedores',
r'51': 'Reciente operativo',
r'[4-5][2-3]': 'Potencialmente Leales',
r'5[4-5]': 'Campeones'
}
rfm['Segment'] = rfm['R_Quintil'].map(str) + rfm['F_Quintil'].map(str)
rfm['Segment'] = rfm['Segment'].replace(segt_map, regex=True)
rfm.to_csv(f'rfm-segmentos-{fecha_in}--{fecha_out}.csv', index = False)
dic_dni = {'Campeones': [], 'En Riesgo': [], 'Reciente operativo': [],
'Leales': [], 'Potencialmente Leales': [], 'Hibernando': [], 'Necesitan Atencion': [],
'Cercanos a Hibernar': [], 'No se pueden perder': [], 'Prometedores': []}
for i in dic_dni.keys():
dic_dni[i] = rfm[rfm['Segment']==i]['DNI']
    cant = {}  # to build a dataframe with segment, number of CUITs and percentage
for cat in dic_dni.keys():
cant[cat] = len(dic_dni[cat])
print(f'La cantidad de comercios en el segmento {cat} es de: {len(dic_dni[cat])}')
rfm_segmentado = pd.DataFrame(list(cant.items()))
rfm_segmentado = rfm_segmentado.rename(columns={0: "Segment", 1: "cantidad"})
rfm_segmentado['%'] = round(rfm_segmentado['cantidad']/rfm_segmentado['cantidad'].sum() * 100, 2)
rfm_segmentado = rfm_segmentado.set_index('Segment')
rfm_segmentado.to_csv(f'line_plot-{fecha_in}--{fecha_out}.csv')
#print('-------------------------------------------------------------------------------------------------')
#print('Distribución porcentual de los clientes según los segmentos')
#a4_dims = (15, 8)
#fig, ax = plt.subplots(figsize=a4_dims)
#m=sns.barplot(ax=ax, y=rfm_segmentado['%'], x=rfm_segmentado.index)
#m.set_xticklabels(rotation=45, labels=rfm_segmentado.index)
#fig.savefig(f"barplot-seg-{fecha_in}--{fecha_out}.pdf")
#plt.show()
return rfm_segmentado, dic_dni
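# Usage sketch (dates hypothetical; expects the R_Quintil/F_Quintil columns created by rfm_scoring):
# rfm_segmentado, dic_dni = label_segmentos(rfm, '20200101', '20200331')
# e.g. a customer with R_Quintil=5 and F_Quintil=5 maps to '55', which segt_map labels 'Campeones'.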
#================================================================================================================
def evolucion_trimestral_segmentos(df_lineas):
plt.figure(figsize = (15,10))
plt.plot('Trimestre', 'Campeones', data = df_lineas, marker = 'o', markerfacecolor = 'blue', markersize = 6, color = 'blue', linewidth = 1 )
plt.plot('Trimestre', 'Leales', data = df_lineas, marker = 'o', markerfacecolor = 'red', markersize = 6, color = 'red', linewidth = 1 )
plt.plot('Trimestre', 'Potencialmente Leales', data = df_lineas, marker = 'o', markerfacecolor = 'green', markersize = 6, color = 'green', linewidth = 1 )
plt.plot('Trimestre', 'Reciente operativo', data = df_lineas, marker = 'o', markerfacecolor = 'black', markersize = 6, color = 'black', linewidth = 1 )
plt.plot('Trimestre', 'Prometedores', data = df_lineas, marker = 'o', markerfacecolor = 'olive', markersize = 6, color = 'olive', linewidth = 1 )
plt.plot('Trimestre', 'No se pueden perder', data = df_lineas, marker = 'o', markerfacecolor = 'yellow', markersize = 6, color = 'yellow', linewidth = 1 )
plt.plot('Trimestre', 'En Riesgo', data = df_lineas, marker = 'o', markerfacecolor = 'blue', markersize = 6, color = 'skyblue', linewidth = 1 )
plt.plot('Trimestre', 'Necesitan Atencion', data = df_lineas, marker = 'o', markerfacecolor = 'brown', markersize = 6, color = 'brown', linewidth = 1 )
plt.plot('Trimestre', 'Cercanos a Hibernar', data = df_lineas, marker = 'o', markerfacecolor = 'magenta', markersize = 6, color = 'magenta', linewidth = 1 )
plt.plot('Trimestre', 'Hibernando', data = df_lineas, marker = 'o', markerfacecolor = 'pink', markersize = 6, color = 'pink', linewidth = 1 )
plt.legend(loc = 'upper right', prop ={'size': 12})
plt.xticks(ha='right',rotation=45)
plt.xlabel('Trimestre')
plt.ylabel('% poblacion')
plt.savefig('lineas_tiempo.pdf')
plt.show()
#===============================================================================================================
def migraciones(fecha_in_1, fecha_in_2, fecha_out_1, fecha_out_2):
df1 = pd.read_csv(f'rfm-segmentos-{fecha_in_1}--{fecha_out_1}.csv')
    df2 = pd.read_csv(f'rfm-segmentos-{fecha_in_2}--{fecha_out_2}.csv')
"""Create a table of simulated RL (Rescorla Wagner) data"""
import os
import argparse
import pandas as pd
import numpy as np
from modelmodel.behave import behave
from modelmodel.behave import reinforce
from modelmodel.hrf import double_gamma as dg
from modelmodel.dm import convolve_hrf
parser = argparse.ArgumentParser(
description="Create a table of simulated RL (Rescorla Wagner) data",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"name", type=str,
help="Name of table"
)
parser.add_argument(
"N", type=int,
help="Number of samples"
)
parser.add_argument(
"--behave", type=str, default='learn',
help="Behavior learning mode (learn, random)"
)
parser.add_argument(
"--n_cond", type=int, default=1,
help="N cond"
)
parser.add_argument(
"--n_trials", type=int, default=60,
help="N trials/cond"
)
parser.add_argument(
"--alpha",
type=float, default=None,
help="Set alpha values"
)
parser.add_argument(
"--convolve", type=bool, default=False,
help="Convolve each col with the (canonical) double gamma HRF"
)
parser.add_argument(
"--seed",
default=42, type=int,
help="RandomState seed"
)
args = parser.parse_args()
prng = np.random.RandomState(args.seed)
dfs = []
for n in range(args.N):
if args.behave == 'learn':
trial, acc, p, prng = behave.learn(
args.n_cond, args.n_trials,
loc=prng.normal(3, .3), prng=prng
)
elif args.behave == 'random':
trial, acc, p, prng = behave.random(
args.n_cond, args.n_trials, prng=prng
)
else:
raise ValueError('--behave not understood')
df, rlpars = reinforce.rescorla_wagner(
trial, acc, p, alpha=args.alpha, prng=prng
)
# del df['rand']
l = trial.shape[0]
df['count'] = np.repeat(n, l)
    df['index'] = np.arange(l, dtype=int)  # builtin int; np.int is deprecated/removed in NumPy
dfs.append(df)
df = pd.concat(dfs, axis=0)
import pandas as pd
import numpy as np
import psycopg2
from sklearn.model_selection import KFold
import Constants
import sys
from pathlib import Path
output_folder = Path(sys.argv[1])
output_folder.mkdir(parents=True, exist_ok=True)
# update database credentials if MIMIC data stored in postgres database
conn = psycopg2.connect(
"dbname=mimic user=darius host='/var/run/postgresql' password=password")
pats = pd.read_sql_query('''
select subject_id, gender, dob, dod from public.patients
''', conn)
n_splits = 12
pats = pats.sample(frac=1, random_state=42).reset_index(drop=True)
kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
for c, i in enumerate(kf.split(pats, groups=pats.gender)):
pats.loc[i[1], 'fold'] = str(c)
adm = pd.read_sql_query('''
select subject_id, hadm_id, insurance, language,
religion, ethnicity,
admittime, deathtime, dischtime,
HOSPITAL_EXPIRE_FLAG, DISCHARGE_LOCATION,
diagnosis as adm_diag
from public.admissions
''', conn)
df = pd.merge(pats, adm, on='subject_id', how='inner')
def merge_death(row):
    if not(pd.isnull(row.deathtime)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 21:02:07 2021
@author: suriyaprakashjambunathan
"""
# Fitting the nan values with the average
def avgfit(l):
na = pd.isna(l)
arr = []
for i in range(len(l)):
if na[i] == False:
arr.append(l[i])
avg = sum(arr)/len(arr)
fit_arr = []
for i in range(len(l)):
if na[i] == False:
fit_arr.append(l[i])
elif na[i] == True:
fit_arr.append(avg)
return(fit_arr)
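# Worked example (pandas is imported below as pd, before this helper is first called):
# avgfit([1.0, float('nan'), 3.0]) -> [1.0, 2.0, 3.0], i.e. the NaN is replaced by the mean
# of the non-missing values.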
# Weighted Mean Absolute Percentage Error
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = list(y_true), list(y_pred)
l = len(y_true)
num = 0
den = 0
for i in range(l):
num = num + (abs(y_pred[i] - y_true[i]))
den = den + y_true[i]
return abs(num/den) * 100
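# Worked example: mean_absolute_percentage_error([100, 200], [110, 190])
# -> (|110-100| + |190-200|) / (100 + 200) * 100 = 20/300*100 ≈ 6.67 (a weighted MAPE, as noted above).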
# Importing the Libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import explained_variance_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier
#Regressors
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import TheilSenRegressor
from sklearn.linear_model import HuberRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeRegressor
from sklearn.linear_model import LassoLarsIC
import warnings
warnings.simplefilter(action='ignore')
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = pd.DataFrame(Xi)
#y
bw = avgfit(list(dataset['bandwidth']))
dataset['bandwidth'] = bw
for i in range(len(bw)):
if bw[i] < 100:
bw[i] = 'Class 1'
elif bw[i] >= 100 and bw[i] < 115:
bw[i] = 'Class 2'
elif bw[i] >= 115 and bw[i] < 120:
bw[i] = 'Class 3'
elif bw[i] >= 120 and bw[i] < 121:
bw[i] = 'Class 4'
elif bw[i] >= 121 and bw[i] < 122:
bw[i] = 'Class 5'
elif bw[i] >= 122 :
bw[i] = 'Class 6'
gain =avgfit(list(dataset['gain']))
dataset['gain'] = gain
for i in range(len(gain)):
if gain[i] < 1.3:
gain[i] = 'Class 1'
elif gain[i] >= 1.3 and gain[i] < 1.5:
gain[i] = 'Class 2'
elif gain[i] >= 1.5 and gain[i] < 2.4:
gain[i] = 'Class 3'
elif gain[i] >= 2.4 and gain[i] < 2.7:
gain[i] = 'Class 4'
elif gain[i] >= 2.7 and gain[i] < 2.9:
gain[i] = 'Class 5'
elif gain[i] >= 2.9 and gain[i] < 3.5:
gain[i] = 'Class 6'
vswr =avgfit(list(dataset['vswr']))
dataset['vswr'] = vswr
for i in range(len(vswr)):
if vswr[i] >= 1 and vswr[i] < 1.16:
vswr[i] = 'Class 1'
elif vswr[i] >= 1.16 and vswr[i] < 1.32:
vswr[i] = 'Class 2'
elif vswr[i] >= 1.32 and vswr[i] < 1.5:
vswr[i] = 'Class 3'
elif vswr[i] >= 1.5 and vswr[i] < 2:
vswr[i] = 'Class 4'
elif vswr[i] >= 2 and vswr[i] < 4:
vswr[i] = 'Class 5'
elif vswr[i] >= 4:
vswr[i] = 'Class 6'
y1 = pd.DataFrame(bw)
y2 = pd.DataFrame(gain)
y3 = pd.DataFrame(vswr)
#!/usr/bin/env python3
"""<NAME>, Programming Assignment 3, preprocessing.py
This module provides the Preprocessor class.
"""
# Standard library imports
from pathlib import Path
import collections as c
import typing as t
# Third party libraries
import numpy as np
import pandas as pd
# Local imports
from p4.preprocessing.jenks import compute_two_break_jenks
from p4.preprocessing.split import make_splits
class Preprocessor:
def __init__(self, dataset_name: str, dataset_meta: dict, data_dir: Path):
self.dataset_name = dataset_name
self.dataset_meta = dataset_meta
self.data_dir = Path(data_dir)
self.dataset_src: Path = self.data_dir / dataset_meta["data_filename"]
self.names_meta = pd.DataFrame(self.dataset_meta["names_meta"]).set_index("name")
self.names = list(self.names_meta.index.values)
self.imputed_data: t.Union[pd.DataFrame, None] = None
self.numeric_columns: t.Union[list, None] = None
self.jenks_breaks: dict = {}
self.discretize_dict = c.defaultdict(lambda: {})
self.data_classes: t.Union[c.OrderedDict, None] = None
self.drops = ["sample_code_number", "model_name", "vendor_name", "erp"]
def __repr__(self):
return f"{self.dataset_name} Loader"
def compute_natural_breaks(self, numeric_cols: list = None, n_breaks=2, exclude_ordinal=True) -> pd.DataFrame:
"""
Compute two-class natural Jenks breaks for each numeric column.
:param numeric_cols: List of numeric columns to compute breaks for
:param n_breaks: Number of breaks to split list into
:param exclude_ordinal: True to exclude ordinal columns
:return: Dataframe of indexed break assignments
"""
if n_breaks != 2:
msg = "Jenks breaks are only available for two classes / breaks."
raise NotImplementedError(msg)
# Select all numeric columns if none are provided
numeric_cols = self.get_numeric_columns() if numeric_cols is None else numeric_cols
# If indicated, remove ordinal columns
if exclude_ordinal:
ordinal_cols = self.names_meta[self.names_meta.data_class == "ordinal"].index
numeric_cols = [x for x in numeric_cols if x not in ordinal_cols]
for numeric_col in numeric_cols:
values = self.data[numeric_col].tolist()
self.jenks_breaks[numeric_col] = compute_two_break_jenks(values)
self.jenks_breaks = pd.DataFrame.from_dict(self.jenks_breaks).transpose()
self.jenks_breaks.sort_values(by="gcvf", ascending=False, inplace=True)
return self.jenks_breaks
def discretize(self, discretize_dict: dict) -> pd.DataFrame:
"""
Discretize indicated columns using provided discretize_dict.
:param discretize_dict: Dictionary keyed by column
:return: Discretized columns
Example discretize_dict structure:
{"bare_nuclei": {"n_bins": 2, "binning": "equal_width"},
"normal_nucleoli": {"n_bins": 2, "binning": "equal_width"}}
"""
self.discretize_dict = c.defaultdict(lambda: {}, discretize_dict)
for col, bin_dict in self.discretize_dict.items():
frame, retbins = self._discretize(self.data[col], bin_dict["n_bins"], bin_dict["binning"])
self.data.drop(axis=1, labels=col, inplace=True)
self.data = self.data.join(frame)
self.discretize_dict[col]["retbins"] = retbins
return self.data[list(discretize_dict.keys())]
def drop(self, labels: t.Union[list, None] = None) -> pd.DataFrame:
"""
Drop selected columns.
:param labels: Columns to drop
:return: Updated dataset
"""
if labels is None:
labels = self.drops
self.data.drop(axis=1, labels=labels, inplace=True, errors="ignore")
return self.data
def dummy(self, columns: t.Union[list[str], str, None] = "default") -> pd.DataFrame:
"""
Dummy categorical columns.
:param columns: 'default' for defaults, list to specify them, False / None to do nothing
:return: Data
"""
if columns == "default":
mask = self.names_meta["data_class"] == "categorical"
mask = mask & self.names_meta["feature"]
columns = self.names_meta[mask].index.values.tolist()
if columns:
self.data = pd.get_dummies(self.data, columns=columns)
# Update features list
self.features = [x for x in self.data if (x not in self.label) and (x not in self.index)]
index_cols = self.names_meta[self.names_meta["id"]].index.values
self.features = [x for x in self.data if x not in index_cols]
return self.data
def get_numeric_columns(self, exclude_index=True):
"""
Retrieve numeric columns using names metadata.
:return: List of numeric columns
"""
mask = self.names_meta["data_type"].isin(["int", "float"])
if exclude_index:
            mask = mask & ~self.names_meta["id"].astype(bool)  # exclude id columns; `is False` never matches a pandas Series
self.numeric_columns = self.names_meta[mask].index.tolist()
return self.numeric_columns
def identify_features_label_id(self) -> pd.DataFrame:
"""
Parse features, label, and ID columns from metadata.
:return: Modified dataframe
"""
# Identify features, label, and id columns
mask = self.names_meta["feature"]
self.features: list = self.names_meta[mask].index.values.tolist()
mask = self.names_meta["label"]
self.label: str = self.names_meta[mask].index.values[0]
self.data["index"] = list(range(len(self.data)))
self.index = "index"
# Set the index
self.data[self.index] = self.data[self.index].astype(int)
self.data.set_index(self.index, inplace=True)
return self.data
def impute(self, numeric_cols: t.Union[list[str], str] = "default", strategy: str = "mean"):
"""
Impute missing values of numeric columns.
:param strategy: Currently only mean is implemented
"""
if strategy != "mean":
raise NotImplementedError(f"Strategy {strategy} is not implemented.")
data = self.data.copy()
if numeric_cols == "default":
numeric_cols = list(data.select_dtypes(np.number))
numeric_cols = [x for x in numeric_cols if x in self.data_classes]
for col in numeric_cols:
is_ordinal = True if self.data_classes[col] == "ordinal" else False
self.data[col] = self._impute(data[col], strategy, is_ordinal)
return self.data
def load(self) -> pd.DataFrame:
"""
Load CSV of dataset for Projects 1 to 4 into a dataframe.
:return: Loaded dataset
"""
# Set column names, replace missing values with NaNs, and set data types
na_values = self.dataset_meta["missing"]
dtypes = self.make_dtypes(self.names_meta.data_type.to_dict())
kwargs = {"names": self.names,
"na_values": na_values,
"dtype": dtypes}
if self.dataset_meta["header"]:
kwargs.update({"header": 0})
        self.data = pd.read_csv(self.dataset_src, **kwargs)
import sys
import pandas as pd
import numpy as np
from shutil import copyfile
from datetime import datetime,timedelta
input_files = {}
if len(sys.argv) > 1:
input_files['model_data'] = sys.argv[1]
print("Model-Data-File: %s"%input_files['model_data'])
input_files['unit'] = sys.argv[2]
print("Unit-Data-File: %s"%input_files['unit'] )
input_files['time_series'] = sys.argv[3]
print("Time-Series-File: %s"%input_files['time_series'])
# Output Files
output_files = {'time_series':"time_series_spine.csv",
'model_data':'model_data_spine.xlsx'}
# sys.argv[1] is empty if script is not called from Spine
else:
input_files['model_data'] = "model_data.xlsx"
input_files['unit'] = "unit_parameters.csv"
input_files['time_series'] = "time_series.csv"
# Output Files
output_files = {'time_series':'manuel/time_series_spine.csv',
'model_data':'manuel/model_data_spine.xlsx'}
def convert_model_data(input_files,output_files):
# Read Time series values
time_series = pd.read_csv(input_files['time_series'],sep=";",header=0,index_col=None)
# times = time_series['Time'].tolist()
power_load_raw = time_series['Load'].to_numpy()
heat_load_raw = time_series['Heat'].to_numpy()
wind_power_raw = time_series['Wind'].to_numpy()
# Read unit capacities
units = pd.read_csv(input_files['unit'],sep=";",header=0,index_col=0)
max_powers = units['power_max'].to_numpy()
power_cap = sum(max_powers[~np.isnan(max_powers)])
heat_cap = sum(g['heat_max'] for idx,g in units.iterrows())
print(power_cap)
print('Power Capacity:\t%.2f MW_el'%power_cap)
print('Heat Capacity:\t%.2f MW_th'%heat_cap)
# Read model data parameter
model_data = pd.read_excel(input_files['model_data'], sheet_name='model')
storage_data = pd.read_excel(input_files['model_data'], sheet_name='storage')
cost_data = pd.read_excel(input_files['model_data'], sheet_name='costs')
t_max = int(model_data['t_max'])
power_load_max = float(model_data['power_load_max'])
heat_load_max = float(model_data['heat_load_max'])
wind_supply = float(model_data['wind_supply'])
print('Time Steps: \t%i'%t_max)
t_end = min(t_max+1,len(time_series))
# Create times
start_date = model_data['start_date'].to_list()
date = start_date[0].to_pydatetime()
times = [date]
for ii in range(1,t_end):
date += timedelta(hours=1)
times.append(date)
time_start = times[0]
time_end = times[-1]
print('Time Start:\t%s'%time_start)
print('Time End: \t%s'%time_end)
    # Find initial state of units
units_on = pd.DataFrame()
units_before = []
times_before = []
values_before = []
for idx,g in units.iterrows():
t_before_end = max(g['t_down_0'],g['t_up_0'])
atime = time_start
for ii in range(1,t_before_end):
atime = atime-timedelta(hours=1)
units_before.append(g['name'])
times_before.append(atime)
if g['t_up_0'] == 0: # unit was down
values_before.append(0)
else: #unit was up
values_before.append(1)
units_on['Unit'] = units_before
units_on['Time'] = times_before
units_on['Status'] = values_before
#Save start time-1 in storage
storage_data['t-1'] = [start_date[0] -timedelta(hours=1)]
# Normalize power load time series
power_load_norm = power_load_raw/max(power_load_raw)
max_load = power_load_max/100*power_cap
power_load = max_load*power_load_norm
# Normalize heat load time series
heat_load_norm = heat_load_raw/max(heat_load_raw)
max_load = heat_load_max/100*heat_cap
heat_load = max_load*heat_load_norm
    # Take only the part until t_end
power_load = power_load[0:t_end-1]
heat_load = heat_load[0:t_end-1]
wind_power_raw = wind_power_raw[0:t_end-1]
print(sum(wind_power_raw))
# Normalize wind time series
wind_power_norm = wind_power_raw/sum(wind_power_raw)
wind_annual = wind_supply/100*sum(power_load)
wind_power = wind_annual*wind_power_norm
# Save time series in dataframe
time_series_out = pd.DataFrame()
time_series_out['Time'] = times[0:-1]
time_series_out['Load'] = power_load
time_series_out['Heat'] = heat_load
time_series_out['Wind'] = wind_power
# Set new model parameters
spine_parameter = pd.DataFrame()
spine_parameter['time_start'] = [time_start]
spine_parameter['time_end'] = [time_end]
#Save output files ---------------------------------------------------------------------
time_series_out.to_csv(output_files['time_series'],sep=";",index=False)
    with pd.ExcelWriter(output_files['model_data'])
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
# Defining important column names within the dataset
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column = "Modification Type(s)"
observed_mz_column_name = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
selected_aa = {"N", "S", "T"}
tmt_mod_regex = re.compile("\w(\d+)\((.+)\)")
# Defining important regular expression pattern to parse the dataset
regex_glycan_number_pattern = "\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
regex_pattern = "\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
uniprot_regex = re.compile("(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
glycan_regex = re.compile("(\w+)\((\d+)\)")
# Function to keep only PSM collections that do not consist exclusively of unglycosylated peptides
def filter_U_only(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 or True not in np.isin(unique_glycan, "U"):
# print(unique_glycan)
return True
return False
# Keep only the PSM collections that contain unglycosylated PSMs but are not exclusively unglycosylated
def filter_with_U(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 \
and \
True in np.isin(unique_glycan, "U"):
return True
return False
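# Usage sketch (an assumption about intended use, not taken from this file): these predicates have the
# signature expected by pandas' groupby(...).filter, e.g.
#   mixed_groups = psm_df.groupby(["Peptides", "Position"]).filter(filter_with_U)
# where 'psm_df' and the grouping keys are hypothetical; the functions only require a "Glycans" column.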
# parse modification mass and convert it from string to float
def get_mod_value(amino_acid):
if amino_acid.mods:
if amino_acid.mods[0].value.startswith("+"):
return float(amino_acid.mods[0].value[1:])
else:
return -float(amino_acid.mods[0].value[1:])
else:
return 0
# load fasta file into a dictionary
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
with open(fasta_file_path, "rt") as fasta_file:
result = {}
current_seq = ""
for line in fasta_file:
line = line.strip()
if line.startswith(">"):
if selected:
if selected_prefix + line[1:] in selected:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[current_seq] += line
return result
# Storing analysis result for each protein
class Result:
def __init__(self, df):
self.df = df
self.empty = df.empty
def separate_result(self):
normal_header = []
df = self.df
for c in df.columns:
if c in {"Protein", "Peptides", "Position", "Glycans"}:
normal_header.append(c)
else:
yield Result(df[normal_header+[c]])
def calculate_proportion(self, occupancy=True, separate_sample_df=False):
"""
calculate proportion of each glycoform from the dataset
:type occupancy: bool
        whether or not to calculate the proportion as occupancy, which would include the unglycosylated form.
"""
df = self.df.copy()
#print(df)
grouping_peptides = [# "Isoform",
"Peptides", "Position"]
grouping_position = [# "Isoform",
"Position"]
if "Protein" in df.columns:
grouping_peptides = ["Protein"] + grouping_peptides
grouping_position = ["Protein"] + grouping_position
if not occupancy:
df = df[df["Glycans"] != "U"]
if "Peptides" in df.columns:
gr = grouping_peptides
else:
gr = grouping_position
for _, g in df.groupby(gr):
if "Value" in g.columns:
total = g["Value"].sum()
for i, r in g.iterrows():
df.at[i, "Value"] = r["Value"] / total
else:
for c in g.columns:
if c not in {"Protein", "Peptides", "Position", "Glycans"}:
total = g[c].sum()
for i, r in g.iterrows():
df.at[i, c] = r[c] / total
if separate_sample_df:
return [df[gr + [c]] for c in df.columns]
return df
def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
"""
:type trust_byonic: bool
        whether or not to calculate raw values for each individual position assigned by Byonic
:type occupancy: bool
        whether or not to calculate the proportion as occupancy, which would include the unglycosylated form.
:type df: pd.DataFrame
"""
grouping_peptides = [# "Isoform",
"Peptides", "Position", "Glycans"]
grouping_position = [# "Isoform",
"Position", "Glycans"]
if df is None:
df = self.df
if "Protein" in df.columns:
grouping_peptides = ["Protein"] + grouping_peptides
grouping_position = ["Protein"] + grouping_position
if not occupancy:
df = df[df["Glycans"] != "U"]
if trust_byonic:
temp = df.set_index(grouping_position)
else:
temp = df.set_index(grouping_peptides)
if "Value" in temp.columns:
temp.rename(columns={"Value": name}, inplace=True)
else:
temp = temp.rename(columns={k: name for k in temp.columns if k not in {"Protein", "Peptides", "Position", "Glycans"}})
#print(temp)
return temp
# Object containing each individual protein. Much of the analysis logic is contained within this object.
# Each protein is assigned a GlypnirOComponent object holding a subset of its PD and Byonic data.
class GlypnirOComponent:
def __init__(self, filename, area_filename=None, replicate_id=None, condition_id=None, protein_name=None, protein_column=protein_column_name, minimum_score=0, trust_byonic=False, legacy=False, mode=1):
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column = "Modification Type(s)"
observed_mz_column_name = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
self.protein_column = protein_column
self.sequence_column = None
self.glycans_column = None
self.starting_position_column = None
self.modifications_column = None
self.observed_mz_column = None
if type(filename) == pd.DataFrame:
data = filename.copy()
else:
if filename.endswith(".xlsx"):
data = pd.read_excel(filename, sheet_name="Spectra")
elif filename.endswith(".txt"):
data = pd.read_csv(filename, sep="\t")
if mode == 1:
self.protein_column = protein_column_name
self.sequence_column = sequence_column_name
self.glycans_column = glycans_column_name
self.starting_position_column = starting_position_column_name
self.modifications_column = modifications_column
self.observed_mz_column = observed_mz_column_name
if area_filename is not None:
if type(area_filename) == pd.DataFrame:
file_with_area = area_filename
else:
if area_filename.endswith("xlsx"):
file_with_area = pd.read_excel(area_filename)
else:
file_with_area = pd.read_csv(area_filename, sep="\t")
# Joining of area and glycan data for each PSM using scan number as merging point
data["Scan number"] = pd.to_numeric(data["Scan #"].str.extract("scan=(\d+)", expand=False))
data = pd.merge(data, file_with_area, left_on="Scan number", right_on="First Scan")
        # Subset and filter the data to rows with a non-blank area value that pass the minimum score cutoff
self.protein_name = protein_name
self.data = data.sort_values(by=['Area'], ascending=False)
self.replicate_id = replicate_id
self.condition_id = condition_id
self.data = data[data["Area"].notnull()]
self.data = self.data[(self.data["Score"] >= minimum_score) &
((self.data[protein_column].str.contains(protein_name, regex=False)) | (self.data[protein_column].str.startswith(protein_name)))
# (data["Protein Name"] == ">"+protein_name) &
]
self.data = self.data[~self.data[protein_column].str.contains(">Reverse")]
elif mode == 2:
self.data, self.tmt_sample_info = self.process_tmt_pd_byonic(data)
if len(self.data.index) > 0:
self.empty = False
else:
self.empty = True
self.row_to_glycans = {}
self.glycan_to_row = {}
self.trust_byonic = trust_byonic
self.legacy = legacy
self.sequon_glycosites = set()
self.glycosylated_seq = set()
if mode == 2:
self.sequon_glycosites = {}
self.glycosylated_seq = {}
self.unique_rows = []
    # Method to calculate the glycan mass from its string representation, using a regular expression and a dictionary of glycan blocks and their masses
def calculate_glycan(self, glycan):
current_mass = 0
current_string = ""
for i in glycan:
current_string += i
if i == ")":
s = glycan_regex.search(current_string)
if s:
name = s.group(1)
amount = s.group(2)
current_mass += glycan_block_dict[name]*int(amount)
current_string = ""
return current_mass
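    # Worked example (the block names are assumptions; they must be keys of sequal's glycan_block_dict):
    # calculate_glycan("HexNAc(2)Hex(3)") would return
    # 2 * glycan_block_dict["HexNAc"] + 3 * glycan_block_dict["Hex"].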
#process tmt_pd_byonic dataframe
def process_tmt_pd_byonic(self, df):
pattern = re.compile("\((\w+), (\w+)\)/\((\w+), (\w+)\)")
samples = {}
df = df[(df["Search Engine Rank"] == 1) & (df["Quan Usage"] == "Use")]
for c in df.columns:
s = pattern.search(c)
if s:
if s.group(4) not in samples:
samples[s.group(4)] = set()
samples[s.group(4)].add(s.group(3))
if s.group(2) not in samples:
samples[s.group(2)] = set()
samples[s.group(2)].add(s.group(1))
return df, samples
# process the protein data
def process(self, mode=1, tmt_info=None, tmt_minimum=2, **kwargs):
for k in kwargs:
if k in self.__dict__:
setattr(self, k, kwargs[k])
for i, r in self.data.iterrows():
glycan_dict = {}
if mode == 1:
search = sequence_regex.search(r[self.sequence_column])
# get peptide sequence without flanking prefix and suffix amino acids then create a Sequence object from the string
seq = Sequence(search.group(0))
                # get the unformatted string from the Sequence object. This unformatted string contains a "." at both ends
elif mode == 2:
seq = Sequence(r[self.sequence_column].upper())
stripped_seq = seq.to_stripped_string()
origin_seq = r[self.starting_position_column] - 1
self.data.at[i, "origin_start"] = origin_seq
            # Parse glycans from the glycan column into a list
glycans = []
if mode == 2:
assert tmt_info is not None
score = 0
tmt_pass = False
for c in tmt_info:
tmt_data = r.loc[tmt_info[c]]
if tmt_data.count() >= tmt_minimum:
score += 1
if score >= len(tmt_info):
tmt_pass = True
self.data.at[i, "tmt_pass"] = tmt_pass
if not tmt_pass:
pass
if pd.notnull(r[self.glycans_column]):
glycans = r[self.glycans_column].split(",")
if mode == 1:
if search:
                    # store the unformatted sequence without "." at both ends into the dataframe
self.data.at[i, "stripped_seq"] = stripped_seq.rstrip(".").lstrip(".")
# calculate the programmatic starting position of the sequence
glycan_reordered = []
#calculate the programmatic stopping position of the sequence
self.data.at[i, "Ending Position"] = r[self.starting_position_column] + len(self.data.at[i, "stripped_seq"])
self.data.at[i, "position_to_glycan"] = ""
if self.trust_byonic:
n_site_status = {}
p_n = r[self.protein_column].lstrip(">")
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
# creating dictionary storing the glycan and its mass
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
self.glycan_to_row[g] = i
glycosylated_site = []
                    # iterate through the unformatted sequence and assign each glycan to a modified position based on the modification mass
for aa in range(1, len(seq) - 1):
if seq[aa].mods:
try:
mod_value = float(seq[aa].mods[0].value)
round_mod_value = round(mod_value)
round_3 = round(mod_value, 3)
                                # if the glycan is found, store the position of the glycosylated amino acid on the protein sequence for later reference
if str(round_3) in glycan_dict:
seq[aa].extra = "Glycosylated"
pos = int(r[self.starting_position_column]) + aa - 2
self.sequon_glycosites.add(pos + 1)
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = seq[aa].value + str(pos + 1)
glycosylated_site.append(self.data.at[i, position] + "_" + str(round_mod_value))
glycosylation_count += 1
glycan_reordered.append(glycan_dict[str(round_3)])
except ValueError:
pass
if glycan_reordered:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
else:
# if the analysis is only done on peptide and glycan combination, we would only need to set whether the peptide is glycosylated and store the unformatted peptide sequence of the glycosylated one for later reference
if pd.notnull(r[self.glycans_column]):
glycans = r[self.glycans_column].split(",")
glycans.sort()
self.data.at[i, self.glycans_column] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
self.glycosylated_seq.add(self.data.at[i, "stripped_seq"])
#print(self.glycosylated_seq)
elif mode == 2:
self.data.at[i, "stripped_seq"] = stripped_seq
glycan_reordered = []
# calculate the programmatic stopping position of the sequence
self.data.at[i, "Ending Position"] = r[self.starting_position_column] + len(
self.data.at[i, "stripped_seq"])
if self.trust_byonic:
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
# creating dictionary storing the glycan and its mass
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
if r[self.protein_column] not in self.glycan_to_row:
self.glycan_to_row[r[self.protein_column]] = {}
self.glycan_to_row[r[self.protein_column]][g] = i
glycosylated_site = []
                    # iterate through the unformatted sequence and assign each glycan to a modified
                    # position based on the modification mass
if pd.notnull(r["Modifications"]):
mod_list = r["Modifications"].split(";")
for mod in mod_list:
search_mod = tmt_mod_regex.search(mod.strip())
if search_mod:
if search_mod.group(2) in glycans:
if r[self.protein_column] not in self.sequon_glycosites:
self.sequon_glycosites[r[self.protein_column]] = set()
self.sequon_glycosites[r[self.protein_column]].add(int(search_mod.group(1))-1 + r[self.starting_position_column])
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = stripped_seq[int(search_mod.group(1))-1].upper() + str(int(search_mod.group(1))-1 + r[self.starting_position_column])
glycosylated_site.append(self.data.at[i, position] + "_" + search_mod.group(2))
glycosylation_count += 1
glycan_reordered.append(search_mod.group(2))
if glycan_reordered:
if len(glycan_reordered) > 1:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
else:
self.data.at[i, "position_to_glycan"] = glycan_reordered[0]
if glycosylated_site:
if len(glycosylated_site) > 1:
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
else:
self.data.at[i, "glycoprofile"] = glycosylated_site[0]
else:
# if the analysis is only done on peptide and glycan combination, we would only need to set whether the peptide is glycosylated and store the unformatted peptide sequence of the glycosylated one for later reference
if pd.notnull(r[self.glycans_column]):
glycans = r[self.glycans_column].split(",")
glycans.sort()
self.data.at[i, self.glycans_column] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
if r[self.protein_column] not in self.glycosylated_seq:
self.glycosylated_seq[r[self.protein_column]] = set()
self.glycosylated_seq[r[self.protein_column]].add(self.data.at[i, "stripped_seq"])
# print(self.glycosylated_seq)
    # analyze the compiled data by identifying unique PSMs and calculating the cumulative raw area under the curve
def analyze(self, max_sites=0, combine_d_u=True, splitting_sites=False, debug=False, protein_column=protein_column_name, glycans_column=glycans_column_name, starting_position_column=starting_position_column_name, observed_mz_column=observed_mz_column_name, mode=1, tmt_info=None):
result = []
# sort the data first by area then score in descending order.
if mode == 1:
temp = self.data.sort_values(["Area", "Score"], ascending=False)
if self.trust_byonic:
grouping = ["stripped_seq", "glycoprofile", observed_mz_column]
else:
grouping = ["stripped_seq", glycans_column, starting_position_column, observed_mz_column]
elif mode == 2:
from scipy.stats import rankdata
temp = self.data[self.data["tmt_pass"] == True]
if self.trust_byonic:
grouping = [protein_column, "stripped_seq", "Modifications", "glycoprofile", "Charge"]
else:
grouping = [protein_column, "stripped_seq", glycans_column, starting_position_column, observed_mz_column]
area_columns = []
for a in tmt_info.values():
for aa in a:
area_columns.append(aa)
area_columns.sort(key=int)
# cells in glycan column with no glycan will be assigned a string "None"
temp[glycans_column] = temp[glycans_column].fillna("None")
if "glycoprofile" in temp.columns:
temp["glycoprofile"] = temp["glycoprofile"].fillna("U")
out = []
if self.trust_byonic:
# if trust byonic we would analyze by grouping the data at unformatted sequence, glycosylated positions and calculated m/z
if mode == 1:
seq_glycosites = list(self.sequon_glycosites)
elif mode == 2:
seq_glycosites = {}
for i in self.sequon_glycosites:
seq_glycosites[i] = list(self.sequon_glycosites[i])
seq_glycosites[i].sort()
for i, g in temp.groupby(grouping):
seq_within = []
# select row with highest area value in a group
if mode == 1:
max_area_row = g["Area"].idxmax()
elif mode == 2:
channel_ranks = []
for channel in area_columns:
g[channel+"_rank"] = pd.Series(rankdata(g[channel].values), index=g[channel].index)
channel_ranks.append(channel+"_rank")
g["score_rank"] = g.apply(lambda row: np.sum(row[channel_ranks]), axis=1)
max_area_row = g["score_rank"].idxmax()
unique_row = g.loc[max_area_row]
#print(unique_row)
#print(seq_glycosites, i)
if mode == 1:
for n in seq_glycosites:
#print(n, unique_row[starting_position_column], unique_row["Ending Position"])
# create a list of n glycosylation sites that can be found within the peptide sequence
if unique_row[starting_position_column] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][
int(n - unique_row[starting_position_column])].upper() + str(n))
if mode == 2:
if i[0] in seq_glycosites:
if seq_glycosites[i[0]]:
for n in seq_glycosites[i[0]]:
#print(n, unique_row[starting_position_column], unique_row["Ending Position"])
# create a list of n glycosylation sites that can be found within the peptide sequence
if unique_row[starting_position_column] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][int(n - unique_row[starting_position_column])].upper() + str(n))
glycosylation_count = 0
glycans = []
if pd.notnull(unique_row["position_to_glycan"]):
glycans = unique_row["position_to_glycan"].split(",")
# create a dataset of position, glycans associate to that position and area under the curve of them
if debug:
if seq_within:
self.unique_rows.append(unique_row)
for c in range(len(unique_row.index)):
if unique_row.index[c].endswith("_position"):
if pd.notnull(unique_row[unique_row.index[c]]):
pos = unique_row[unique_row.index[c]]
#print(pos)
#print(seq_within)
if glycans:
if mode == 1:
result.append({"Position": pos, "Glycans": glycans[glycosylation_count], "Value": unique_row["Area"]})
elif mode == 2:
dic = {"Protein": i[0], "Position": pos, "Glycans": glycans[glycosylation_count]}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
ind = seq_within.index(pos)
seq_within.pop(ind)
glycosylation_count += 1
if seq_within:
for s in seq_within:
if mode == 1:
result.append({"Position": s, "Glycans": "U", "Value": unique_row["Area"]})
elif mode == 2:
dic = {"Protein": i[0], "Position": pos, "Glycans": "U"}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
if result:
result = pd.DataFrame(result)
# sum area under the curve of those with the same glycosylation position and glycan composition
if mode == 1:
group = result.groupby(["Position", "Glycans"])
elif mode == 2:
group = result.groupby(["Protein", "Position", "Glycans"])
out = group.agg(np.sum).reset_index()
else:
if mode == 1:
out = pd.DataFrame([], columns=["Position", "Glycans", "Values"])
elif mode == 2:
out = pd.DataFrame([], columns=["Protein", "Position", "Glycans", "Values"])
else:
# if a peptide level analysis was done, the grouping would be on unformatted sequence, glycan combination, position of the peptide N-terminus, calculated m/z
for i, g in temp.groupby(grouping):
# select and create a dataset of unique psm compositing of the unformatted sequence, glycans, area under the curve and position of the peptide N-terminus
if mode == 1:
max_area_row = g["Area"].idxmax()
elif mode == 2:
for channel in area_columns:
g[channel+"_rank"] = pd.Series(rankdata(g[channel]), index=g[channel].index)
g["score_rank"] = g.apply(lambda row: np.sum(row[area_columns]), axis=1)
max_area_row = g["score_rank"].idxmax()
#print(g)
unique_row = g.loc[max_area_row]
if debug:
self.unique_rows.append(unique_row)
if unique_row[glycans_column] != "None":
if mode == 1:
result.append(
{"Peptides": i[0], "Glycans": i[1], "Value": unique_row["Area"], "Position": i[2]})
elif mode == 2:
dic = {"Protein": i[0], "Peptides": i[1], "Position": i[3], "Glycans": i[2]}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
else:
if mode == 1:
result.append({"Peptides": i[0], "Glycans": "U", "Value": unique_row["Area"], "Position": i[2]})
elif mode == 2:
dic = {"Protein": i[0], "Peptides": i[1], "Position": i[3], "Glycans": "U"}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
result = pd.DataFrame(result)
# sum those area under the curve with the same peptides, position and glycans
if mode == 1:
group = result.groupby(["Peptides", "Position", "Glycans"])
elif mode == 2:
group = result.groupby(["Protein", "Peptides", "Position", "Glycans"])
out = group.agg(np.sum, axis=0).reset_index()
#print(out)
return Result(out)
class GlypnirO:
def __init__(self, trust_byonic=False, get_uniprot=False, debug=False, parse_uniprot=False):
self.trust_byonic = trust_byonic
self.components = None
self.uniprot_parsed_data = pd.DataFrame([])
self.get_uniprot = get_uniprot
self.unique_dict = {}
self.debug = debug
self.parse_uniprot = parse_uniprot
def add_component(self, filename, area_filename, replicate_id, sample_id):
component = GlypnirOComponent(filename, area_filename, replicate_id, sample_id)
# loading of input experiment file
def add_batch_component(self, component_list, minimum_score, protein=None, combine_uniprot_isoform=True, legacy=False, protein_column=protein_column_name, starting_position_column=starting_position_column_name):
self.load_dataframe(component_list)
protein_list = []
if protein is not None:
self.components["Protein"] = pd.Series([protein]*len(self.components.index), index=self.components.index)
for i, r in self.components.iterrows():
comp = GlypnirOComponent(r["filename"], r["area_filename"], r["replicate_id"], condition_id=r["condition_id"], protein_name=protein, minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
self.components.at[i, "component"] = comp
print("{} - {}, {} peptides has been successfully loaded".format(r["condition_id"], r["replicate_id"], str(len(comp.data.index))))
else:
components = []
for i, r in self.components.iterrows():
data = pd.read_excel(r["filename"], sheet_name="Spectra")
data = data[data[starting_position_column].notnull()]
protein_id_column = protein_column
if combine_uniprot_isoform:
protein_id_column = "master_id"
for i2, r2 in data.iterrows():
# search for uniprot accession id in protein column name
# if the protein is not a decoy or labelled as common contaminant, the accession id would be saved into a master_id column. If no accession id, the whole protein name would be saved there instead.
if self.parse_uniprot:
search = uniprot_regex.search(r2[protein_column])
if not r2[protein_column].startswith(">Reverse") and not r2[protein_column].endswith("(Common contaminant protein)"):
if search:
data.at[i2, "master_id"] = search.groupdict(default="")["accession"]
if not self.get_uniprot:
protein_list.append([search.groupdict(default="")["accession"], r2[protein_column]])
if search.groupdict(default="")["isoform"] != "":
data.at[i2, "isoform"] = int(search.groupdict(default="")["isoform"][1:])
else:
data.at[i2, "isoform"] = 1
else:
protein_list.append([r2[protein_column], r2[protein_column]])
data.at[i2, "master_id"] = r2[protein_column]
data.at[i2, "isoform"] = 1
else:
protein_list.append([r2[protein_column], r2[protein_column]])
data.at[i2, "master_id"] = r2[protein_column]
data.at[i2, "isoform"] = 1
else:
protein_list.append([r2[protein_column], r2[protein_column]])
data.at[i2, "master_id"] = r2[protein_column]
data.at[i2, "isoform"] = 1
# read pd file
if r["area_filename"].endswith("xlsx"):
file_with_area = pd.read_excel(r["area_filename"])
else:
file_with_area = pd.read_csv(r["area_filename"], sep="\t")
for index, g in data.groupby([protein_id_column]):
u = index
if not u.startswith(">Reverse") and not u.endswith("(Common contaminant protein)"):
# merging of byonic and pd data for appropriate protein
comp = GlypnirOComponent(g, file_with_area, r["replicate_id"],
condition_id=r["condition_id"], protein_name=u,
minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
if not comp.empty:
components.append({"filename": r["filename"], "area_filename": r["area_filename"], "condition_id": r["condition_id"], "replicate_id": r["replicate_id"], "Protein": u, "component": comp})
yield i, r
print(
"{} - {} peptides has been successfully loaded".format(r["condition_id"],
r["replicate_id"]))
self.components = pd.DataFrame(components, columns=list(self.components.columns) + ["component", "Protein"])
if not self.get_uniprot:
protein_df = pd.DataFrame(protein_list, columns=["Entry", "Protein names"])
self.uniprot_parsed_data = protein_df
#print(self.uniprot_parsed_data)
def load_dataframe(self, component_list):
if type(component_list) == list:
self.components = pd.DataFrame(component_list)
elif type(component_list) == pd.DataFrame:
self.components = component_list
elif type(component_list) == str:
if component_list.endswith(".txt"):
self.components = pd.read_csv(component_list, sep="\t")
elif component_list.endswith(".csv"):
self.components = pd.read_csv(component_list)
elif component_list.endswith(".xlsx"):
self.components = pd.read_excel(component_list)
else:
raise ValueError("Input have to be list, pandas dataframe, or csv, xlsx, or tabulated txt filepath.")
else:
raise ValueError("Input have to be list, pandas dataframe, or csv, xlsx, or tabulated txt filepath.")
def process_components(self):
for i, r in self.components.iterrows():
# print("Processing {} - {} {} for {}".format(r["condition_id"], r["replicate_id"], r["Protein"], analysis))
r["component"].process()
# analysis of the compiled data
def analyze_components(self, relabel):
result = []
result_without_u = []
result_occupancy_no_calculation_u = []
for i, r in self.components.iterrows():
print("Analyzing", r["Protein"], r["condition_id"], r["replicate_id"], r["component"].protein_name)
unique_name = str(r["condition_id"]) + str(r["replicate_id"])
if unique_name not in self.unique_dict:
self.unique_dict[unique_name] = []
analysis_result = r["component"].analyze(debug=self.debug)
#print(analysis_result.df)
self.format_result(analysis_result, r, result, result_occupancy_no_calculation_u, result_without_u,
unique_name)
result_occupancy = self._summary_format(result, relabeling=relabel)
result_occupancy_with_u = self._summary_format(result, filter_with_U, True, relabeling=relabel)
result_glycoform = self._summary_format(result_without_u, relabeling=relabel)
tempdf_index_reset_result_occupancy_with_u = result_occupancy_with_u.reset_index()
tempdf_index_reset_result_glycoform = result_glycoform.reset_index()
result_occupancy_glycoform_sep = pd.concat(
[tempdf_index_reset_result_glycoform, tempdf_index_reset_result_occupancy_with_u])
# format the output with the correct column name for site specific or peptide level analysis
if self.trust_byonic:
if relabel:
grouping_array = ["Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide", "Labels", "Glycans"]
sorting_array = ["Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide", "Labels"]
else:
grouping_array = ["Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide", "Glycans"]
sorting_array = ["Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide"]
result_occupancy_glycoform_sep = result_occupancy_glycoform_sep.set_index(grouping_array)
result_occupancy_glycoform_sep = result_occupancy_glycoform_sep.sort_index(
level=sorting_array)
else:
if relabel:
grouping_array = ["Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides", "Labels", "Glycans"]
sorting_array = ["Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides", "Labels"]
else:
grouping_array = ["Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides", "Glycans"]
sorting_array = ["Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides"]
result_occupancy_glycoform_sep = result_occupancy_glycoform_sep.set_index(grouping_array)
result_occupancy_glycoform_sep = result_occupancy_glycoform_sep.sort_index(level=sorting_array)
print("Finished analysis.")
return {"Glycoforms":
result_glycoform,
"Occupancy":
result_occupancy,
"Occupancy_With_U":
result_occupancy_with_u,
"Occupancy_Without_Proportion_U":
result_occupancy_glycoform_sep}
def format_result(self, analysis_result, r, result, result_occupancy_no_calculation_u, result_without_u,
unique_name):
if not analysis_result.empty:
if self.debug:
self.unique_dict[unique_name] += r["component"].unique_rows
# get raw and proportional calculation of unique psm auc with unglycosylated peptide
a = analysis_result.to_summary(name="Raw", trust_byonic=self.trust_byonic)
pro = analysis_result.calculate_proportion()
#print(a)
b = analysis_result.to_summary(pro, "Proportion", trust_byonic=self.trust_byonic)
temp_df = self._summary(a, b, r)
# print(temp_df)
result.append(temp_df)
# proportion for glycoforms here are calculated without unglycosylated form.
a_without_u = analysis_result.to_summary(name="Raw", trust_byonic=self.trust_byonic, occupancy=False)
pro_without_u = analysis_result.calculate_proportion(occupancy=False)
b_without_u = analysis_result.to_summary(pro_without_u, "Proportion", trust_byonic=self.trust_byonic,
occupancy=False)
temp_df_without_u = self._summary(a_without_u, b_without_u, r)
result_without_u.append(temp_df_without_u)
temp_df_no_calculation_u = self._summary(a, b_without_u ,r)
result_occupancy_no_calculation_u.append(temp_df_no_calculation_u)
# summarize the data and collect uniprot protein information directly from the online uniprot database if get_uniprot is True
def _summary_format(self, result, filter_method=filter_U_only, select_for_u=False, relabeling=""):
#print(result)
result_data = pd.concat(result)
result_data = result_data.reset_index(drop=True)
accessions = result_data["Protein"].unique()
# print(self.uniprot_parsed_data)
if self.uniprot_parsed_data.empty:
if self.get_uniprot:
parser = UniprotParser(accessions, True)
data = []
for i in parser.parse("tab"):
frame = pd.read_csv(StringIO(i), sep="\t")
frame = frame.rename(columns={frame.columns[-1]: "query"})
data.append(frame)
self.uniprot_parsed_data = pd.concat(data, ignore_index=True)
not_in = []
for i in accessions:
if not (self.uniprot_parsed_data["query"] == i).any():
not_in.append([i, i])
not_in = pd.DataFrame(not_in, columns=["Entry", "Protein names"])
self.uniprot_parsed_data = pd.concat([self.uniprot_parsed_data[['Entry', 'Protein names']], not_in], ignore_index=True)
# print(self.uniprot_parsed_data)
else:
self.uniprot_parsed_data = self.uniprot_parsed_data.groupby(["Entry"]).head(1).reset_index().drop(["index"], axis=1)
result_data = result_data.merge(self.uniprot_parsed_data, left_on="Protein", right_on="Entry")
result_data.drop("Entry", 1, inplace=True)
if self.trust_byonic:
groups = result_data.groupby(by=["Protein", "Protein names",
# "Isoform",
"Position"])
else:
groups = result_data.groupby(by=["Protein", "Protein names",
# "Isoform",
"Position", "Peptides"])
result_data = groups.filter(filter_method)
if select_for_u:
result_data = result_data[result_data["Glycans"] == "U"]
if relabeling:
result_data = add_custom_glycan_categories(relabeling, result_data)
if self.trust_byonic:
result_data = result_data.rename({"Position": "Glycosylated positions in peptide"}, axis="columns")
if relabeling:
result_data = result_data.set_index(
["Label", "condition_id", "replicate_id", "Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide", "Categories", "Glycans"])
else:
result_data = result_data.set_index(
["Label", "condition_id", "replicate_id", "Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide", "Glycans"])
else:
result_data = result_data.rename({"Position": "Position peptide N-terminus"}, axis="columns")
if relabeling:
result_data = result_data.set_index(
["Label", "condition_id", "replicate_id", "Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides", "Categories", "Glycans"])
else:
result_data = result_data.set_index(
["Label", "condition_id", "replicate_id", "Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides", "Glycans"])
result_data = result_data.unstack(["Label", "condition_id", "replicate_id"])
result_data = result_data.sort_index(level=["Label", "condition_id", "replicate_id"], axis=1)
#result_data.to_csv("test.txt", sep="\t")
if self.trust_byonic:
if relabeling:
result_data = result_data.sort_index(level=["Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide", "Categories"])
else:
result_data = result_data.sort_index(level=["Protein", "Protein names",
# "Isoform",
"Glycosylated positions in peptide"])
else:
if relabeling:
result_data = result_data.sort_index(level=["Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides", "Categories"])
else:
result_data = result_data.sort_index(level=["Protein", "Protein names",
# "Isoform",
"Position peptide N-terminus", "Peptides"])
result_data.columns = result_data.columns.droplevel()
return result_data
# combine output from different protein, condition and replicate
def _summary(self, a, b, r=None, add_protein=True, condition=None, replicate=None):
#print(a)
#print(b)
temp_df = pd.concat([a, b], axis=1)
temp_df.columns.name = "Label"
temp_df = temp_df.stack()
lc = [temp_df]
if add_protein:
for c in ["Protein", "condition_id", "replicate_id"]:
lc.append(pd.Series([r[c]] * len(temp_df.index), index=temp_df.index, name=c))
else:
lc.append(pd.Series([condition] * len(temp_df.index), index=temp_df.index, name="condition_id"))
lc.append(pd.Series([replicate] * len(temp_df.index), index=temp_df.index, name="replicate_id"))
temp_df = pd.concat(lc, axis=1)
temp_df = temp_df.reset_index()
return temp_df
def process_tmt_pd_byonic(df):
pattern = re.compile("\((\w+), (\w+)\) \/ \((\w+), (\w+)\)")
samples = {}
df = df[(df["Search Engine Rank"] == 1) & (df["Quan Usage"] == "Use")]
for c in df.columns:
s = pattern.search(c)
if s:
if s.group(4) not in samples:
samples[s.group(4)] = set()
samples[s.group(4)].add(s.group(3))
if s.group(2) not in samples:
samples[s.group(2)] = set()
samples[s.group(2)].add(s.group(1))
return df, samples
def add_custom_glycan_categories(custom_name_file, dataframe):
custom_name =
|
pd.read_csv(custom_name_file, sep="\t")
|
pandas.read_csv
|
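# A minimal, self-contained sketch of the tab-delimited relabeling file that
# add_custom_glycan_categories() reads with pd.read_csv(..., sep="\t"); the
# column names "Glycans" and "Labels" below are illustrative assumptions only.
import io
import pandas as pd

example_relabel = io.StringIO("Glycans\tLabels\nHexNAc(2)Hex(9)\tHigh mannose\nU\tUnglycosylated\n")
custom_name_demo = pd.read_csv(example_relabel, sep="\t")
print(custom_name_demo)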
#%%
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import glob
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
import seaborn as sns
import fit_seq.viz
import git
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# matplotlib.use('Agg')
fit_seq.viz.pboc_style_mpl()
# Find date
workdir = os.getcwd().split('/')[-1]
DATE = int(workdir.split('_')[0])
RUN_NO = int(workdir.split('_')[1][-1])
# List matplotlib colormaps
cmaps = fit_seq.viz.mpl_cmaps_dict()
#%%
# Read features on plate layout
xl =
|
pd.ExcelFile(f'./{DATE}_plate_layout.xlsx')
|
pandas.ExcelFile
|
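# A minimal sketch of the pd.ExcelFile pattern used above for the plate layout;
# a throwaway workbook is written first so the snippet runs on its own (assumes
# the openpyxl engine is installed), and the sheet content is invented.
import pandas as pd

pd.DataFrame({"well": ["A1", "A2"], "strain": ["WT", "mutant"]}).to_excel("example_layout.xlsx", index=False)
xl_demo = pd.ExcelFile("example_layout.xlsx")
print(xl_demo.sheet_names)
layout_demo = xl_demo.parse(xl_demo.sheet_names[0])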
from typing import Optional
import pandas as pd
import pytest
from evidently.analyzers.cat_target_drift_analyzer import CatTargetDriftAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.options import OptionsProvider
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.dashboard.widgets.cat_output_drift_widget import CatOutputDriftWidget
@pytest.fixture
def widget() -> CatOutputDriftWidget:
options_provider = OptionsProvider()
widget = CatOutputDriftWidget("test_cat_output_drift_widget")
widget.options_provider = options_provider
return widget
def test_cat_output_widget_analyzer_list(widget: CatOutputDriftWidget) -> None:
assert widget.analyzers() == [CatTargetDriftAnalyzer]
@pytest.mark.parametrize(
"reference_data, current_data, data_mapping, kind, expected_result",
(
(
pd.DataFrame({"target": [1, 2, 3, 4]}),
pd.DataFrame({"target": [1, 2, 3, 1]}),
ColumnMapping(),
None,
BaseWidgetInfo(type="big_graph", title="Target Drift: detected, p_value=0.0", size=2),
),
(
pd.DataFrame({"target": [1, 2, 3, 4]}),
pd.DataFrame({"target": [1, 2, 3, 1]}),
ColumnMapping(),
"target",
BaseWidgetInfo(type="big_graph", title="Target Drift: detected, p_value=0.0", size=2),
),
(
pd.DataFrame({"data": [1, 2, 3, 4]}),
pd.DataFrame({"data": [1, 2, 3, 1]}),
ColumnMapping(target=None),
"target",
None,
),
(
|
pd.DataFrame({"prediction": [1, 2, 3, 4]})
|
pandas.DataFrame
|
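# A minimal sketch of the small reference/current frames built inline in the
# parametrized test above; plain pandas only, the widget call itself is not
# reproduced here.
import pandas as pd

reference_data = pd.DataFrame({"prediction": [1, 2, 3, 4]})
current_data = pd.DataFrame({"prediction": [1, 2, 3, 1]})
print(reference_data["prediction"].value_counts())
print(current_data["prediction"].value_counts())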
"""Compare performance
MPC>=2
Train w/ single panel
Train w/ all panels
Test clinvar or denovo-db
Does training on panel do better than MPC>=2?
Does training on panel do better than training w/ hold-one-out?
"""
from collections import defaultdict
import pandas as pd
import numpy, argparse
from scipy.stats import entropy
from sklearn import linear_model, metrics, tree, svm
from sklearn.neural_network import MLPClassifier
from io import StringIO  # sklearn.externals.six was removed in newer scikit-learn
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import ExtraTreesClassifier
def eval_pred(row, col):
if row[col] == row['y']:
if row['y'] == 1:
return 'CorrectPath'
return 'CorrectBenign'
if row['y'] == 1:
return 'WrongPath'
return 'WrongBenign'
def mk_basic_call(row, score_cols):
"""Call is pathogenic/1 if all scores are met"""
cutoffs = {'mpc':2, 'revel':.375, 'ccr':.9}
for col in score_cols:
if row[col] < cutoffs[col]:
return 0
return 1
def eval_mpc_raw(row, score_cols):
call = mk_basic_call(row, score_cols)
if row['y'] == 1:
if call == 1:
return 'CorrectPath'
return 'WrongPath'
# else benign
if call == 1:
return 'WrongBenign'
return 'CorrectBenign'
def print_data_stats(disease, test_df, train_df_pre, fout):
"""Remove testing data from training"""
key_cols = ['chrom', 'pos', 'ref', 'alt']
test_keys = {':'.join([str(x) for x in v]):True for v in test_df[key_cols].values}
crit = train_df_pre.apply(lambda row: not ':'.join([str(row[x]) for x in key_cols]) in test_keys, axis=1)
train_df = train_df_pre[crit]
print('train w/o testing data - %s: %d' % (disease, len(train_df)), file=fout)
test_panel_gene_count = len(set(test_df['gene']))
gg = test_df.groupby('y').size().reset_index().rename(columns={0:'size'})
if len(gg[gg.y==0]):
benign_ex = list(gg[gg.y==0]['size'])[0]
else:
benign_ex = 0
path_ex = list(gg[gg.y==1]['size'])[0]
print('test gene count: %d (%d pathogenic, %d benign)' % (test_panel_gene_count, path_ex, benign_ex), file=fout)
return train_df
def eval_basic_training(clin_type, test_df_init, fout_stats, fout_eval, cols):
"""Train w/ hold out one.
Also test mpc>2
"""
# train
# one gene at a time
acc_df_ls = []
genes = set(test_df_init['gene'])
for test_gene in genes:
sub_train_df = test_df_init[test_df_init.gene != test_gene]
tree_clf_sub = tree.DecisionTreeClassifier( max_depth=len(cols) )
X, y = sub_train_df[cols], sub_train_df['y']
tree_clf_sub.fit(X, y)
test_df_sub = test_df_init[test_df_init.gene == test_gene]
X_test_sub = test_df_sub[cols]
preds = tree_clf_sub.predict(X_test_sub)
test_df_sub['mpc_pred_holdOut'] = preds
test_df_sub.loc[:, 'PredictionStatusMPC_holdOut'] = test_df_sub.apply(lambda row: eval_pred(row, 'mpc_pred_holdOut'), axis=1)
acc_df_ls.append(test_df_sub)
test_df_final = pd.concat(acc_df_ls)
metrics_ls = ('PredictionStatusMPC_holdOut',)
for metric in metrics_ls:
counts = test_df_final.groupby(metric).size().reset_index().reset_index().rename(columns={0:'size'})
d = defaultdict(int)
for v in list(counts.values):
_, label, count = v
ls = (clin_type, 'no_disease', 'global_' + metric, label, str(count))
d[label] = count
print('\t'.join(ls), file=fout_eval)
tot_bad = d['WrongPath'] + d['WrongBenign']
ls = (clin_type, 'no_disease', 'global_' + metric, 'TotWrong', str(tot_bad))
print('\t'.join(ls), file=fout_eval)
# just mpc>2
test_df = test_df_init
X_test = test_df[cols]
#preds = tree_clf_sub.predict(X_test)
#test_df['mpc_pred_holdOut'] = preds
#test_df.loc[:, 'PredictionStatusMPC_holdOut'] = test_df.apply(lambda row: eval_pred(row, 'mpc_pred_holdOut'), axis=1)
# apply mpc>=2
test_df.loc[:, 'PredictionStatusMPC>2'] = test_df.apply(lambda x: eval_mpc_raw(x, cols), axis=1)
# acc_df_ls.append(test_df)
#test_df = pd.concat(acc_df_ls)
#metrics_ls = ('PredictionStatusMPC_holdOut', 'PredictionStatusMPC>2',)
metrics_ls = ('PredictionStatusMPC>2',)
for metric in metrics_ls:
counts = test_df.groupby(metric).size().reset_index().reset_index().rename(columns={0:'size'})
d = defaultdict(int)
for v in list(counts.values):
_, label, count = v
ls = (clin_type, 'no_disease', 'global_' + metric, label, str(count))
d[label] = count
print('\t'.join(ls), file=fout_eval)
tot_bad = d['WrongPath'] + d['WrongBenign']
ls = (clin_type, 'no_disease', 'global_' + metric, 'TotWrong', str(tot_bad))
print('\t'.join(ls), file=fout_eval)
def eval_disease_as_training(clin_type, disease, test_df_init, train_df_pre, fout_stats, fout_eval, cols):
"""Train w/ disease, and test w/ test_df (clinvar or denovo-db)"""
train_df = print_data_stats(disease, test_df_init, train_df_pre, fout_stats)
cols = ['mpc']
# train
tree_clf = tree.DecisionTreeClassifier( max_depth=len(cols) )
X, y = train_df[cols], train_df['y']
tree_clf.fit(X, y)
tree.export_graphviz(tree_clf, out_file=disease + '.dot')
# one gene at a time
# acc_df_ls = []
# genes = set(test_df_init['gene'])
# for test_gene in genes:
test_df = test_df_init
X_test = test_df[cols]
preds = tree_clf.predict(X_test)
test_df['mpc_pred_' + disease] = preds
test_df.loc[:, 'PredictionStatusMPC_' + disease] = test_df.apply(lambda row: eval_pred(row, 'mpc_pred_' + disease), axis=1)
# acc_df_ls.append(test_df)
# test_df = pd.concat(acc_df_ls)
metrics_ls = ('PredictionStatusMPC_' + disease,)
for metric in metrics_ls:
counts = test_df.groupby(metric).size().reset_index().reset_index().rename(columns={0:'size'})
d = defaultdict(int)
for v in list(counts.values):
_, label, count = v
ls = (clin_type, disease, 'global_' + metric, label, str(count))
d[label] = count
print('\t'.join(ls), file=fout_eval)
tot_bad = d['WrongPath'] + d['WrongBenign']
ls = (clin_type, disease, 'global_' + metric, 'TotWrong', str(tot_bad))
print('\t'.join(ls), file=fout_eval)
def main(args):
score_cols = args.score_cols.split('-')
FOCUS_GENES = ('SCN1A','SCN2A','KCNQ2', 'KCNQ3', 'CDKL5',
'PCDH19', 'SCN1B', 'SCN8A', 'SLC2A1',
'SPTAN1', 'STXBP1', 'TSC1')
# load test df
if 'clinvar' in args.test_file:
test_df = pd.read_csv(args.test_file, sep='\t').rename(columns={'clin_class':'y'})
elif 'denovo' in args.test_file:
test_df = pd.read_csv(args.test_file, sep='\t')
else:
raise ValueError('unrecognized test file: %s' % args.test_file)
# now load testing data from panels
# load genedx epi
disease_genedx_df = pd.read_csv(args.gene_dx, sep='\t')
disease_genedx_df.loc[:, 'y'] = disease_genedx_df.apply(lambda row: 1 if row['class']=='P' else 0, axis=1)
disease_genedx_df['Disease'] = 'genedx-epi'
crit = disease_genedx_df.apply(lambda row: row['gene'] in FOCUS_GENES, axis=1)
disease_genedx_limitGene_df= disease_genedx_df[crit]
disease_genedx_limitGene_df['Disease'] = 'genedx-epi-limitGene'
# load uc epi
disease_uc_df =
|
pd.read_csv(args.uc, sep='\t')
|
pandas.read_csv
|
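# A self-contained sketch of loading a tab-separated panel file the way
# disease_uc_df is loaded above; the columns "gene", "class" and "mpc" are
# assumptions for illustration, mirroring how the genedx frame is handled.
import io
import pandas as pd

example_panel = io.StringIO("gene\tclass\tmpc\nSCN1A\tP\t2.4\nKCNQ2\tB\t0.7\n")
disease_df_demo = pd.read_csv(example_panel, sep='\t')
disease_df_demo['y'] = (disease_df_demo['class'] == 'P').astype(int)
print(disease_df_demo)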
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Timeseries Analysis
.. module:: Stats
:platform: Unix
:synopsis:
.. moduleauthor: CEMAC (UoL)
.. description: This module was developed by CEMAC as part of the UNRESP
Project. This script takes CALPUFF concrec data from 2 models and compares
with observations.
From the output of timeseries.py normalise the data
:copyright: © 2019 University of Leeds.
:license: BSD-2 Clause.
Example:
To use::
coming soon
.. CEMAC_UNRESPForcastingSystem:
https://github.com/cemac/UNRESPForcastingSystem
"""
import os
import sys
import glob
import matplotlib as mpl
import pandas as pd
import warnings
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn import preprocessing
import pandas_profiling
warnings.filterwarnings("ignore")
# University System python may be broken
# If some one insists on using it...
BACKEND = mpl.get_backend()
if BACKEND == 'Qt4Agg' and sys.version_info[0] == 2:
# Fix the backend
print('swapping to Agg Backend')
mpl.pyplot.switch_backend('Agg')
Towns = ['ElPanama', 'Pacaya']
NormalisePlots = False
CompositePlots = False
ScatterPlots = False
def dateparse(x): return datetime.strptime(x, '%d/%m/%Y %H:%M')
# -------------------------------------------------------------------------- #
# Take data output from timeseries.py and normalise                         #
# #
# #
# ------------------------ 1. Normalise ------------------------------------ #
def gen_normalised_plots(town, plot=None):
"""gen_normalised_plots
..description: generate normalised plots from data sets, rescale to maximum
= 1?
..args:
town(str)
dataset(str)
"""
fname = 'Timeseries_obs_model_raw_processed.csv'
try:
ts = pd.read_csv('TimeSeries_Data/' + town + '/' + town + fname,
index_col=0, parse_dates=True)
except FileNotFoundError:
ts = pd.read_csv('TimeSeries_Data/' + town + '/' + town + '_' + fname,
index_col=0, parse_dates=True)
Obs = ts[town + '_KNN']
NAM = ts['NAM_area']
ECMWF = ts['ECMWF_area']
All = ts[[town + '_KNN', 'NAM_area', 'ECMWF_area']]
# The end of March is rubbish for El Panama
if town == 'ElPanama':
# Get rid of that
start_remove = pd.to_datetime('2017-3-25')
end_remove =
|
pd.to_datetime('2017-4-01')
|
pandas.to_datetime
|
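# A self-contained sketch of how the removal window above can be applied with
# pd.to_datetime and a boolean mask on a DatetimeIndex; the "value" column is
# hypothetical.
import pandas as pd

demo_ts = pd.DataFrame({"value": range(10)},
                       index=pd.date_range("2017-03-22", periods=10, freq="D"))
start_remove_demo = pd.to_datetime('2017-3-25')
end_remove_demo = pd.to_datetime('2017-4-01')
demo_ts = demo_ts[(demo_ts.index < start_remove_demo) | (demo_ts.index > end_remove_demo)]
print(demo_ts)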
# Modified from
# https://github.com/bhattbhavesh91/cowin-vaccination-slot-availability
import datetime
import json
import numpy as np
import requests
import pandas as pd
import streamlit as st
from copy import deepcopy
# Faking chrome browser
browser_header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
df_18 = pd.DataFrame()
df_45 = pd.DataFrame()
st.set_page_config(layout='wide', initial_sidebar_state='collapsed')
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_mapping():
df =
|
pd.read_csv("./district_list.csv")
|
pandas.read_csv
|
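# A minimal stand-in for the district mapping load above; the real app reads
# "./district_list.csv", here an in-memory CSV with assumed columns
# "district_id" and "district_name" keeps the snippet self-contained.
import io
import pandas as pd

mapping_csv = io.StringIO("district_id,district_name\n265,Bangalore Urban\n188,Gurgaon\n")
mapping_df = pd.read_csv(mapping_csv)
print(mapping_df.head())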
import talib
from datetime import datetime, tzinfo
from datetime import date
import yfinance as yf
import pandas as pd
import MetaTrader5 as mt5
import pytz
# ^BVSP
# data = yf.download("SPY", start="2020-09-01", end="2021-11-02")
# real = talib.CDLMORNINGSTAR(
# data['Open'], data['High'], data['Low'], data['Close'])
# print(real[real != 0])
if not mt5.initialize():
print("Inicialize faleid")
mt5.shutdown()
print(f"MT5 version: {mt5.__version__}")
print(f"Empresa: {mt5.__author__}")
# solicitamos 1 000 ticks de EURAUD
# euraud_ticks = mt5.copy_ticks_from("EURUSD", datetime(2021,8,28,13), 1000, mt5.COPY_TICKS_ALL)
# get the timezone of the ActivTrades broker
# timezone = pytz.timezone("Europe/Luxembourg")
fusoHorario = pytz.timezone("Europe/Luxembourg")
data_atual = datetime.now()
data_hora_luxembourg = data_atual.astimezone(fusoHorario)
data_lux_tx = data_hora_luxembourg.strftime('%d/%m/%Y %H:%M')
ano= int(date.strftime(data_hora_luxembourg, '%Y'))
mes= int(date.strftime(data_hora_luxembourg, '%m'))
dia= int(date.strftime(data_hora_luxembourg, '%d'))
print(f"HOra luxemburgo: {data_lux_tx}")
# criamos o objeto datetime no fuso horário UTC para que não seja aplicado o deslocamento do fuso horário local
print(f"TimeZone Luxemburgo: {data_lux_tx}")
# dataAtual = date.today()
# mes = int(gmt.strftime('%m'))
# ano = int(gmt.strftime('%Y'))
# dia = int(gmt.strftime('%d'))
# gmtL = datetime.now(gmt)
# dl = datetime.strftime(gmt, '%Y-%d-%m %H:%M')
# print(f"DAta Luxembourg: {dl}")
# utc_from = datetime(ano, mes, dia)
# gmt_from = datetime(ano, mes, dia, tzinfo=fusoHorario)
# request 200 EURUSD M5 bars starting from the current bar
rates = mt5.copy_rates_from_pos("EURUSD", mt5.TIMEFRAME_M5, 0, 200)
mt5.shutdown()
rates_frame = pd.DataFrame(rates)
rates_frame['time'] =
|
pd.to_datetime(rates_frame['time'], unit='s')
|
pandas.to_datetime
|
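# A self-contained sketch of the epoch-second conversion applied to the rates
# frame above; the rates are faked so no MetaTrader 5 terminal is needed.
import pandas as pd

fake_rates = pd.DataFrame({"time": [1633072800, 1633073100], "close": [1.1598, 1.1601]})
fake_rates['time'] = pd.to_datetime(fake_rates['time'], unit='s')
print(fake_rates)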
#!/usr/bin/env python
# coding: utf-8
# # Loading data
import pandas as pd
import plotly.express as px
from tqdm import tqdm
import functools
import numpy as np
from difflib import SequenceMatcher
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from datetime import datetime, timedelta
import pprint
import requests
import os
import getpass
import json
from queue import Queue
from threading import Thread
from time import time
import logging
import os
# caching in case of multiple calls.
@functools.lru_cache(maxsize=128)
def get_tiles(municipalityId: int) -> pd.DataFrame:
"""Fetches tile information for a municipality id.
Args:
municipalityId: id of the municipality as defined by the Federal Office of Statistics,
https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html
Return:
A dataframe containing the following columns:
[tileId, ll_lon, ll_lat, ur_lon, ur_lat]
tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.
ll_lon: longitude coordinate of the lower left corner of the tile.
ll_lat: latitude coordinate of the lower left corner of the tile.
ur_lon: longitude coordinate of the upper right corner of the tile.
ur_lat: latitude coordinate of the upper right corner of the tile.
If municipalityId is invalid, an error message is printed and an empty DataFrame is returned
"""
api_request = (
BASE_URL
+ f'/grids/municipalities/{municipalityId}'
)
data = oauth.get(api_request, headers=headers).json()
if(data.get('status') == None):
tileID = [t['tileId'] for t in data['tiles']]
ll_lon = [t['ll']['x'] for t in data['tiles']]
ll_lat= [t['ll']['y'] for t in data['tiles']]
ur_lon = [t['ur']['x'] for t in data['tiles']]
ur_lat = [t['ur']['y'] for t in data['tiles']]
else:
print(f'get_tiles: failed with status code {data.get("status")}. {data.get("message")}')
return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})
return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})
def get_municipalityID(name: str) -> np.array(int):
"""Converts a municipality name to ID
Args:
name of municipality.
Returns:
An array containing all the municipality ID's corresponding to the name.
If the name is invalid, an empty array is returned.
"""
return commune.loc[commune.GDENAME == name].GDENR.to_numpy()
def visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :
"""Visualizes coordinates in dataframe on map
Retrieves the columns named latitude and longitude and visualizes them on a map.
Args:
df: A dataframe containing the coordinates.
latitude: String key of the column in the dataframe containing the latitude.
longitude: String key of the column in the dataframe containing the longitude.
"""
fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,
mapbox_style="carto-positron")
fig.show()
def get_all_tiles_switzerland() -> pd.DataFrame:
"""Fetches the tile information for all the tiles in Switzerland.
Returns:
A DataFrame containing the tile information for every tile in Switzerland.
The format of the DataFrame is the same as the return of get_tiles()
"""
tiles = get_tiles(commune.GDENR.unique()[0])
for c in tqdm(commune.GDENR.unique().tolist()):
tiles = tiles.append(get_tiles(c))
return tiles
def get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches daily demographics
Fetches the daily demographics, age distribution, of the tiles.
Args:
tiles: Array of tile id's that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
A dataframe containing as a key the tileID and as columns ageDistribution and the maleProportion
+----------+-----------------------+---------------------+
| | ageDistribution | maleProportion |
+----------+-----------------------+---------------------+
| 44554639 | NaN | 0.49828359484672546 |
+----------+-----------------------+---------------------+
| 44271906 | [0.21413850784301758, | 0.493218 |
| | 0.27691012620925903, | |
| | 0.37422287464141846, | |
| | 0.13472850620746613] | |
+----------+-----------------------+---------------------+
In the example above tile 44554639 does not have any age distribution data.
The data is k-anonymized. Therefore, if some tiles are missing data it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score[t['tileId']] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return pd.DataFrame.from_dict(date2score).transpose()
def get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches hourly demographics of age categories for 24 hours
Fetches the hourly demographics, age distribution, of the tiles.
Age categories are the following 0 - 19, 20 - 39, 40 - 64, >64
Args:
tiles: Array of tile id's that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
DataFrame containing the demographics. The names
of the columns are:
[age_cat, age_distribution, male_proportion]
+----------+---------------------+---------+------------------+-----------------+
| | | age_cat | age_distribution | male_proportion |
+----------+---------------------+---------+------------------+-----------------+
| tileID | time | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |
+----------+---------------------+---------+------------------+-----------------+
| | ... | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches hourly male proportion and age categories for 24 hours
Args:
tiles: Array of tile id's that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
Returns a dictionary with as a key the tileID, and as a value an object that is as follows:
{tileID: {dateTime:{ "ageDistribution": [0-19, 20-39, 40-64, 64+], "maleProportion": value},
{dateTime2: ...}}}
26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,
0.2758632302284241,
0.362215131521225,
0.16940800845623016],
'maleProportion': 0.4727686941623688},
'2020-01-27T01:00:00': {'ageDistribution': None,
'maleProportion': 0.4896690547466278},
'2020-01-27T02:00:00': {'ageDistribution': None,
'maleProportion': 0.48882684111595154},
The data is k-anonymized. Therefore, if some values are None it means that no data was available
To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for dt in tqdm(dates, desc="get_hourly_demographics: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return date2score
data = get_hourly_demographics(tiles, day)
tile_id = []
time_data = []
age_distribution = []
age_cat = []
male_proportion = []
for i in data:
for time in data[i]:
if data[i][time].get("ageDistribution") != None:
for (idx,a) in enumerate(data[i][time].get("ageDistribution", [])):
age_cat.append(idx)
age_distribution.append(a)
tile_id.append(i)
time_data.append(time)
male_proportion.append(data[i][time].get("maleProportion"))
else:
tile_id.append(i)
time_data.append(time)
age_distribution.append(None)
male_proportion.append(data[i][time].get("maleProportion"))
age_cat.append(None)
return pd.DataFrame(data={'tileID': tile_id, "age_cat": age_cat, 'age_distribution':age_distribution, "male_proportion": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])
def get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches the daily density of tiles.
Fetches the daily density of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile id's for which daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
DataFrame containing the tileId and the score. The name of the column is:
[score]
The identifier of the row is based on the tileID
+----------+-------+
| | score |
+----------+-------+
| tileID | |
+----------+-------+
| 44394309 | 1351 |
+----------+-------+
| 44394315 | 1103 |
+----------+-------+
| 44460297 | 875 |
+----------+-------+
| 44488589 | 1387 |
+----------+-------+
| 44498028 | 678 |
+----------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
tileID = []
score = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
tileID.append(t['tileId'])
score.append(t["score"])
return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index("tileID")
def get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches the hourly density of tiles for 24 hours.
Fetches the hourly density of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile id's for which hourly density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
DataFrame containing the tileId and the score. The name of the column is:
[score]
The identifier of the row is based on the [tileID, time]
+----------+---------------------+-------+
| | | score |
+----------+---------------------+-------+
| tileID | time | |
+----------+---------------------+-------+
| 44394309 | 2020-01-27T00:00:00 | 52 |
| +---------------------+-------+
| | 2020-01-27T01:00:00 | 68 |
| +---------------------+-------+
| | 2020-01-27T02:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T03:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T04:00:00 | 69 |
+----------+---------------------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
print("getHourlyDensity")
for dt in tqdm(dates, desc="get_hourly_density: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
for t in oauth.get(api_request, headers=headers).json().get("tiles",[]):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = t['score']
return date2score
tiles_data = []
time_data = []
score = []
data = get_hourly_density(tiles, day)
for t in data:
for time in data[t]:
time_data.append(time)
tiles_data.append(t)
score.append(data[t][time])
return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])
def fetch_data_city(city: str) -> None:
"""Fetches the data for a city if the data is not yet cashed on the computer.
"""
compression = ".xz"
folder = os.path.join(".","data")
def file_path(file_name: str) -> str:
return os.path.join(folder, file_name)
if not(os.path.exists(folder)):
os.mkdir(folder)
tiles_path = file_path(f'{city}Tiles.pkl{compression}')
hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')
hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')
daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')
daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')
if not(os.path.isfile(tiles_path)):
tiles = get_tiles(get_municipalityID(city)[0])
tiles.to_pickle(tiles_path)
else:
tiles = pd.read_pickle(tiles_path)
if not(os.path.isfile(hourly_dem_path)):
hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())
hourly_dem.to_pickle(hourly_dem_path)
if not(os.path.isfile(hourly_density_path)):
hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())
hourly_dens.to_pickle(hourly_density_path)
if not(os.path.isfile(daily_density_path)):
get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)
if not(os.path.isfile(daily_demographics_path)):
get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)
def clean_cities_list(cities: [str]) -> [str]:
"""Cleans the list of cities by removing all the cities that are not found in the
official list of cities provided by the Federal Statistics Office.
Args:
List of cities to check and clean.
Return:
List containing a subset of the input list such that all elements are valid.
"""
invalid_cities = []
# validate that the city names are valid
for c in cities:
if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:
city = []
sim_value = []
for f in commune.GDENAME:
r = SequenceMatcher(None, c, f).ratio()
if r > 0.5:
city.append(f)
sim_value.append(r)
d =
|
pd.DataFrame(data={"city": city, "value": sim_value})
|
pandas.DataFrame
|
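# A self-contained sketch of the fuzzy-name suggestion frame built above with
# difflib.SequenceMatcher; the candidate commune names are invented.
import pandas as pd
from difflib import SequenceMatcher

query = "Geneve"
candidates = ["Genève", "Genolier", "Gersau"]
sim_value_demo = [SequenceMatcher(None, query, f).ratio() for f in candidates]
d_demo = pd.DataFrame(data={"city": candidates, "value": sim_value_demo})
print(d_demo.sort_values("value", ascending=False))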
import pickle
import pandas as pd
import requests
from bs4 import BeautifulSoup
import flask
from flask import Flask, request, render_template
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from konlpy.tag import Okt
from crawling_image.get_image import image_poster
from wordcloud_file.word_cloud import make_words_cloud
from pos_neg_graph.graph import percent_graph2
from flask import Flask, render_template
from flask_paginate import Pagination, get_page_args
t = Okt()
okt = Okt()
app = Flask(__name__)
# app.template_folder = ''
# users = list(range(100))
#
#
# def get_users(offset=0, per_page=10):
# return users[offset: offset + per_page]
# main page routing
@app.route("/")
@app.route("/index")
def index():
return flask.render_template('index.html')
# disable response caching
@app.after_request
def set_response_headers(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '0'
return response
# handle data prediction
@app.route('/predict', methods=['POST'])
# print("\n 테스트 정확도: %.4f" % (loaded_model.evaluate(X_test, y_test)[1]))
def make_prediction():
if request.method == 'POST':
url = request.form['url']
# image_url = url.split('basic')[0] + 'photoViewPopup' + url.split('basic')[1]
image_poster(url)
wordcloud_text = []
# url = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code=196839'
review_list = []
label_list = []
good_label_list = []
bad_label_list = []
good_review_list = []
bad_review_list = []
good_score_list = []
bad_score_list = []
score_list = []
## == page crawling ==
# send the request
naver_movie_page_url = url.split('basic')[0] + 'pointWriteFormList' + url.split('basic')[1] + '&page=1'
response = requests.get(naver_movie_page_url)
# extract the HTML text
html = response.text.strip()
# create a BeautifulSoup object
soup = BeautifulSoup(markup=html, features='html5lib')
# selector for the element we are looking for
page_selector = 'div.score_total em'
# find the elements
search_pages = soup.select(page_selector)
for link in search_pages:
if ',' in link.text:
a = link.text
a1 = a.split(',')[0]
a2 = a.split(',')[1]
total_pages = int(a1 + a2)
else:
total_pages = int(link.text)
pages = int(total_pages / 10)
if pages < 100:
final_pages = pages
else:
final_pages = 100
## == review crawling ==
for page in range(1, final_pages + 1):
# URL
naver_movie_url = url.split('basic')[0] + 'pointWriteFormList' + url.split('basic')[1] + f'&page={page}'
# send the request
response = requests.get(naver_movie_url)
# print(response)
# extract the HTML text
html = response.text.strip()
# create a BeautifulSoup object
soup = BeautifulSoup(markup=html, features='html5lib')
# selector for the element we are looking for
## find the reviews
for num in range(0, 11):
review_selector = f'div.score_reple p span#_filtered_ment_{num}'
# _filtered_ment_0
# find the elements
search_reviews = soup.select(review_selector)
for review in search_reviews:
review_list.append(review.text.strip())
review_list = list(filter(bool, review_list))
# review2 = pd.Series(review_list, name='review')
# movie_review = review2
# new_sentence = '엄청 재밌어요'
stopwords = ['의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과', '도',
'를', '으로', '자', '에', '와', '한', '하다']
max_len = 30
with open('./ml/tokenizer.pickle', 'rb') as f:
tokenizer = pickle.load(f)
# for review in movie_review:
# review_list.append(review)
# for new_sentence, review1 in zip(movie_review, review_list):
for new_sentence in review_list:
pos = new_sentence
new_sentence = okt.morphs(new_sentence, stem=True) # tokenize
new_sentence = [word for word in new_sentence if not word in stopwords] # remove stopwords
wordcloud_text.append(new_sentence)
encoded = tokenizer.texts_to_sequences([new_sentence]) # integer encoding
pad_new = pad_sequences(encoded, maxlen=max_len) # padding
score = float(loaded_model.predict(pad_new)) # predict
if (score > 0.5):
label = f'{int(score * 100)} %'
label_list.append(label)
good_label_list.append(label)
good_review_list.append(pos)
n = '긍정'
score_list.append(n)
good_score_list.append(n)
else:
label = f'{int((1 - score) * 100)} %'
label_list.append(label)
bad_label_list.append(label)
bad_review_list.append(pos)
n = '부정'
score_list.append(n)
bad_score_list.append(n)
result = zip(review_list, label_list, score_list)
result_len = len(review_list)
good_result = zip(good_label_list, good_review_list, good_score_list)
bad_result = zip(bad_label_list, bad_review_list, bad_score_list)
review = pd.Series(review_list, name='review')
label1 = pd.Series(label_list, name='label')
score1 = pd.Series(score_list, name='score')
final_result = pd.merge(review, label1, left_index=True, right_index=True)
final_result =
|
pd.merge(final_result, score1, left_index=True, right_index=True)
|
pandas.merge
|
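# A minimal sketch of the index-based pd.merge calls above that combine the
# review, label and score Series into one frame; the sample values are invented.
import pandas as pd

review_demo = pd.Series(["great movie", "boring"], name='review')
label_demo = pd.Series(["92 %", "78 %"], name='label')
score_demo = pd.Series(["positive", "negative"], name='score')
merged_demo = pd.merge(review_demo, label_demo, left_index=True, right_index=True)
merged_demo = pd.merge(merged_demo, score_demo, left_index=True, right_index=True)
print(merged_demo)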
import pandas as pd
import numpy as np
import os
import time
import calendar
import holidays
import logging
from tempset.logger import Logger
import matplotlib.pyplot as plt
class results(Logger):
def __init__(
self,
summary_file="../data/electric/summary.csv",
param_file="../data/electric/htgsetp_params_electric.csv",
case_study="I",
fig_dir="../figures/prob_dist",
fig_ext=".svg",
out_dir=None,
write_logfile=False
):
# start time for model run
self.out_dir = out_dir
self.start_time = time.time()
if self.out_dir is not None and not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
self.logfile = os.path.join(self.out_dir or ".", f'tempset_logfile_{self.start_time}.log')
self.write_logfile = write_logfile
# initialize console handler for logger
self.console_handler()
if self.write_logfile:
self.file_handler()
logging.info('Starting Analysis of Results')
# inherit logger class attributes
super(results, self).__init__(self.write_logfile, self.logfile)
# get values from arguments
self.summary_file = summary_file
self.param_file = param_file
self.case_study = case_study
self.fig_dir = fig_dir
self.fig_ext = fig_ext
plt.rcParams.update({"font.size": 16})
self.year = 2017
if case_study == "I":
self.months_to_simulate = [
"January",
"February",
"March",
"April",
"November",
"December",
]
self.labels = ["Setback", "No setback"]
elif case_study == "IIA":
self.months_to_simulate = [
"January",
"February",
"March",
"April",
"November",
"December",
]
self.labels = ["$T_{op,heat} \\leq 21~C$", "$T_{op,heat} > 21~C$"]
elif case_study == "IIB":
self.months_to_simulate = [
"January",
"February",
"March",
"April",
"November",
"December",
]
self.labels = ["$t_{p} > 0$", "$t_{p} = 0$"]
elif case_study == "III":
self.months_to_simulate = [
"May", "June", "July", "August", "September"]
self.labels = ["Setback", "No setback"]
else:
print("Enter valid case study. Valid entries")
if not os.path.exists(self.fig_dir):
os.makedirs(self.fig_dir)
#create directory
if self.out_dir is not None and not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
self.df, self.df_month, self.df_param = self.read_csv()
self.prob_distributions()
def prob_distributions(self):
"""
method to compute and plot probability distributions
"""
def set_foldername(fig_dir, m):
"""
function to create directories by month, in which monthly probability plots will be saved
"""
month = calendar.month_name[m + 1]
path_name = fig_dir + "/" + str(month)
os.chdir(path=fig_dir)
if not os.path.exists(month):
os.makedirs(month)
print(os.getcwd())
os.chdir("..")
return path_name, str(month)
def plotter(
data,
labels,
bin_size,
xlabel,
fig_name,
ylim=None,
title=None,
package="matplotlib",
):
"""
function to plot probability distributions
:param labels: list of str -> corresponding to baseline and modified case (=self.labels)
:param data: list of dataframe (length = 2) -> corresponding to baseline and modified for a given month
:param bin_size: int -> bin size for histogram
:param xlabel: x axis label
:param fig_name: str
:param ylim: list -> ylim of y axis
:param title: str
:param package: str -> 'matplotlib', don't try anything else!
"""
if package == "matplotlib":
plt.rcParams.update({"font.size": 20})
plt.hist(
data[0],
density=True,
bins=bin_size,
alpha=0.5,
histtype="bar",
color="tab:blue",
stacked=True,
label=labels[0],
)
plt.axvline(
x=data[0].median(),
color="darkblue",
linestyle="--",
linewidth=1.5)
plt.hist(
data[1],
density=True,
bins=bin_size,
alpha=0.5,
histtype="bar",
color="tab:orange",
stacked=True,
label=labels[1],
)
plt.axvline(
x=data[1].median(),
color="darkred",
linestyle="--",
linewidth=1.5)
plt.xlabel(xlabel=xlabel)
plt.ylabel(ylabel="PDF")
if ylim is not None:
plt.ylim(ylim)
if title is not None:
plt.title(title)
plt.legend()
plt.tight_layout()
plt.savefig(fig_name)
plt.clf()
return None
# parse data
df_list_1, df_list_2 = self.parse_data()
df_out = [] #empty list to append df
# for each month, we can compute a probability distribution
for m, (df_1, df_2) in enumerate(zip(df_list_1, df_list_2)):
df_1, df_2 = remove_holidays(df=df_1), remove_holidays(
df=df_2
) # removes weekends
df_1, df_2 = remove_federal_holidays(
df=df_1), remove_federal_holidays(
df=df_2) # removes federal holidays
df_e1, df_p1, df_tc1 = (
(df_1["daily_elec_total"]),
(df_1["daily_elec_peak"]),
(df_1["daily_tc_mean"]),
)
df_e2, df_p2, df_tc2 = (
(df_2["daily_elec_total"]),
(df_2["daily_elec_peak"]),
(df_2["daily_tc_mean"]),
)
data_e, data_p, data_tc = [
df_e1, df_e2], [
df_p1, df_p2], [
df_tc1, df_tc2]
path_name, month = set_foldername(fig_dir=self.fig_dir, m=m)
if month in self.months_to_simulate:
"Step 03: Use Plotter"
fig_name = path_name + "/" + "elec_consumption" + self.fig_ext
plotter(
data=data_e,
labels=self.labels,
bin_size=75,
xlabel="$y_1$ (kWh)",
fig_name=fig_name,
)
fig_name = path_name + "/" + "daily_peak" + self.fig_ext
plotter(
data=data_p,
labels=self.labels,
bin_size=75,
xlabel="$y_2$ (kWh)",
fig_name=fig_name,
title=month,
ylim=[0, 0.40],
) # lim = [0, 0.40]
fig_name = path_name + "/" + "daily_tc" + self.fig_ext
plotter(
data=data_tc,
labels=self.labels,
bin_size=75,
xlabel="$y_3$(%)",
fig_name=fig_name,
title=month,
) # ylim = [0, 1.30]
logging.info(f"Created figures for month: {calendar.month_name[m+1]}")
##export to a csv
data = {
'Month': [calendar.month_name[m + 1]],
'Median y1 (modified)': [df_e1.median()],
'Median y1 (baseline)': [df_e2.median()],
'Median y2 (modified)': [df_p1.median()],
'Median y2 (baseline)': [df_p2.median()],
'Median y3 (modified)': [df_tc1.median()],
'Median y3 (baseline)': [df_tc2.median()],
'Delta y1': [(df_e1.median() - df_e2.median()) / (df_e2.median())],
'Delta y2': [(df_p1.median() - df_p2.median()) / (df_p2.median())],
'Delta y3': [df_tc1.median() - df_tc2.median()]
}
df_out.append(pd.DataFrame(data))
df = (pd.concat(df_out)).reset_index(drop=True)
if self.out_dir is not None:
df.to_csv(os.path.join(self.out_dir, 'case_study_{}_results.csv'.format(self.case_study)))
return None
def parse_data(self):
"""
method to call self.set_query, and then partition data by month
:return
"""
df_p1, df_p2 = self.set_query(df=self.df_param)
simid_1, simid_2 = df_p1["sim_id"].values, df_p2["sim_id"].values
list_1, list_2 = [], []
for df in self.df_month:
df_1, df_2 = df[df["sim_id"].isin(
simid_1)], df[df["sim_id"].isin(simid_2)]
list_1.append(df_1)
list_2.append(df_2)
return list_1, list_2
def set_query(self, df):
"""
method to identify two sets of data for the probability distribution based on self.case_study
:return df_1: dataframe corresponding to Modified case
:return df_2: dataframe corresponding to Baseline case
"""
gamma = (df["T_max"] - df["T_min"]) / df["t_ramp"]
gamma[gamma == np.inf] = 0
if self.case_study == "I" or self.case_study == "III":
bool_1 = df["setback"]
bool_2 = df["setback"] == False
elif self.case_study == "IIA":
bool_1 = (df["setback"]) & (df["T_max"] <= 21)
bool_2 = (df["setback"]) & (df["T_max"] > 21)
elif self.case_study == "IIB":
bool_1 = df["t_p"].abs() > 0
bool_2 = df["t_p"].abs() <= 0
else:
print("Please Enter Valid Case Study. Valid Entries: I, IIA, IIB, III")
return None
df_1, df_2 = df[bool_1], df[bool_2]
return df_1, df_2
def read_csv(self):
"""
method to read summary files (from simulations) and parameter files
:return df: dataframe -> entire summary file (from e+ simulations)
:return df_month: list of dataframes -> grouped by month
:return df_param: dataframe -> parameter file created in batchprocess.py
"""
df =
|
pd.read_csv(self.summary_file)
|
pandas.read_csv
|
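# A self-contained sketch of the pd.read_csv step that starts read_csv() above;
# the summary columns ("sim_id", "daily_elec_total", ...) are assumptions based
# on how the rest of the class uses the frame.
import io
import pandas as pd

summary_csv = io.StringIO("sim_id,daily_elec_total,daily_elec_peak,daily_tc_mean\n1,120.5,14.2,96.0\n")
df_summary_demo = pd.read_csv(summary_csv)
print(df_summary_demo.dtypes)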
"""
main.py
-------
All the functionality is in this one file. These routines are for personal use and are
largely undocumented; use at your own risk!
"""
import os
from pathlib import Path
from typing import List, Tuple, Optional, Sequence, Any, Union, Generator
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import penguins as pg
from penguins import dataset as ds # for type annotations
# These are pure convenience routines for my personal use.
# Default save location for plots
dsl = Path("/Users/yongrenjie/Desktop/a_plot.png")
# Path to NMR spectra. The $nmrd environment variable should resolve to
# .../dphil/expn/nmr. On my Mac this is set to my SSD.
def __getenv(key):
if os.getenv(key) is not None:
x = Path(os.getenv(key))
if x.exists():
return x
raise FileNotFoundError("$nmrd does not point to a valid location.")
def nmrd():
return __getenv("nmrd")
# -- Seaborn plotting functions for SNR comparisons
def hsqc_stripplot(molecule: Any,
datasets: Union[ds.Dataset2D, Sequence[ds.Dataset2D]],
ref_dataset: ds.Dataset2D,
expt_labels: Union[str, Sequence[str]],
xlabel: str = "Experiment",
ylabel: str = "Intensity",
title: str = "",
edited: bool = False,
show_averages: bool = True,
ncol: int = 3,
loc: str = "upper center",
ax: Optional[Any] = None,
**kwargs: Any,
) -> Tuple[Any, Any]:
"""
Plot HSQC strip plots (i.e. plot relative intensities, split by
multiplicity).
Parameters
----------
molecule : pg.private.Andrographolide or pg.private.Zolmitriptan
        The class from which the hsqc attribute will be taken
datasets : pg.Dataset2D or sequence of pg.Dataset2D
Dataset(s) to analyse intensities of
ref_dataset : pg.Dataset2D
Reference dataset
expt_labels : str or sequence of strings
Labels for the analysed datasets
xlabel : str, optional
Axes x-label, defaults to "Experiment"
ylabel : str, optional
Axes y-label, defaults to "Intensity"
title : str, optional
Axes title, defaults to empty string
edited : bool, default False
Whether editing is enabled or not.
show_averages : bool, default True
Whether to indicate averages in each category using sns.pointplot.
ncol : int, optional
        Passed to ax.legend(). Defaults to 3.
loc : str, optional
Passed to ax.legend(). Defaults to "upper center".
ax : matplotlib.axes.Axes, optional
Axes instance to plot on. If not provided, uses plt.gca().
kwargs : dict, optional
Keywords passed on to sns.stripplot().
Returns
-------
(fig, ax).
"""
# Stick dataset/label into a list if needed
if isinstance(datasets, ds.Dataset2D):
datasets = [datasets]
if isinstance(expt_labels, str):
expt_labels = [expt_labels]
# Calculate dataframes of relative intensities.
rel_ints_dfs = [molecule.hsqc.rel_ints_df(dataset=ds,
ref_dataset=ref_dataset,
label=label,
edited=edited)
for (ds, label) in zip(datasets, expt_labels)]
all_dfs = pd.concat(rel_ints_dfs)
# Calculate the average integrals by multiplicity
avgd_ints = pd.concat((df.groupby("mult").mean() for df in rel_ints_dfs),
axis=1)
avgd_ints.drop(columns=["f1", "f2"], inplace=True)
# Get currently active axis if none provided
if ax is None:
ax = plt.gca()
# Plot the intensities.
stripplot_alpha = 0.3 if show_averages else 0.8
sns.stripplot(x="expt", y="int", hue="mult",
zorder=0, alpha=stripplot_alpha,
dodge=True, data=all_dfs, ax=ax, **kwargs)
if show_averages:
sns.pointplot(x="expt", y="int", hue="mult", zorder=1,
dodge=0.5, data=all_dfs, ax=ax, join=False,
markers='_', palette="dark", ci=None, scale=1.25)
# Customise the plot
ax.set(xlabel=xlabel, ylabel=ylabel, title=title)
handles, _ = ax.get_legend_handles_labels()
l = ax.legend(ncol=ncol, loc=loc,
markerscale=0.4,
handles=handles[0:3],
labels=["CH", r"CH$_2$", r"CH$_3$"])
ax.axhline(y=1, color="grey", linewidth=0.5, linestyle="--")
# Set y-limits. We need to expand it by ~20% to make space for the legend,
# as well as the averaged values.
EXPANSION_FACTOR = 1.2
ymin, ymax = ax.get_ylim()
ymean = (ymin + ymax)/2
ylength = (ymax - ymin)/2
new_ymin = ymean - (EXPANSION_FACTOR * ylength)
new_ymax = ymean + (EXPANSION_FACTOR * ylength)
ax.set_ylim((new_ymin, new_ymax))
# add the text
for x, (_, expt_avgs) in enumerate(avgd_ints.items()):
for i, ((_, avg), color) in enumerate(zip(expt_avgs.items(),
sns.color_palette("deep"))):
ax.text(x=x-0.25+i*0.25, y=0.02, s=f"({avg:.2f})",
color=color, horizontalalignment="center",
transform=ax.get_xaxis_transform())
pg.style_axes(ax, "plot")
return plt.gcf(), ax
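# Added usage sketch (not from the original file): how hsqc_stripplot might be
# called. The molecule and dataset objects are assumed to come from the penguins
# workflow; only the call signature is taken from the function defined above.
def example_hsqc_comparison(molecule, expt_datasets, ref_dataset, labels):
    fig, ax = plt.subplots(figsize=(6, 4))
    hsqc_stripplot(molecule, expt_datasets, ref_dataset, labels,
                   edited=True, ax=ax)
    fig.savefig(dsl)  # dsl is the default plot path defined at the top
    return fig, ax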
def cosy_stripplot(molecule: Any,
datasets: Union[ds.Dataset2D, Sequence[ds.Dataset2D]],
ref_dataset: ds.Dataset2D,
expt_labels: Union[str, Sequence[str]],
xlabel: str = "Experiment",
ylabel: str = "Intensity",
title: str = "",
ncol: int = 2,
separate_type: bool = True,
loc: str = "upper center",
ax: Optional[Any] = None,
**kwargs: Any,
) -> Tuple[Any, Any]:
"""
Plot COSY strip plots (i.e. plot relative intensities, split by peak type).
Parameters
----------
molecule : pg.private.Andrographolide or pg.private.Zolmitriptan
        The class from which the cosy attribute will be taken
datasets : pg.Dataset2D or sequence of pg.Dataset2D
Dataset(s) to analyse intensities of
ref_dataset : pg.Dataset2D
Reference dataset
expt_labels : str or sequence of strings
Labels for the analysed datasets
xlabel : str, optional
Axes x-label, defaults to "Experiment"
ylabel : str, optional
Axes y-label, defaults to "Intensity"
title : str, optional
Axes title, defaults to empty string
ncol : int, optional
        Passed to ax.legend(). Defaults to 2.
loc : str, optional
Passed to ax.legend(). Defaults to "upper center".
ax : matplotlib.axes.Axes, optional
Axes instance to plot on. If not provided, uses plt.gca().
kwargs : dict, optional
Keywords passed on to sns.stripplot().
Returns
-------
(fig, ax).
"""
# Stick dataset/label into a list if needed
if isinstance(datasets, ds.Dataset2D):
datasets = [datasets]
if isinstance(expt_labels, str):
expt_labels = [expt_labels]
# Calculate dataframes of relative intensities.
rel_ints_dfs = [molecule.cosy.rel_ints_df(dataset=ds,
ref_dataset=ref_dataset,
label=label)
for (ds, label) in zip(datasets, expt_labels)]
if not separate_type:
rel_ints_dfs = [rel_int_df.assign(type="cosy")
for rel_int_df in rel_ints_dfs]
all_dfs = pd.concat(rel_ints_dfs)
# Calculate the average integrals by type
avgd_ints = pd.concat((df.groupby("type").mean() for df in rel_ints_dfs),
axis=1)
avgd_ints.drop(columns=["f1", "f2"], inplace=True)
# Get currently active axis if none provided
if ax is None:
ax = plt.gca()
# Plot the intensities.
sns.stripplot(x="expt", y="int", hue="type",
dodge=True, data=all_dfs, ax=ax,
palette=sns.color_palette("deep")[3:], **kwargs)
# Customise the plot
ax.set(xlabel=xlabel, ylabel=ylabel, title=title)
if separate_type:
ax.legend(ncol=ncol, loc=loc,
labels=["diagonal", "cross"]).set(title=None)
else:
ax.legend().set_visible(False)
ax.axhline(y=1, color="grey", linewidth=0.5, linestyle="--")
# Set y-limits. We need to expand it by ~20% to make space for the legend,
# as well as the averaged values.
EXPANSION_FACTOR = 1.2
ymin, ymax = ax.get_ylim()
ymean = (ymin + ymax)/2
ylength = (ymax - ymin)/2
new_ymin = ymean - (EXPANSION_FACTOR * ylength)
new_ymax = ymean + (EXPANSION_FACTOR * ylength)
ax.set_ylim((new_ymin, new_ymax))
# add the text
offset = -0.2 if separate_type else 0
dx = 0.4 if separate_type else 1
for x, (_, expt_avgs) in enumerate(avgd_ints.items()):
for i, ((_, avg), color) in enumerate(zip(
expt_avgs.items(), sns.color_palette("deep")[3:])):
ax.text(x=x-offset+i*dx, y=0.02, s=f"({avg:.2f})",
color=color, horizontalalignment="center",
transform=ax.get_xaxis_transform())
pg.style_axes(ax, "plot")
return plt.gcf(), ax
def hsqc_cosy_stripplot(molecule: Any,
datasets: Sequence[ds.Dataset2D],
ref_datasets: Sequence[ds.Dataset2D],
xlabel: str = "Experiment",
ylabel: str = "Intensity",
title: str = "",
edited: bool = False,
show_averages: bool = True,
separate_mult: bool = True,
ncol: int = 4,
loc: str = "upper center",
ax: Optional[Any] = None,
font_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> Tuple[Any, Any]:
"""
Plot HSQC and COSY relative intensities on the same Axes. HSQC peaks are
split by multiplicity, COSY peaks are not split.
Parameters
----------
molecule : pg.private.Andrographolide or pg.private.Zolmitriptan
        The class from which the hsqc and cosy attributes will be taken
datasets : (pg.Dataset2D, pg.Dataset2D)
HSQC and COSY dataset(s) to analyse intensities of
ref_datasets : (pg.Dataset2D, pg.Dataset2D)
Reference HSQC and COSY datasets
xlabel : str, optional
Axes x-label, defaults to "Experiment"
ylabel : str, optional
Axes y-label, defaults to "Intensity"
title : str, optional
Axes title, defaults to empty string
edited : bool, default False
Whether editing in the HSQC is enabled or not.
show_averages : bool, default True
Whether to indicate averages in each category using sns.pointplot.
ncol : int, optional
Passed to ax.legend(). Defaults to 4.
loc : str, optional
Passed to ax.legend(). Defaults to "upper center".
ax : matplotlib.axes.Axes, optional
Axes instance to plot on. If not provided, uses plt.gca().
kwargs : dict, optional
Keywords passed on to sns.stripplot().
Returns
-------
(fig, ax).
"""
# Set up default font_kwargs if not provided.
font_kwargs = font_kwargs or {}
# Calculate dataframes of relative intensities.
hsqc_rel_ints_df = molecule.hsqc.rel_ints_df(dataset=datasets[0],
ref_dataset=ref_datasets[0],
edited=edited)
# Rename mult -> type to match COSY
hsqc_rel_ints_df = hsqc_rel_ints_df.rename(columns={"mult": "type"})
# Remove multiplicity information if separation is not desired
if not separate_mult:
hsqc_rel_ints_df = hsqc_rel_ints_df.assign(type="hsqc")
cosy_rel_ints_df = molecule.cosy.rel_ints_df(dataset=datasets[1],
ref_dataset=ref_datasets[1])
cosy_rel_ints_df = cosy_rel_ints_df.assign(type="cosy")
    rel_ints_df = pd.concat((hsqc_rel_ints_df, cosy_rel_ints_df))  # api: pandas.concat
import argparse
import sys
import os
import numpy as np
import pandas as pd
import glob
from collections import Counter
import pdb
parser = argparse.ArgumentParser(description = '''Calculate Neff.''')
parser.add_argument('--a3mdir', nargs=1, type= str, default=sys.stdin, help = 'Path to directory with merged a3m files.')
parser.add_argument('--pdbmeta', nargs=1, type= str, default=sys.stdin, help = 'Path to pdb ids and interacting chains.')
parser.add_argument('--mode', nargs=1, type= str, default=sys.stdin, help = 'Selection mode: top/bottom/all/random100.')
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include / at the end.')
def read_a3m(infile):
'''Read a3m MSA'''
mapping = {'-': 21, 'A': 1, 'B': 21, 'C': 2, 'D': 3, 'E': 4, 'F': 5,
'G': 6,'H': 7, 'I': 8, 'K': 9, 'L': 10, 'M': 11,'N': 12,
'O': 21, 'P': 13,'Q': 14, 'R': 15, 'S': 16, 'T': 17,
'V': 18, 'W': 19, 'Y': 20,'U': 21, 'Z': 21, 'X': 21, 'J': 21}
parsed = []#Save extracted msa
with open(infile, 'r') as file:
for line in file:
line = line.rstrip()
if line.startswith('>'): #OX=OrganismIdentifier
continue
line = line.rstrip()
parsed.append([mapping.get(ch, 22) for ch in line if not ch.islower()])
return np.array(parsed, dtype=np.int8, order='F')
def calc_neff(msa, t):
'''Calculate Neff using cutoff t
    Neff is essentially the number of non-redundant sequence clusters
    (sequence identity < 0.8) in the MSA, normalized here by the square root
    of the query length (see the norm factor below):
https://storage.googleapis.com/plos-corpus-prod/10.1371/journal.pcbi.1007411/2/pcbi.1007411.s001.pdf?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=wombat-sa%40plos-prod.iam.gserviceaccount.com%2F20210816%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20210816T070710Z&X-Goog-Expires=86400&X-Goog-SignedHeaders=host&X-Goog-Signature=a68350f11d44be91f3de5348ffd024e739a29b6531837c8a55778d8bb28e19340880187cc6a9479bd7cfd1742936bcd7288d768e07e9cc751181f736fde530a392daee6889fcca2d9d5e7acbdb78c47beb14c8d9b8f4a0befa72435d56be51ce149277552216a4d9f0eb02795ad888e74be8ccb9426ccbd0f18fd1e1aa4c59115c240467389694fe2f4ebecb9b1bdca63e3c3c9fe2f5877bc71e063f9af24c8deb5d2ffe1212463020f06f245cf851b954be8e39a003b23bafa56babc656a16c44beeeddc3cbb05a289a4c92eca13ba95fb2d4d64d5f2bacf68be73f7ede5bda044d30ae2b4c6999dc7faf6a6821ed0e977e80aab0ec691190ed14d8c51611cd
'''
l = msa.shape[1]
lt = int(l*t) #The number of positions that may differ in the cluster
norm = 1/np.sqrt(l)
#Cluster
n_clusters = 0
clustered_seqs = 0
remaining_msa = msa
#Go through all seqs and cluster
while remaining_msa.shape[0]>1:
msa_diff = remaining_msa-remaining_msa[0]
sim_counts = Counter(np.argwhere(msa_diff==0)[:,0]) #Count the similar positions
vals = np.array([*sim_counts.values()])
keys = np.array([*sim_counts.keys()])
#Get cluster
cluster = keys[np.argwhere(vals>=lt)] #The cluster contains all sequences with over lt similarity
n_clusters += 1
if cluster.shape[0]==0:
clustered_seqs +=1
remaining_msa = remaining_msa[1:,:]
else:
clustered_seqs += cluster.shape[0]
sel_inds = np.zeros(msa_diff.shape[0])
sel_inds[cluster[:,0]]=1
#Update remaining seqs
remaining_msa = remaining_msa[np.argwhere(sel_inds==0)[:,0]]
print(n_clusters, clustered_seqs)
#Calc Neff - add the last seq
if len(remaining_msa)>0:
n_clusters+=1
neff = norm*n_clusters
return neff
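# Added worked example (not part of the original script): with three toy
# sequences of length 5 and t = 0.8, at most one position may differ inside a
# cluster, so rows 0 and 1 cluster together and row 2 stands alone, giving
# Neff = 2 / sqrt(5), roughly 0.894.
def _toy_neff_example():
    toy_msa = np.array([[1, 2, 3, 4, 5],
                        [1, 2, 3, 4, 6],    # 4/5 positions identical to row 0
                        [7, 8, 9, 9, 9]],   # unrelated sequence
                       dtype=np.int8)
    return calc_neff(toy_msa, t=0.8)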
#################MAIN####################
#Parse args
args = parser.parse_args()
#Data
a3mdir = args.a3mdir[0]
pdbmeta = pd.read_csv(args.pdbmeta[0])
mode = args.mode[0]
outdir = args.outdir[0]
#Similarity cutoff
t=0.8
#Go through all pairs in pdbmeta
ids1 = []
ids2 = []
neffs = []
for i in range(len(pdbmeta)):
row = pdbmeta.loc[i]
id1 = row.PDB+'_'+row['Chain 1']
id2 = row.PDB+'_'+row['Chain 2']
try:
msa = read_a3m(a3mdir+id1+'_'+id2+'_'+mode+'.a3m')
except:
continue
neff = calc_neff(msa, t)
#Save
ids1.append(id1)
ids2.append(id2)
neffs.append(neff)
print(neff)
#Create df
results_df = pd.DataFrame()  # api: pandas.DataFrame
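# Added sketch (the original script is truncated here): one plausible way to
# assemble and save the results; the column names and output filename are
# assumptions, not taken from the original.
results_df = pd.DataFrame({'id1': ids1, 'id2': ids2, 'Neff': neffs})
results_df.to_csv(outdir + 'neff_' + mode + '.csv', index=False)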
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs, sources)
# Sources and sinks.
sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_float_data(self):
# Data is float, expect rounding.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
# Sources and sinks.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
exp_sinks = \
pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_negative_data(self):
# Values less than 0, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) - 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
data = -1 * np.random.randint(0, 20, size=20).reshape(5, 4)
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(-10 * data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_nan_data(self):
# nans, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
data[3, 2] = np.nan
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
data[1, 3] = np.nan
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_non_numeric_data(self):
# data contains at least some non-numeric columns, expect errors.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks.iloc[2, 2] = '3'
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_columns_identical(self):
# Columns are identical, no error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, sources)
pd.util.testing.assert_frame_equal(obs_sinks, sinks)
def test_columns_non_identical(self):
# Columns are not identical, error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=['feature%s' % i for i in range(4)])
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
class TestValidateGibbsParams(TestCase):
def test_acceptable_inputs(self):
# All values acceptable, expect no errors.
alpha1 = .001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
alpha1 = alpha2 = beta = 0
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
def test_not_acceptable_inputs(self):
# One of the float params is negative.
alpha1 = -.001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is 0.
alpha1 = .001
restarts = 0
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is a float.
restarts = 1.34
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a string.
restarts = '3.2232'
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a nan.
restarts = 3
alpha1 = np.nan
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
class TestIntersectAndSortSamples(TestCase):
def test_partially_overlapping_tables(self):
# Test an example where there are unshared samples present in both
# feature and sample tables. Notice that order is different between
# the samples that are shared between both tables. The order of samples
# in the returned tables is set by the ordering done in np.intersect1d.
sdata_c1 = [3.1, 'red', 5]
sdata_c2 = [3.6, 'yellow', 7]
sdata_c3 = [3.9, 'yellow', -2]
sdata_c4 = [2.5, 'red', 5]
sdata_c5 = [6.7, 'blue', 10]
samples = ['s1', 's4', 's2', 's3', 'sX']
headers = ['pH', 'color', 'day']
stable = pd.DataFrame([sdata_c1, sdata_c4, sdata_c2, sdata_c3,
sdata_c5], index=samples, columns=headers)
fdata = np.arange(90).reshape(9, 10)
samples = ['s%i' % i for i in range(3, 12)]
columns = ['o%i' % i for i in range(1, 11)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = pd.DataFrame(fdata[[1, 0], :], index=['s4', 's3'],
columns=columns)
exp_stable = pd.DataFrame([sdata_c4, sdata_c3], index=['s4', 's3'],
columns=headers)
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
# No shared samples, expect a ValueError.
ftable.index = ['ss%i' % i for i in range(9)]
self.assertRaises(ValueError, intersect_and_sort_samples, stable,
ftable)
# All samples shared, expect no changes.
fdata = np.arange(50).reshape(5, 10)
samples = ['s1', 's4', 's2', 's3', 'sX']
columns = ['o%i' % i for i in range(10)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = ftable.loc[stable.index, :]
exp_stable = stable
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
class TestGetSamples(TestCase):
def tests(self):
# Make a dataframe which contains mixed data to test.
col0 = ['a', 'a', 'a', 'a', 'b']
col1 = [3, 2, 3, 1, 3]
col2 = ['red', 'red', 'blue', 255, 255]
headers = ['sample_location', 'num_reps', 'color']
samples = ['s1', 's2', 's3', 's4', 's5']
sample_metadata = \
pd.DataFrame.from_dict({k: v for k, v in zip(headers,
[col0, col1, col2])})
sample_metadata.index = samples
obs = get_samples(sample_metadata, 'sample_location', 'b')
exp = pd.Index(['s5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'sample_location', 'a')
exp = pd.Index(['s1', 's2', 's3', 's4'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'color', 255)
        exp = pd.Index(['s4', 's5'], dtype='object')  # api: pandas.Index
        pd.util.testing.assert_index_equal(obs, exp)
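# Portability note (added; not part of the original tests): pandas.util.testing
# was deprecated in pandas 1.0 and removed in 2.0; the same assertions now live
# in pandas.testing. A small shim that works on both old and new pandas:
try:
    from pandas import testing as pdt
except ImportError:  # very old pandas
    from pandas.util import testing as pdt
# e.g. pdt.assert_frame_equal(obs_sources, exp_sources)
#      pdt.assert_index_equal(obs, exp)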
import numpy as np
import pandas as pd
import math
import nltk
import itertools
import os
from collections import Counter
from collections import defaultdict
from collections import OrderedDict
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import RegexpTokenizer
import seaborn as sns
import matplotlib.pyplot as plt
'''Function needed to read all the books stored in separate .tsv files and combine them in a single dataframe that will be used in the search engine'''
## This function is used only once to create the dataframe that will be then stored in a csv file to ease the access
def create_dataframe():
df = pd.read_csv('./data/tsv/book_0.tsv', sep='\t', index_col=False)
books_to_skip = [192,2464,3011,3056,10141,11338,19728,20585,25426,25453,25902,26045,28476,29403]
for i in range(1,30000):
if os.path.getsize(f'./data/tsv/book_{i}.tsv') != 0 and i not in books_to_skip:
            x = pd.read_csv(f'./data/tsv/book_{i}.tsv', sep='\t', index_col=False)  # api: pandas.read_csv
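# Added alternative sketch (not the original code): the same combination step
# written to collect all per-book frames first and concatenate once, which is
# much faster than growing a dataframe inside the loop. Paths and the skip list
# mirror create_dataframe above; the return value is an assumption.
def create_dataframe_concat_once():
    frames = [pd.read_csv('./data/tsv/book_0.tsv', sep='\t', index_col=False)]
    books_to_skip = {192, 2464, 3011, 3056, 10141, 11338, 19728, 20585,
                     25426, 25453, 25902, 26045, 28476, 29403}
    for i in range(1, 30000):
        path = f'./data/tsv/book_{i}.tsv'
        if os.path.getsize(path) != 0 and i not in books_to_skip:
            frames.append(pd.read_csv(path, sep='\t', index_col=False))
    return pd.concat(frames, ignore_index=True)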
#!/usr/bin/env python3
# 29.03.21
# Assignment lab 03
# Master Class: Machine Learning (5MI2018)
# Faculty of Economic Science
# University of Neuchatel (Switzerland)
# Lab 3, see ML21_Exercise_3.pdf for more information
# https://github.com/RomainClaret/msc.ml.labs
# Authors:
# - <NAME> @RomainClaret
# - <NAME> @Nic0uds
# PART: DATA CLEANING AND PREPARATION
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
import pickle
# get the features names and the values of the categories from adult.names (build a dictionary)
data_dict = {}
with open('adult.names') as f:
for l in f:
if l[0] == '|' or ':' not in l: continue
c = l.split(':')
if c[1].startswith(' continuous'): data_dict[c[0]] = ""
else: data_dict[c[0]] = c[1].replace("\n","").replace(".","").replace(" ","").split(",")
# in the specifications (adult.names): Unknown values are replaced with the character '?'
header = list(data_dict.keys())+['income']
df_train = pd.read_table("adult.data", sep=r',\s', na_values='?', header=None, names=header).dropna()
df_evaluate = pd.read_table("adult.test", sep=r',\s', na_values='?', skiprows=[0], header=None, names=header).dropna()
# dropping education because it's redundant with education-num
# dropping occupation because it's not generic enough; there are many more categories than those captured in the training sample
# dropping relationship because it's not generic enough; there are many more categories than those captured in the training sample
drop_list = ["education", "occupation", "relationship"]
df_train = df_train.drop(columns=drop_list)
df_evaluate = df_evaluate.drop(columns=drop_list)
# reducing multi-valued categories to lower-dimensional classes (preferably binary) when possible
# - marital-status could be reduced to Married or Not-Married
# marital-status ['Never-married' 'Married-civ-spouse' 'Divorced' 'Married-spouse-absent' 'Separated' 'Married-AF-spouse' 'Widowed']
# - workclass could be reduced to 3 dimensions: Government, Private, and Self-Employment
# Note that we take into consideration all the options for the category from the specifications
# ['State-gov' 'Self-emp-not-inc' 'Private' 'Federal-gov' 'Local-gov' 'Self-emp-inc' 'Without-pay']
dict_replace = {
'marital-status' : {
'Never-married': 'Not-Married',
'Married-civ-spouse': 'Married',
'Divorced': 'Not-Married',
'Married-spouse-absent': 'Married',
'Separated': 'Married',
'Married-AF-spouse': 'Married',
'Widowed': 'Not-Married'
},
'workclass': {
'State-gov': 'Government',
'Self-emp-not-inc': 'Self-Employment',
'Federal-gov': 'Government',
'Local-gov': 'Government',
'Self-emp-inc': 'Self-Employment'
}
}
df_train.replace(dict_replace, inplace=True)
df_evaluate.replace(dict_replace, inplace=True)
# harmonizing the income categories between the training and evaluation datasets:
# the evaluation dataset has a trailing '.' on the income values that the training dataset does not
df_evaluate["income"].replace({"<=50K.":"<=50K", ">50K.":">50K"}, inplace=True)
# for binary categories we will be using a label encoder
# - marital-status, sex, income
for l in ["marital-status", "sex", "income"]:
    # use a separate encoder per dataset: LabelEncoder.fit() returns the encoder
    # itself, so reusing a single instance would silently refit encoder_train on
    # the evaluation data
    encoder_train = LabelEncoder().fit(df_train[l])
    encoder_evaluate = LabelEncoder().fit(df_evaluate[l])
    df_train["encoded_"+l] = encoder_train.transform(df_train[l])
    df_evaluate["encoded_"+l] = encoder_evaluate.transform(df_evaluate[l])
# For non-binary categories, first we check the specifications of the dataset to validate all the options per category (we have data_dict)
# Indeed, the values in the categories are not always all present in a dataset
# race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
# native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
# and our custom category: workclass: Government, Private, and Self-Employment
# adding temporary fake data for the one hot encoder
fake_row = df_train[:1].copy()
df_fake = pd.DataFrame(data=fake_row, columns=df_train.columns)  # api: pandas.DataFrame
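# Added sketch (not the original code): instead of injecting fake rows, the
# one-hot encoder can be given the full category lists up front so that the
# training and evaluation sets are encoded consistently. The reduced workclass
# list follows the dict_replace mapping above (Without-pay, which survives the
# mapping, would simply be ignored via handle_unknown); race and native-country
# come from data_dict parsed out of adult.names.
onehot_cols = ["race", "native-country", "workclass"]
onehot_categories = [data_dict["race"],
                     data_dict["native-country"],
                     ["Government", "Private", "Self-Employment"]]
ohe = OneHotEncoder(categories=onehot_categories, handle_unknown="ignore")
ohe.fit(df_train[onehot_cols])
train_onehot = ohe.transform(df_train[onehot_cols])
evaluate_onehot = ohe.transform(df_evaluate[onehot_cols])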
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
from datetime import datetime
import numpy as np
import pandas as pd
from .. import library
LINE_PROTOCOL = "health,type={},period={},unit={} {}="
class Health(library.Library):
def _run(self):
files = self.dropbox_download("/Data/Health", self.input)
self.add_counter(library.CTR_SRC_FILES, library.CTR_ACT_SKIPPED, 0 if os.getenv('WRANGLE_REPROCESS_ALL_FILES') == "true" else \
sum([not status[1] for status in files.values()]))
new_data = os.getenv('WRANGLE_REPROCESS_ALL_FILES') == "true" or \
all([status[0] for status in files.values()]) and any([status[1] for status in files.values()])
if new_data:
sleep_yesterday_df = pd.DataFrame()
            sleep_df = pd.DataFrame()  # api: pandas.DataFrame
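            # Added example (not the original code) of how LINE_PROTOCOL might be
            # formatted into an InfluxDB-style line; the field value is made up:
            # LINE_PROTOCOL.format("sleep", "1d", "min", "value") + "432"
            # -> "health,type=sleep,period=1d,unit=min value=432"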
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 08:07:00 2017
@author: LAI
"""
####
import numpy as np
import pandas as pd
import math
import sklearn.preprocessing as prep
from sklearn.model_selection import KFold, ParameterGrid
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
from sklearn.ensemble import ExtraTreesClassifier
from Visual import Visual as V
class LearningPipe():
"""
This is a wrapper class based on several sklearn models to perform feature
    selection, grid search, cross-validation and evaluation jobs, and provides
    methods to visualize parameter tuning and decision boundaries.
"""
def __init__(self, clf, clf_name, Xs, y):
self.clf = clf
self.clf_name = clf_name
self.Xs = Xs
self.y = y
self.kf = KFold(n_splits=5)
self.spaces = [
'original', 'PCA', 'sparsePCA', 'factorAnalysis', 'NMF'
]
@staticmethod
def featureSelection(X_train, X_test, y, modelType='tree'):
""" a static method using either linear svm or extra tree to select
relevent features for later model fitting.
Inputs:
-------
X_train: The training feature matrix in one space
X_test: The testing feature matrix in one space
y: the label matrix
modelType: either 'tree' or 'lsvc', default as 'tree'
Outputs:
-------
The new feature matrix with selected features.
"""
if modelType == 'lsvc':
clf = LinearSVC(C=0.01, penalty="l1", dual=False)
if modelType == 'tree':
clf = ExtraTreesClassifier()
model = SelectFromModel(clf.fit(X_train, y), prefit=True)
return model.transform(X_train), model.transform(X_test)
@staticmethod
def polynomializeFeatures(X_train, X_test, n_degree=2):
""" a static method transforming the features into polynomal space
in order to catpure non-linear relationships in later learning process.
Inputs:
-------
X_train: The training feature matrix in one space
X_test: The testing feature matrix in one space
n_degree: The degree of polynomial spaces
Outputs:
-------
The transformed training and testing feature matrix.
"""
poly = prep.PolynomialFeatures(n_degree)
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.fit_transform(X_test)
return X_train_poly, X_test_poly
def pseudoProba(self, dists):
""" Convert the distance from test data point to decision boundary to
pseudo probability using sigmoid function.
Inputs:
-------
dists: The distances from each test data point to decision boundary
in discriminative classifiers.
Outputs:
-------
The converted pseudo probability for each data point.
"""
sigmoid = lambda x: 1 / (1 + math.exp(-x))
n_samples = len(dists)
pseudoProba = np.empty((n_samples,2))
for i in range(n_samples):
pseudoProba[i] = [sigmoid(dists[i]), 1 - sigmoid(dists[i])]
return pseudoProba
def _getConfusionHist(self, fittedModel, X_test, y_test):
""" Calculate confusion histogram(histogram version of confusion matrix)
using predict probability of generative model or distance to decision
boundary of discriminative model.
Inputs:
-------
fittedModel: The classifier already fitted on training dataset
X_test: the feature matrix for testing
y_test: the label matrix for testing
Outputs:
-------
The confusion histogram of both positive and negative predictions.
"""
# fetch the prediction probabilities or pseudo probabilities of each
# data point from model.
if hasattr(fittedModel, "predict_proba"):
yHatProbs = fittedModel.predict_proba(X_test)
else:
yHatDists = fittedModel.decision_function(X_test)
yHatProbs = self.pseudoProba(yHatDists)
# calculate the confidence error of each data point. For example, if
# a prediction is <negative=0.7, positive=0.3> while the true label is
# positive, then the confidence error is -0.2 because probability for
# positive has to reach at least 0.5 to make correct prediction. Thus
# this prediction is a case of false negative.
confusionHist = []
for lbl in [0,1]: # 0 = negative, 1 = positive
mask = yHatProbs[:, lbl] >= 0.5
preds, labels = yHatProbs[mask], y_test[mask]
cfd_err = []
for i in range(len(preds)):
cfd_err.append((-1)**labels[i] * (0.5 - preds[i]))
binDensity,binEdge = np.histogram(
cfd_err, bins=10, range=(-0.5, 0.5), density=True
)
confusionHist.append([binDensity,binEdge])
return np.array(confusionHist)
def evaluate(self, param, X, y):
""" Evaluate the accuracies of the current classifier with given
parameter combination using 5-fold cross validation. Then calcu-
late the average accuracy, variance and expected loss based on
the results of cross validation.
Inputs:
-------
param: parameter combination for classifier
X: the features to fit
y: the labels to fit
Outputs:
-------
performance: the average accuracy, variance and expected loss
"""
confusionHists = []
scores = []
for train_idx, test_idx in self.kf.split(X):
model = self.clf(**param)
model.fit(X[train_idx], y[train_idx].ravel())
scores.append(
model.score(X[test_idx],
y[test_idx].ravel())
)
confusionHists.append(
self._getConfusionHist(model,
X[test_idx],
y[test_idx].ravel())
)
mean = np.mean(scores)
var = np.var(scores)
loss = (1 - mean)**2 + var
performance = (mean, var, loss)
return performance, np.mean(confusionHists, axis=0)
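    # Added worked example (not part of the original class): with fold accuracies
    # of 0.90, 0.85 and 0.95 the mean is 0.90, the (population) variance is about
    # 0.00167, and the expected loss is (1 - 0.90)**2 + 0.00167, roughly 0.0117.
    # This is the bias-squared-plus-variance bookkeeping used in evaluate() above:
    #     scores = [0.90, 0.85, 0.95]
    #     loss = (1 - np.mean(scores))**2 + np.var(scores)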
def gridSearching(self, params={}):
""" With the given values of parameters, this function will gene-
rate a parameter grid using sklearn and evaluate each of the para-
meter combination in each feature spaces and record the performances
in self.results as a DataFrame.
Inputs:
-------
params: possible values for each parameter
"""
paramsGrid = ParameterGrid(params)
results = []
print("number of combinations in each space: %d" %len(paramsGrid))
for space in self.spaces:
print("searching space: %s" %space)
for cnt, param in enumerate(paramsGrid):
print(cnt)
performance, confusionHist = self.evaluate(
param, self.Xs[space], self.y
)
mean, var, loss = performance
param.update({
'space':space, 'mean':mean,
'variance':var, 'expectedLoss':loss,
'confusionHist':confusionHist
})
results.append(param)
df = pd.DataFrame(results).sort_values('expectedLoss')
self.results = df.reset_index(drop=True)
self.bestPerfs = self._getBestPerfs()
def _getBestPerfs(self):
"""
output:
-------
A dataframe contains the best results in each feature space
"""
bestPerfs = []
resultGroups = self.results.groupby('space')
cols = [
'space', 'mean', 'variance', 'expectedLoss', 'confusionHist'
]
for space in self.spaces:
bestPerf = resultGroups\
.get_group(space)[cols]\
.to_dict('records')[0]
bestPerfs.append(bestPerf)
        bestPerfs = pd.DataFrame(bestPerfs)  # api: pandas.DataFrame
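# Added usage sketch (not the original code): Xs is expected to be a dict that
# maps each name in self.spaces to a feature matrix, and the classifier class
# itself (not an instance) is passed in, mirroring self.clf(**param) above.
def _example_learningpipe_usage(Xs, y):
    pipe = LearningPipe(clf=ExtraTreesClassifier, clf_name="extraTrees",
                        Xs=Xs, y=y)
    pipe.gridSearching(params={"n_estimators": [50, 100]})
    return pipe.results[["space", "mean", "variance", "expectedLoss"]]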
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read data
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: made to check the details (: name of the code, ‘name’)
g_dpc_r_1=dpc.loc[:, ['ID','code','name']]
# Dummy encoding of the 'code' column (each code corresponds to a 'name')
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features= dpc_Remove_data_leak.groupby("ID").sum()
total_patient_features = total_patient_features.reset_index()  # keep ID as a column for the merge below
# Load a new file with ID and treatment availability
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
ID_and_polyp_pn = pd.read_table(file, delimiter=",")
ID_and_polyp_pn_data= ID_and_polyp_pn[['ID', 'target']]
#Combine the new file containing ID and treatment status with the file after dummy encoding by the ‘name’
ID_treatment_medical_statement = pd.merge(ID_and_polyp_pn_data, total_patient_features, on=["ID"], how='outer')  # api: pandas.merge
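# Added sketch (not the original script): one plausible next step, splitting the
# merged table and fitting a LightGBM classifier on the encoded features. The
# dropna/fillna handling, test size and model choice are assumptions.
model_data = ID_treatment_medical_statement.dropna(subset=['target']).fillna(0)
X = model_data.drop(columns=['ID', 'target'])
y = model_data['target']
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
train_idx, test_idx = next(sss.split(X, y))
clf = lgb.LGBMClassifier()
clf.fit(X.iloc[train_idx], y.iloc[train_idx])
print(clf.score(X.iloc[test_idx], y.iloc[test_idx]))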
from data import Data
from stats import Stats
from significance import Significance
import itertools as it
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import os
root_folder = os.path.dirname(os.path.dirname( __file__ ))
results_folder = os.path.join(root_folder, 'results', 'descriptive_report')
if not os.path.exists(results_folder):
os.makedirs(results_folder)
def get_rejections(split, threshold, discardzeroes):
data = Data(split)
keys = list(data.dfs.keys())
columns = Data('all').dfs.get('all')
columns = columns.drop(['ttb_check', 'tdb_check', 'tob_check'], axis=1)
if split == 'purpose':
columns = columns.drop([key for key in columns.columns if 'cd' in key], axis=1)
columns = columns.columns
scores = {column : [] for column in columns}
df_pvalues = pd.DataFrame(index=columns)
for a,b in it.combinations(keys, 2):
df1 = data.dfs[a]
df2 = data.dfs[b]
if split == 'purpose':
df1 = df1.drop(['ttb_check', 'tdb_check', 'tob_check'], axis=1)
df2 = df2.drop(['ttb_check', 'tdb_check', 'tob_check'], axis=1)
df1 = df1.drop([key for key in df1.columns if 'cd' in key], axis=1)
df2 = df2.drop([key for key in df2.columns if 'cd' in key], axis=1)
else:
df1 = df1.drop(['ttb_check', 'tdb_check', 'tob_check'], axis=1)
df2 = df2.drop(['ttb_check', 'tdb_check', 'tob_check'], axis=1)
#Extra iteration to take only blueprints with nodes
if split == 'professionality':
df1 = df1[df1['nn_count'] != 0]
df2 = df2[df2['nn_count'] != 0]
sig_analysis = Significance(df1, df2, discardzeroes)
try:
stat_values = sig_analysis.sig['corr_p_values']
            df_pvalues = pd.concat([df_pvalues, stat_values], axis=1)  # api: pandas.concat
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_validate
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.linear_model import Lasso, LassoCV, LogisticRegressionCV, LogisticRegression
from sklearn.linear_model import ElasticNet, ElasticNetCV, enet_path
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import auc, roc_curve
from utils import kernel
from mics import classifier_mics
'''
Function names follow scikit-learn's naming as closely as possible so that their purpose is easy to understand.
No leave-one-out helpers are provided: to use leave-one-out, simply set the number of folds in the K-fold cross-validation parameters to the number of samples (as recommended by the scikit-learn documentation).
Leave-one-out is not recommended inside grid search; with many candidate parameters the cost becomes prohibitive.
'''
class lasso():
    '''A collection of LASSO feature-selection methods; whether to cross-validate is chosen directly in the class.
    Inputs:
        X_train, X_test, y_train, y_test: features and labels of the training and test sets
        feature_name: feature names; their order must match the columns of X
        path: user-defined storage path for the output files
        cv_val: bool, whether to run grid-search cross-validation
    '''
def __init__(self, X_train, X_test, y_train, y_test, feature_name, path, cv_val=True):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.name = feature_name
self.cv_val = cv_val
self.path = path
def lasso(self, alpha, cv):
        '''Run LASSO feature selection once, keeping the features whose coefficients are non-zero.
        The result includes the selected training and test features, together with the feature names and weights; each feature name has one weight and the order matches.
        Inputs:
            alpha: the alpha parameter
            cv: int, number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): the optimal lasso penalty parameter
            new_train_feature: selected training feature matrix
            new_test_feature: selected test feature matrix
            new_feature_name: names of the selected features
            feature_weight: coefficients of the selected features
        '''
if self.cv_val is True:
model_lasso = LassoCV(alphas=alpha, cv=cv)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
# 交叉验证得到的最佳lasso惩罚参数
best_alpha = model_lasso.alpha_
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(model_lasso, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
model_lasso = Lasso(alpha=alpha)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lasso_only')
os.makedirs(img_path, exist_ok=True)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(model_lasso, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return new_train_feature, new_test_feature, new_feature_name, feature_weight
def lasso_shuffle(self, shuffle_time, alpha_range, cv=10):
        '''Shuffle the dataset repeatedly, run the selection on each shuffle, and count how often each feature is selected.
        Inputs:
            shuffle_time: number of shuffle iterations
            alpha_range: alpha value(s); an int without grid search, a list with grid search
            cv: number of folds if cross-validation is used
        Outputs:
            new_train_feature: training features after selection (like the test matrix below, this is secondary; the indices are used to re-extract columns from the original feature matrix anyway)
            new_test_feature: test features after selection
            select_feature_name: names of the selected features
            select_feature_name_freq: for each feature name, how many times it was selected across the shuffle loops
            feature_weight: coefficient of each selected feature
            select_feature_index: index of each selected feature in the original feature matrix, so the columns can be extracted directly after selection
        '''
# 将返回的值存入txt文件中
lasso_txt = open(os.path.join(self.path, 'lasso_shuffle.txt'), 'w')
lasso_txt.write('LASSO parameters set:\n')
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('Grid search: % s' % self.cv_val)
lasso_txt.write('\nAlpha range: % s' % alpha_range)
lasso_txt.write('\nShuffle time: % s' % shuffle_time)
lasso_txt.write('\nGrid search cv-fold: % s' % cv)
lasso_txt.write('\n---------------------------------------------\n')
if self.cv_val is True:
# 初始化权重为0,初始化特征列表为空
coef_sum = 0
select_list = []
# 初始化最佳参数alpha
alpha_list = []
# 开始shuffle循环,每次都存储选择后的特征名
for i in range(shuffle_time):
# 将数据进行shuffle
X, y = shuffle(self.X_train, self.y_train)
kfold = StratifiedKFold(n_splits=cv, shuffle=False)
model_lasso = LassoCV(alphas=alpha_range, cv=cv)
model_lasso.fit(X, y)
coef = pd.Series(model_lasso.coef_)
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
# 交叉验证得到的最佳lasso惩罚参数
alpha = model_lasso.alpha_
alpha_list.append(alpha)
print('best alpha value is % s' % alpha)
# 将每一次循环的coef都进行相加
coef_sum += model_lasso.coef_
# 提取非零特征的mask
model = SelectFromModel(model_lasso, prefit=True)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
# 根据mask将保留特征的名字存储到select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
# 求全部特征的coef平均值
coef_mean = coef_sum / shuffle_time
# 每次的特征都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
# 每次的alpha都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
alpha_freq = dict(zip(*np.unique(alpha_list, return_counts=True)))
# 按照特征出现的频率,从大到小进行排序,分别存储特征名和出现次数
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
# 特征名相对应的顺序,将每个特征出现的次数存在select_feature_name_freq中
select_feature_name_freq.append(feature_freq[k])
# 将特征名存在select_feature_name中,list形式
select_feature_name.append(k)
# 获取lasso后特征的索引
select_feature_index = []
# 将lasso后特征的名字转为list
name_list = list(select_feature_name)
# 将原始所有特征的名字转为list
all_name_list = list(self.name)
# 获取特征选择后特征在原始特征list中的索引位置,将所有索引位置存在select_feature_index中
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
# 按照alpha出现的频率,从大到小进行排序,分别存储alpha的大小和出现次数
alpha_value = []
alpha_value_freq = []
for k in sorted(alpha_freq, key=alpha_freq.__getitem__, reverse=True):
# alpha值相对应的顺序,将每个alpha值出现的次数存在alpha_value_freq中
alpha_value_freq.append(alpha_freq[k])
# 将alpha的值存在alpha_value中,list形式
alpha_value.append(k)
print('alpha value % s appeared % s times in the loop' % (k, alpha_freq[k]))
# 通过索引将选择后的特征矩阵赋值给select_feature
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
# 将输出值存入txt文件
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
else:
# 初始化权重为0,初始化特征列表为空
coef_sum = 0
select_list = []
# 开始shuffle循环,每次都存储选择后的特征名
for i in range(shuffle_time):
# 将数据进行shuffle
X, y = shuffle(self.X_train, self.y_train)
model_lasso = Lasso(alpha=alpha_range)
model_lasso.fit(X, y)
coef = pd.Series(model_lasso.coef_)
print("% s th shuffle, Lasso picked " % i + str(
sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
# 将每一次循环的coef都进行相加
coef_sum += model_lasso.coef_
# 提取非零特征的mask
model = SelectFromModel(model_lasso, prefit=True)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
# 根据mask将保留特征的名字存储到select_list
for bool, name in zip(mask, self.name):
if bool:
select_list.append(name)
# 求全部特征的coef平均值
coef_mean = coef_sum / shuffle_time
# 每次的特征都存储在select_list中,然后统计每个特征出现的次数,存在feature_freq中,dict形式
feature_freq = dict(zip(*np.unique(select_list, return_counts=True)))
# 按照特征出现的频率,从大到小进行排序,分别存储特征名和出现次数
select_feature_name = []
select_feature_name_freq = []
for k in sorted(feature_freq, key=feature_freq.__getitem__, reverse=True):
# 特征名相对应的顺序,将每个特征出现的次数存在select_feature_name_freq中
select_feature_name_freq.append(feature_freq[k])
# 将特征名存在select_feature_name中,list形式
select_feature_name.append(k)
# 获取lasso后特征的索引
select_feature_index = []
# 将lasso后特征的名字转为list
name_list = list(select_feature_name)
# 将原始所有特征的名字转为list
all_name_list = list(self.name)
# 获取特征选择后特征在原始特征list中的索引位置,将所有索引位置存在select_feature_index中
for i in range(len(select_feature_name)):
index = all_name_list.index(name_list[i])
select_feature_index.append(index)
# 通过索引将选择后的特征矩阵赋值给select_feature
new_train_feature = self.X_train[:, select_feature_index]
new_test_feature = self.X_test[:, select_feature_index]
feature_weight = coef_mean[select_feature_index]
# 将输出值存入txt文件
lasso_txt.write('\nSelected feature index:\n')
lasso_txt.write(str(select_feature_index))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature weight: \n')
lasso_txt.write(str(feature_weight))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature name:\n')
lasso_txt.write(str(select_feature_name))
lasso_txt.write('\n---------------------------------------------\n')
lasso_txt.write('\nSelected feature appearance frequency:\n')
lasso_txt.write(str(select_feature_name_freq))
lasso_txt.write('\n---------------------------------------------\n')
return new_train_feature, new_test_feature, select_feature_name, \
select_feature_name_freq, feature_weight, select_feature_index
def logis_lasso(self, alpha, cv):
        '''Run logistic LASSO feature selection, optionally using cross-validation to choose the penalty parameter alpha.
        The result includes the selected training and test features, together with the feature names and weights; each feature name has one weight and the order matches.
        Inputs:
            alpha: penalty parameter (for LASSO this plays the role of alpha)
            cv: number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): the optimal lasso penalty parameter
            new_train_feature: selected training feature matrix
            new_test_feature: selected test feature matrix
            new_feature_name: names of the selected features
            feature_weight: weight of each selected feature
        '''
if self.cv_val is True:
logis_lasso = LogisticRegressionCV(Cs=alpha, cv=cv, penalty='l1')
logis_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(np.ravel(logis_lasso.coef_))
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
# 交叉验证得到的最佳lasso惩罚参数
best_alpha = logis_lasso.Cs_
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(logis_lasso, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
feature_weight = pd.Series(feature_weight)
return best_alpha, new_train_feature, new_test_feature, new_feature_name, feature_weight
else:
logis_lasso = LogisticRegression(C=alpha, penalty='l1')
logis_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(logis_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lasso_only')
os.makedirs(img_path, exist_ok=True)
# 将lasso中权重不为0的特征选择出来
model = SelectFromModel(logis_lasso, prefit=True)
# 分别将训练集和测试集的特征使用上述lasso进行筛选
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
# 所有特征的mask,保留的特征用True,被筛掉的特征用False
mask = model.get_support()
new_feature_name = []
feature_weight = []
# 根据mask将保留特征的名字和权重分别存储到
for bool, feature, coef in zip(mask, self.name, coef):
if bool:
new_feature_name.append(feature)
feature_weight.append(coef)
# 将训练集和测试集的保留特征加上特征名
new_train_feature = pd.DataFrame(data=X_new_train, columns=new_feature_name)
new_test_feature = pd.DataFrame(data=X_new_test, columns=new_feature_name)
            feature_weight = pd.Series(feature_weight)  # api: pandas.Series
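# Added usage sketch (not part of the original file). The inputs follow the
# class docstring: train/test splits, matching feature names, an output path,
# and cv_val to toggle grid-search cross-validation; the alpha grid is an
# assumption.
def _example_lasso_usage(X_train, X_test, y_train, y_test, feature_names):
    selector = lasso(X_train, X_test, y_train, y_test,
                     feature_name=feature_names, path='./lasso_results',
                     cv_val=True)
    return selector.lasso(alpha=np.logspace(-3, 1, 30), cv=5)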
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assertTrue(Week(weekday=0).isAnchored())
self.assertFalse(Week().isAnchored())
self.assertFalse(Week(2, weekday=2).isAnchored())
self.assertFalse(Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1), # n=-2 -> roll back two weeks. Tue
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(
|
Week(weekday=1)
|
pandas.core.datetools.Week
|
from os import path
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pathlib import Path
import ptitprince as pt
# ----------
# Loss Plots
# ----------
def save_loss_plot(path, loss_function, v_path=None, show=True):
df = pd.read_csv(path)
if v_path is not None:
vdf =
|
pd.read_csv(v_path)
|
pandas.read_csv
|
# Copyright (c) 2019. yoshida-lab. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Iterable
from copy import copy
import itertools
from multiprocessing import cpu_count
from typing import DefaultDict, List, Sequence, Union, Set
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
from pymatgen.core.composition import Composition as PMGComp
from sklearn.base import TransformerMixin, BaseEstimator
from xenonpy.datatools.preset import preset
from xenonpy.utils import TimedMetaClass, Switch
class BaseFeaturizer(BaseEstimator, TransformerMixin, metaclass=ABCMeta):
"""
Abstract class to calculate features from :class:`pandas.Series` input data.
Each entry can be in any format, such as a compound formula or a pymatgen crystal structure,
depending on the featurizer implementation.
This class has a similar structure to `matminer BaseFeaturizer`_ but follows a stricter convention.
That means you can embed this featurizer directly into a `matminer BaseFeaturizer`_ class implementation.::
class MatFeature(BaseFeaturizer):
def featurize(self, *x):
return <xenonpy_featurizer>.featurize(*x)
.. _matminer BaseFeaturizer: https://github.com/hackingmaterials/matminer/blob/master/matminer/featurizers/base_smc.py
**Using a BaseFeaturizer Class**
:meth:`BaseFeaturizer` implement :class:`sklearn.base.BaseEstimator` and :class:`sklearn.base.TransformerMixin`
that means you can use it in a scikit-learn way.::
featurizer = SomeFeaturizer()
features = featurizer.fit_transform(X)
You can also employ the featurizer as part of a ScikitLearn Pipeline object.
You would then provide your input data as an array to the Pipeline, which would
output the features as a :class:`pandas.DataFrame`.
:class:`BaseFeaturizer` also provides a way to retrieve proper references for a featurizer.
The ``__citations__`` returns a list of papers that should be cited.
The ``__authors__`` returns a list of people who wrote the featurizer.
These can also be accessed from the properties ``citations`` and ``authors``.
**Implementing a New BaseFeaturizer Class**
These operations must be implemented for each new featurizer:
- ``featurize`` - Takes a single material as input, returns the features of that material.
- ``feature_labels`` - Generates a human-meaningful name for each of the features. **Implement this as property**.
It is also suggested to implement these two **properties**:
- ``citations`` - Returns a list of citations in BibTeX format.
- ``implementors`` - Returns a list of people who contributed to implementing the featurizer.
All options of the featurizer must be set by the ``__init__`` function. All
options must be listed as keyword arguments with default values, and the
value must be saved as a class attribute with the same name or as a property
(e.g., argument `n` should be stored in `self.n`).
These requirements are necessary for
compatibility with the ``get_params`` and ``set_params`` methods of ``BaseEstimator``,
which enable easy interoperability with scikit-learn.
:meth:`featurize` must return a list of features in :class:`numpy.ndarray`.
.. note::
None of these operations should change the state of the featurizer. I.e.,
running each method twice should not produce different results, no class
attributes should be changed, and running one operation should not affect the
output of another.
"""
__authors__ = ['anonymous']
__citations__ = ['No citations']
def __init__(
self,
n_jobs: int = -1,
*,
on_errors: str = 'raise',
return_type: str = 'any',
parallel_verbose: int = 0,
):
"""
Parameters
----------
n_jobs
The number of jobs to run in parallel for both fit and predict. Set -1 to use all cpu cores (default).
Inputs ``X`` will be split into blocks and run on the available CPU cores.
When set to 0, input X will be treated as a single block and passed to ``Featurizer.featurize`` directly.
on_errors
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise' which will raise up the exception.
return_type
Specify the return type.
Can be ``any``, ``array`` and ``df``.
``array`` and ``df`` force return type to ``np.ndarray`` and ``pd.DataFrame`` respectively.
If ``any``, the return type depends on the input type.
Default is ``any``
parallel_verbose
The verbosity level: if non-zero, progress messages are printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported. Default is ``0``.
"""
self.return_type = return_type
self.n_jobs = n_jobs
self.on_errors = on_errors
self.parallel_verbose = parallel_verbose
self._kwargs = {}
@property
def return_type(self):
return self._return_type
@return_type.setter
def return_type(self, val):
if val not in {'any', 'array', 'df'}:
raise ValueError('`return_type` must be `any`, `array` or `df`')
self._return_type = val
@property
def on_errors(self):
return self._on_errors
@on_errors.setter
def on_errors(self, val):
if val not in {'nan', 'keep', 'raise'}:
raise ValueError('`on_errors` must be `nan`, `keep` or `raise`')
self._on_errors = val
@property
def parallel_verbose(self):
return self._parallel_verbose
@parallel_verbose.setter
def parallel_verbose(self, val):
if not isinstance(val, int):
raise ValueError('`parallel_verbose` must be int')
self._parallel_verbose = val
@property
def n_jobs(self):
return self._n_jobs
@n_jobs.setter
def n_jobs(self, n_jobs):
"""Set the number of threads for this """
if n_jobs < -1:
n_jobs = -1
if n_jobs > cpu_count() or n_jobs == -1:
self._n_jobs = cpu_count()
else:
self._n_jobs = n_jobs
def fit(self, X, y=None, **fit_kwargs):
"""Update the parameters of this featurizer based on available data
Args:
X - [list of tuples], training data
Returns:
self
"""
return self
# todo: Does fit_transform need to pass params to transform?
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X, **fit_params)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X, **fit_params)
def transform(self, entries: Sequence, *, return_type=None, **kwargs):
"""
Featurize a list of entries.
If `featurize` takes multiple inputs, supply inputs as a list of tuples.
Args
----
entries: list-like
A list of entries to be featurized.
return_type: str
Specify the return type.
Can be ``any``, ``array`` and ``df``.
``array`` and ``df`` force return type to ``np.ndarray`` and ``pd.DataFrame`` respectively.
If ``any``, the return type depends on the input type.
This is a temporary change that only has effect in the current transform.
Default is ``None`` for no changes.
Returns
-------
DataFrame
features for each entry.
"""
self._kwargs = kwargs
# Check inputs
if not isinstance(entries, Iterable):
raise TypeError('parameter "entries" must be a iterable object')
# Special case: Empty list
if len(entries) == 0:
return []
for c in Switch(self._n_jobs):
if c(0):
# Run the actual featurization
ret = self.featurize(entries, **kwargs)
break
if c(1):
ret = [self._wrapper(x) for x in entries]
break
if c():
ret = Parallel(n_jobs=self._n_jobs,
verbose=self._parallel_verbose)(delayed(self._wrapper)(x) for x in entries)
try:
labels = self.feature_labels
except NotImplementedError:
labels = None
if return_type is None:
return_type = self.return_type
if return_type == 'any':
if isinstance(entries, (pd.Series, pd.DataFrame)):
tmp = pd.DataFrame(ret, index=entries.index, columns=labels)
return tmp
if isinstance(entries, np.ndarray):
return np.array(ret)
return ret
if return_type == 'array':
return np.array(ret)
if return_type == 'df':
if isinstance(entries, (pd.Series, pd.DataFrame)):
return pd.DataFrame(ret, index=entries.index, columns=labels)
return pd.DataFrame(ret, columns=labels)
def _wrapper(self, x):
"""
An exception wrapper for featurize, used in featurize_many and
featurize_dataframe. featurize_wrapper changes the behavior of featurize
when ignore_errors is True in featurize_many/dataframe.
Args:
x: input data to featurize (type depends on featurizer).
Returns:
(list) one or more features.
"""
try:
# Pass a single input directly; unpack tuples/lists/arrays as multiple arguments.
if not isinstance(x, (tuple, list, np.ndarray)):
return self.featurize(x, **self._kwargs)
return self.featurize(*x, **self._kwargs)
except Exception as e:
if self._on_errors == 'nan':
return [np.nan] * len(self.feature_labels)
elif self._on_errors == 'keep':
return [e] * len(self.feature_labels)
else:
raise e
@abstractmethod
def featurize(self, *x, **kwargs):
"""
Main featurizer function, which has to be implemented
in any derived featurizer subclass.
Args
====
x: depends on featurizer
input data to featurize.
Returns
=======
any: numpy.ndarray
one or more features.
"""
@property
@abstractmethod
def feature_labels(self):
"""
Generate attribute names.
Returns:
([str]) attribute labels.
"""
@property
def citations(self):
"""
Citation(s) and reference(s) for this feature.
Returns:
(list) each element should be a string citation,
ideally in BibTeX format.
"""
return '\n'.join(self.__citations__)
@property
def authors(self):
"""
List of implementors of the feature.
Returns:
(list) each element should either be a string with author name (e.g.,
"<NAME>") or a dictionary with required key "name" and other
keys like "email" or "institution" (e.g., {"name": "<NAME>", "email": "<EMAIL>", "institution": "LBNL"}).
"""
return '\n'.join(self.__authors__)
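# A minimal sketch (not part of the original module) of a hypothetical BaseFeaturizer
# subclass, illustrating the contract described in the class docstring above: implement
# ``featurize`` plus the ``feature_labels`` property, and keep every ``__init__`` option
# as a same-named attribute so ``get_params``/``set_params`` from BaseEstimator keep working.
class FormulaLengthFeaturizer(BaseFeaturizer):
    """Hypothetical example featurizer: simple length counts of a formula string."""

    def __init__(self, n_jobs=-1, *, on_errors='raise', return_type='any'):
        super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type)

    def featurize(self, formula):
        # Return one value per entry in ``feature_labels``.
        return [len(formula), sum(c.isdigit() for c in formula)]

    @property
    def feature_labels(self):
        return ['formula_length', 'digit_count']

# Usage sketch: FormulaLengthFeaturizer(n_jobs=1).fit_transform(pd.Series(['SiO2', 'Fe2O3']))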
class BaseDescriptor(BaseEstimator, TransformerMixin, metaclass=TimedMetaClass):
"""
Abstract class to organize featurizers.
Examples
--------
.. code::
class MyDescriptor(BaseDescriptor):
def __init__(self, n_jobs=-1):
self.descriptor = SomeFeature1(n_jobs)
self.descriptor = SomeFeature2(n_jobs)
self.descriptor = SomeFeature3(n_jobs)
self.descriptor = SomeFeature4(n_jobs)
"""
def __init__(self, *, featurizers: Union[List[str], str] = 'all', on_errors: str = 'raise'):
"""
Parameters
----------
featurizers
Specify which Featurizer(s) will be used.
Default is 'all'.
on_errors
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise' which will raise up the exception.
"""
self.__featurizers__: Set[str] = set() # featurizers' names
self.__featurizer_sets__: DefaultDict[str, List[BaseFeaturizer]] = defaultdict(list)
self.featurizers = featurizers
self.on_errors = on_errors
@property
def on_errors(self):
return self._on_errors
@on_errors.setter
def on_errors(self, val):
if val not in {'nan', 'keep', 'raise'}:
raise ValueError('`on_errors` must be `nan`, `keep` or `raise`')
self._on_errors = val
for fea_set in self.__featurizer_sets__.values():
for fea in fea_set:
fea.on_errors = val
@property
def featurizers(self):
return self._featurizers
@featurizers.setter
def featurizers(self, val):
if isinstance(val, str):
if val != 'all':
self._featurizers = (val,)
else:
self._featurizers = val
elif isinstance(val, (tuple, List)):
self._featurizers = tuple(val)
else:
raise ValueError('parameter `featurizers` must be `all`, a featurizer name, or a list of featurizer names')
@property
def elapsed(self):
return self._timer.elapsed
def __setattr__(self, key, value):
if key == '__featurizer_sets__':
if not isinstance(value, defaultdict):
raise RuntimeError('Can not set "self.__featurizer_sets__" by yourself')
super().__setattr__(key, value)
if isinstance(value, BaseFeaturizer):
if value.__class__.__name__ in self.__featurizers__:
raise RuntimeError('Duplicated featurizer <%s>' % value.__class__.__name__)
self.__featurizer_sets__[key].append(value)
self.__featurizers__.add(value.__class__.__name__)
else:
super().__setattr__(key, value)
def __repr__(self):
return self.__class__.__name__ + ':\n' + \
'\n'.join(
[' |- %s:\n | |- %s' % (k, '\n | |- '.join(map(lambda s: s.__class__.__name__, v))) for k, v in
self.__featurizer_sets__.items()])
def _check_input(self, X, y=None, **kwargs):
def _reformat(x):
if x is None:
return x
keys = list(self.__featurizer_sets__.keys())
if len(keys) == 1:
if isinstance(x, list):
return pd.DataFrame(pd.Series(x), columns=keys)
if isinstance(x, np.ndarray):
if len(x.shape) == 1:
return pd.DataFrame(x, columns=keys)
if isinstance(x, pd.Series):
return pd.DataFrame(x.values, columns=keys, index=x.index)
if isinstance(x, pd.Series):
x = pd.DataFrame(x)
if isinstance(x, pd.DataFrame):
tmp = set(x.columns) | set(kwargs.keys())
if set(keys).isdisjoint(tmp):
raise KeyError('name of columns do not match any feature set')
return x
raise TypeError('you cannot use an array-like input '
'because there are multiple feature sets or the input is not 1-dimensional')
return _reformat(X), _reformat(y)
def _rename(self, **fit_params):
for k, v in fit_params.items():
if k in self.__featurizer_sets__:
self.__featurizer_sets__[v] = self.__featurizer_sets__.pop(k)
@property
def all_featurizers(self):
return list(self.__featurizers__)
def fit(self, X, y=None, **kwargs):
if not isinstance(X, Iterable):
raise TypeError('parameter "entries" must be a iterable object')
self._rename(**kwargs)
X, y = self._check_input(X, y)
for k, features in self.__featurizer_sets__.items():
if k in X:
for f in features:
if self._featurizers != 'all' and f.__class__.__name__ not in self._featurizers:
continue
if y is not None and k in y:
f.fit(X[k], y[k], **kwargs)
else:
f.fit(X[k], **kwargs)
return self
def transform(self, X, **kwargs):
if not isinstance(X, Iterable):
raise TypeError('parameter "entries" must be a iterable object')
if len(X) is 0:
return None
if 'return_type' in kwargs:
del kwargs['return_type']
results = []
X, _ = self._check_input(X, **kwargs)
for k, features in self.__featurizer_sets__.items():
if k in kwargs:
k = kwargs[k]
if k in X:
for f in features:
if self._featurizers != 'all' and f.__class__.__name__ not in self._featurizers:
continue
ret = f.transform(X[k], return_type='df', **kwargs)
results.append(ret)
return
|
pd.concat(results, axis=1)
|
pandas.concat
|
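# Minimal standalone sketch (values invented, not from the original module) of the
# column-wise concatenation used at the end of BaseDescriptor.transform above: each
# featurizer returns a DataFrame of its own feature columns, and pd.concat(..., axis=1)
# stitches them into one wide frame aligned on the shared index.
import pandas as pd

part_a = pd.DataFrame({'mean_volume': [1.0, 2.0]}, index=['SiO2', 'Fe2O3'])
part_b = pd.DataFrame({'mean_mass': [20.0, 53.2]}, index=['SiO2', 'Fe2O3'])
combined = pd.concat([part_a, part_b], axis=1)
# combined now has columns ['mean_volume', 'mean_mass'], one row per input entry.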
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
from statsmodels.nonparametric.kernel_regression import KernelReg
def find_extrema(s, bw='cv_ls'):
"""
Input:
s: prices as pd.series
bw: bandwidth as str or array-like
Returns:
prices: with 0-based index as pd.series
extrema: extrema of prices as pd.series
smoothed_prices: smoothed prices using kernel regression as pd.series
smoothed_extrema: extrema of smoothed_prices as pd.series
"""
# Copy series so we can replace index and perform non-parametric
# kernel regression.
prices = s.copy()
prices = prices.reset_index()
prices.columns = ['date', 'price']
prices = prices['price']
kr = KernelReg(
[prices.values],
[prices.index.to_numpy()],
var_type='c', bw=bw
)
f = kr.fit([prices.index])
# Use smoothed prices to determine local minima and maxima
smooth_prices =
|
pd.Series(data=f[0], index=prices.index)
|
pandas.Series
|
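# Hedged sketch (the remainder of find_extrema is truncated above): after kernel-regression
# smoothing, local extrema are typically located with scipy.signal.argrelextrema on the
# smoothed values; the data below is invented for illustration.
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema

smooth = pd.Series([1.0, 2.0, 3.0, 2.5, 2.0, 2.8, 3.5, 3.0])
local_max_idx = argrelextrema(smooth.values, np.greater)[0]  # array([2, 6])
local_min_idx = argrelextrema(smooth.values, np.less)[0]     # array([4])
smoothed_extrema = smooth.iloc[np.sort(np.concatenate([local_max_idx, local_min_idx]))]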
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits are False by default; RefPerc is True by default
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
from copy import deepcopy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftSize: the difference between 2 indices used for dValue and dt
windowSize: size of the rolling mean window or window_length of savgol_filter; the chosen filter technique is applied after fct
windowSize must be an even number
for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct is not None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder is None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
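# Hypothetical usage sketch for getDerivative (data and column name invented here):
#   df = pd.DataFrame({'p': np.random.rand(600)},
#                     index=pd.date_range('2021-01-01', periods=600, freq='S'))
#   dfDer = getDerivative(df, col='p', shiftSize=1, windowSize=60)
#   dfDer[['dt', 'dValue', 'dValueDt', 'dValueDtFiltered']].head()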
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns the max. pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
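# Example (hypothetical input): list(pairwise([1, 2, 3, 4])) -> [(1, 2), (2, 3), (3, 4)],
# i.e. each element paired with its successor.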
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=
|
pd.Timedelta('0 Seconds')
|
pandas.Timedelta
|
import pandas as pd
from ast import literal_eval
def col_to_dtg(df, label):
df[label] =
|
pd.to_datetime(df[label])
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None) # show all columns
target_feature = 'AGE_GROUP'
# fill or drop na value
def checkNA(data):
# drop unused data
data = data.drop(
['HCHK_YEAR', 'SIDO', 'TOT_CHOLE', 'TRIGLYCERIDE', 'HDL_CHOLE', 'LDL_CHOLE', 'HCHK_OE_INSPEC_YN', 'CRS_YN',
'TTR_YN', 'DATA_STD__DT'], axis=1)
data = data.dropna(subset=['AGE_GROUP', 'HEIGHT', 'WEIGHT', 'WAIST'], axis=0)
data = data.fillna(method='ffill')
data = data.fillna(method='bfill')
return data
# read data from csv file
def read_data():
data = pd.read_csv("health_checkup.csv")
data = checkNA(data)
return data
# for one-hot-encoding
def dummy_data(data, columns):
for column in columns:
data = pd.concat([data, pd.get_dummies(data[column], prefix=column)], axis=1)
data = data.drop(column, axis=1)
return data
def Preprocessing(feature, encode_list, scale_list):
# feature : dataframe of feature
# scaler
scaler_stndard = preprocessing.StandardScaler()
scaler_MM = preprocessing.MinMaxScaler()
scaler_robust = preprocessing.RobustScaler()
scaler_maxabs = preprocessing.MaxAbsScaler()
scaler_normalize = preprocessing.Normalizer()
scalers = [None, scaler_stndard, scaler_MM, scaler_robust, scaler_maxabs, scaler_normalize]
scalers_name = ["original", "standard", "minmax", "robust", "maxabs", "normalize"]
# encoder
encoder_ordinal = preprocessing.OrdinalEncoder()
# one-hot encoding => using pd.get_dummies() (preprocessing.OneHotEncoder() is not used)
encoders_name = ["ordinal", "onehot"]
# result box
result_dictionary = {}
i = 0
if encode_list == []:
for scaler in scalers:
if i == 0: # not scaling
result_dictionary[scalers_name[i]] = feature.copy()
else:
# ===== scalers
result_dictionary[scalers_name[i]] = feature.copy()
result_dictionary[scalers_name[i]][scale_list] = scaler.fit_transform(feature[scale_list]) # scaling
i = i + 1
return result_dictionary
for scaler in scalers:
if i == 0: # not scaling
result_dictionary[scalers_name[i] + "_ordinal"] = feature.copy()
result_dictionary[scalers_name[i] + "_ordinal"][encode_list] = encoder_ordinal.fit_transform(
feature[encode_list])
result_dictionary[scalers_name[i] + "_onehot"] = feature.copy()
result_dictionary[scalers_name[i] + "_onehot"] = dummy_data(result_dictionary[scalers_name[i] + "_onehot"],
encode_list)
else:
# ===== scalers + ordinal encoding
result_dictionary[scalers_name[i] + "_ordinal"] = feature.copy()
result_dictionary[scalers_name[i] + "_ordinal"][scale_list] = scaler.fit_transform(
feature[scale_list]) # scaling
result_dictionary[scalers_name[i] + "_ordinal"][encode_list] = encoder_ordinal.fit_transform(
feature[encode_list]) # encoding
# ===== scalers + OneHot encoding
result_dictionary[scalers_name[i] + "_onehot"] = feature.copy()
result_dictionary[scalers_name[i] + "_onehot"][scale_list] = scaler.fit_transform(
feature[scale_list]) # scaling
result_dictionary[scalers_name[i] + "_onehot"] = dummy_data(result_dictionary[scalers_name[i] + "_onehot"],
encode_list) # encoding
i = i + 1
return result_dictionary
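# Hypothetical usage sketch for Preprocessing (column names invented here): build every
# scaler/encoder combination for a feature frame and pick one variant by key.
#   variants = Preprocessing(feature_df, encode_list=['SEX'],
#                            scale_list=['HEIGHT', 'WEIGHT', 'WAIST'])
#   X = variants['standard_onehot']  # standard-scaled numeric columns + one-hot 'SEX'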
def plotCurrentResult(score, title):
plt.title(title)
x_values = range(1, len(score) + 1)
plt.xlabel('Parameter set')
if "Decision Tree" in score['current_model'][0]:
tempList = [_['max_depth'] for _ in score['params']]
plt.xticks(x_values, tempList)
elif "Support Vector Machine" in score['current_model'][0]:
tempList = [[_['C'], _['gamma']] for _ in score['params']]
plt.xticks(x_values, tempList)
else:
tempList = [_['n_neighbors'] for _ in score['params']]
plt.xticks(x_values, tempList)
plt.ylabel('mean score')
y_values = score['mean_test_score'].tolist()
plt.plot(x_values, y_values)
plt.show()
def plotCurrentResult2(score, title):
plt.title(title)
x_values = range(1, len(score) + 1)
xList = []
yList = []
plt.xlabel('Model states')
plt.ylabel('best score')
for _ in score:
xList.append([_['best-model'], _['best-param']])
yList.append(_['best-score'])
plt.xticks(x_values, xList)
plt.plot(x_values, yList)
plt.show()
def classification(data, target):
# temp best record variable
best_answer = {
'best-model': "",
'best-param': "",
'best-score': -1.0,
}
whole_score =
|
pd.DataFrame()
|
pandas.DataFrame
|
import xml.etree.ElementTree as ET
import sys
import warnings
import numpy as np
import os
from pathlib import Path
import pandas as pd
import shutil
import requests
import json
import yaml
from functools import partial
import fiona
from fiona.crs import to_string, from_string
from shapely.geometry import Polygon, LineString, Point, shape, mapping
from shapely.ops import unary_union, transform
import pyproj
import math
import gisutils
import lsmaker
try:
import matplotlib.pyplot as plt
except:
pass
from lsmaker.diagnostics import Diagnostics
# ## Functions #############################
def add_projection(line, point):
"""Add vertex to line at point,
if the closest point on the line isn't an end.
Parameters
----------
line : LineString
point : Point
Returns
-------
newline : LineString
Line with point added, or original line, if point coincides with end.
"""
l = line
mp = point
distance = l.project(mp)
if distance <= 0.0 or distance >= line.length:
return line
coords = list(l.coords)
for i, p in enumerate(l.coords):
pd = l.project(Point(p))
if pd == distance:
return line
elif pd > distance:
return LineString(coords[:i] + [(mp.x, mp.y)] + coords[i:])
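# Minimal example for add_projection (coordinates invented): Point(1, 1) projects onto the
# interior of the line, so the point itself is inserted as a new vertex.
#   add_projection(LineString([(0, 0), (2, 0)]), Point(1, 1)).coords[:]
#   # -> [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)]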
def add_vertices_at_testpoints(lssdf, tpgeoms, tol=200):
"""Add vertices to LinesinkData at locations of testpoints
(so that modeled flow observations are correct)
Parameters
----------
lssdf : DataFrame
DataFrame of linesink strings. Must contain 'geometry' column
of shapely LineStrings defining geometries of linesink strings,
in same coordinate system as testpoints.
tpgeoms : list
List of testpoint geometries.
tol : float
Tolerance, in coordinate system units, for considering lines
near the testpoints.
Returns
-------
geoms : list of geometries
New geometry column with added vertices.
"""
df = lssdf.copy()
for mp in tpgeoms:
# find all lines within tolerance
nearby = np.array([l.intersects(mp.buffer(tol)) for l in df.geometry])
ldf = df[nearby].copy()
# choose closest if two or more nearby lines
if len(ldf) > 1:
ldf['dist'] = [mp.distance(ll.interpolate(ll.project(mp)))
for ll in ldf.geometry.values]
ldf.sort_values('dist', inplace=True)
# if at least one line is nearby
if len(ldf) > 0:
ind = ldf.index[0]
l = ldf.geometry.values[0]
newline = add_projection(l, mp)
df.loc[ind, 'geometry'] = newline
#df.set_value(ind, 'geometry', newline)
return df.geometry.tolist()
def get_elevations_from_epqs(points, units='feet'):
"""From list of shapely points in lat, lon, returns list of elevation values
"""
if len(points) > 0:
print('querying National Map Elevation Point Query Service...')
elevations = [get_elevation_from_epqs(p.x, p.y, units=units) for p in points]
else:
elevations = []
return elevations
def get_elevation_from_epqs(lon, lat, units='feet'):
"""Returns an elevation value at a point location.
Notes
-----
example url for -91, 45:
http://nationalmap.gov/epqs/pqs.php?x=-91&y=45&units=Feet&output=json
Examples
--------
>>> get_elevation_from_epqs(-91, 45)
1139.778277
"""
url = 'http://nationalmap.gov/epqs/pqs.php?'
url += 'x={}&y={}&units={}&output=json'.format(lon, lat, units)
try:
#epqsdata = urlopen(url).readline()
response = requests.get(url)
elev = json.loads(response.text)['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation']
print(lat, lon, elev, units)
except:
e = sys.exc_info()
print(e)
print('Problem accessing Elevation Point Query Service. '
'Need an internet connection to get seepage lake elevations.'
"\nIf your internet is working, running the script again may work; sometime the EPQS can be temperamental")
elev = 0.0
try:
elev = float(elev)
except:
print(('Warning, invalid elevation of {} returned for {}, {}.\nSetting elevation to 0.'.format(elev, lon, lat)))
elev = 0.0
return elev
def _get_random_point_in_polygon(poly):
"""Generates a point within a polygon (for lakes where the centroid is not in the lake)"""
minx, miny, maxx, maxy = poly.bounds
while True:
p = Point(np.random.uniform(minx, maxx), np.random.uniform(miny, maxy))
if poly.contains(p):
return p
#def reproject(geoms, pr1, pr2):
# """Reprojects a list of geometries from coordinate system pr1 to pr2
# (given as proj strings)."""
# print('reprojecting from {} to {}...'.format(pr1, pr2))
# if not isinstance(geoms, list):
# geoms = [geoms]
# pr1 = pyproj.Proj(pr1, errcheck=True, preserve_units=True)
# pr2 = pyproj.Proj(pr2, errcheck=True, preserve_units=True)
# project = partial(pyproj.transform, pr1, pr2)
# return [transform(project, g) for g in geoms]
def w_parameter(B, lmbda):
"""Compute w parameter for estimating an effective conductance term
(i.e., when simulating Lakes using Linesinks instead of GFLOW's lake package)
If only larger lakes are simulated (e.g., > 1 km2), w parameter will be = lambda
see Haitjema 2005, "Dealing with Resistance to Flow into Surface Waters"
"""
if lmbda <= 0.1 * B:
w = lmbda
elif 0.1 * B < lmbda < 2 * B:
w = lmbda * np.tanh(B / (2 * lmbda))
else:
w = B / 2
return w
def width_from_arboate(arbolate, lmbda):
"""Estimate stream width in feet from arbolate sum in meters, using relationship
described by Feinstein et al (2010), Appendix 2, p 266.
"""
estwidth = 0.1193 * math.pow(1000 * arbolate, 0.5032)
w = 2 * w_parameter(estwidth, lmbda) # assumes stream is rep. by single linesink
return w
def lake_width(area, total_line_length, lmbda):
"""Estimate conductance width from lake area and length of flowlines running through it
"""
if total_line_length > 0:
estwidth = 1000 * (area / total_line_length) / 0.3048 # (km2/km)*(ft/km)
else:
estwidth = np.sqrt(area / np.pi) * 1000 / 0.3048 # (km)*(ft/km)
# see Haitjema 2005, "Dealing with Resistance to Flow into Surface Waters"
# basically if only larger lakes are simulated (e.g., > 1 km2), w parameter will be = lambda
# this assumes that GFLOW's lake package will not be used
w = w_parameter(estwidth, lmbda)
return w # feet
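# Worked numeric sketch (values invented) for the width helpers above, assuming a
# characteristic leakage length lmbda of 5000 ft:
#   width_from_arboate(50, 5000)  # estwidth = 0.1193*(50000**0.5032) ~ 27.6 ft;
#                                 # lmbda >> 2*estwidth, so w_parameter returns B/2 and
#                                 # the function returns 2*(B/2) ~ 27.6 ft
#   lake_width(2.0, 0.0, 5000)    # no flowlines: estwidth = sqrt(2/pi)*1000/0.3048 ~ 2618 ft;
#                                 # 0.1*B < lmbda < 2*B, so w = lmbda*tanh(B/(2*lmbda)) ~ 1280 ft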
def name(x):
"""Abbreviations for naming LinesinkData from names in NHDPlus
GFLOW requires linesink names to be 32 characters or less
"""
if x.GNIS_NAME:
# reduce name down with abbreviations
abb = {'Branch': 'Br',
'Creek': 'Crk',
'East': 'E',
'Flowage': 'Fl',
'Lake': 'L',
'North': 'N',
'Pond': 'P',
'Reservoir': 'Res',
'River': 'R',
'South': 'S',
'West': 'W',
"'": ''}
name = '{} {}'.format(x.name, x.GNIS_NAME)
for k, v in abb.items():
name = name.replace(k, v)
else:
name = '{} unnamed'.format(x.name)
return name[:32]
def uniquelist(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def closest_vertex_ind(point, shape_coords):
"""Returns index of closest vertices in shapely geometry object
Ugly but works
"""
crds = shape_coords
X = np.array([i[0] for i in crds])
Y = np.array([i[1] for i in crds])
dX, dY = X - point[0], Y - point[1]
closest_ind = np.argmin(np.sqrt(dX ** 2 + dY ** 2))
return closest_ind
def move_point_along_line(x1, x2, dist):
diff = (x2[0] - x1[0], x2[1] - x1[1])
return tuple(x2 - dist * np.sign(diff))
class LinesinkData:
maxlines = 4000
int_dtype = str # np.int64
dtypes = {'Label': str,
'HeadSpecified': float,
'StartingHead': float,
'EndingHead': float,
'Resistance': float,
'Width': float,
'Depth': float,
'Routing': np.int64,
'EndStream': np.int64,
'OverlandFlow': np.int64,
'EndInflow': np.int64,
'ScenResistance': str,
'Drain': np.int64,
'ScenFluxName': str,
'Gallery': np.int64,
'TotalDischarge': np.int64,
'InletStream': np.int64,
'OutletStream': np.int64,
'OutletTable': str,
'Lake': np.int64,
'Precipitation': float,
'Evapotranspiration': float,
'Farfield': bool,
'chkScenario': bool,
'AutoSWIZC': np.int64,
'DefaultResistance': float}
fcodes = {'Perennial': 46006,
'Intermittent': 46003,
'Uncategorized': 46000}
def __init__(self, infile=None, GFLOW_lss_xml=None):
# attributes
self._lsmaker_config_file_path = None # absolute path to config file
self.preproc = None
self.resistance = None
self.H = None # aquifer thickness in model units
self.k = None # hydraulic conductivity of the aquifer in model units
self.lmbda = None
self.ScenResistance = None
self.chkScenario = None
self.global_streambed_thickness = None # streambed thickness
self.ComputationalUnits = None # 'feet' or 'meters'; for XML output file
self.BasemapUnits = None
# elevation units multiplier (from NHDPlus cm to model units)
self.zmult = None
# model domain
self.farfield = None
self.routed_area = None
self.nearfield = None
self.prj = None
self.crs = None
#self.crs_str = None # self.crs, in proj string format
self.pyproj_crs = None # pyproj.CRS instance based on prj input
self.farfield_buffer = None
self.clip_farfield = None
self.split_by_HUC = None
self.HUC_shp = None
self.HUC_name_field = None
# simplification
self.refinement_areas = [] # list of n areas within routed area with additional refinement
self.nearfield_tolerance = None
self.routed_area_tolerance = None
self.farfield_tolerance = None
self.min_nearfield_order = None
self.min_routed_area_order = None
self.min_farfield_order = None
self.min_nearfield_wb_size = None
self.min_waterbody_size = None
self.min_farfield_wb_size = None
self.farfield_length_threshold = None
self.routed_area_length_threshold = None
self.drop_intermittent = None
self.drop_crossing = None
self.asum_thresh_ra = None
self.asum_thresh_nf = None
self.asum_thresh_ff = None
# NHD files
self.flowlines = None
self.elevslope = None
self.PlusFlowVAA = None
self.waterbodies = None
# columns to retain in NHD files (when joining to GIS lines)
# Note: may need to add method to handle case discrepancies
self.flowlines_cols = ['COMID', 'FCODE', 'FDATE', 'FLOWDIR', 'FTYPE', 'GNIS_ID', 'GNIS_NAME', 'LENGTHKM',
'REACHCODE', 'RESOLUTION', 'WBAREACOMI', 'geometry']
self.flowlines_cols_dtypes = {'COMID': self.int_dtype,
'FCODE': self.int_dtype,
'FDATE': str,
'FLOWDIR': str,
'FTYPE': str,
'GNIS_ID': self.int_dtype,
'GNIS_NAME': str,
'LENGTHKM': float,
'REACHCODE': str,
'RESOLUTION': str,
'WBAREACOMI': self.int_dtype,
'geometry': object}
self.elevslope_cols = ['MINELEVSMO', 'MAXELEVSMO']
self.elevslope_dtypes = {'MINELEVSMO': float,
'MAXELEVSMO': float}
self.pfvaa_cols = ['ArbolateSu', 'Hydroseq', 'DnHydroseq', 'StreamOrde']
self.pfvaa_cols_dtypes = {'ArbolateSu': float,
'Hydroseq': self.int_dtype,
'DnHydroseq': self.int_dtype,
'StreamOrde': np.int64}
self.wb_cols = ['AREASQKM', 'COMID', 'ELEVATION', 'FCODE', 'FDATE', 'FTYPE', 'GNIS_ID', 'GNIS_NAME',
'REACHCODE', 'RESOLUTION', 'geometry']
self.wb_cols_dtypes = {'AREASQKM': float,
'COMID': self.int_dtype,
'ELEVATION': float,
'FCODE': self.int_dtype,
'FDATE': str,
'FTYPE': str,
'GNIS_ID': self.int_dtype,
'GNIS_NAME': str,
'REACHCODE': str,
'RESOLUTION': str,
'geometry': object}
# could do away with above and have one dtypes list
self.dtypes.update(self.flowlines_cols_dtypes)
self.dtypes.update(self.elevslope_dtypes)
self.dtypes.update(self.pfvaa_cols_dtypes)
self.dtypes.update(self.wb_cols_dtypes)
# preprocessed files
self.DEM = None
self.elevs_field = None
self.DEM_zmult = None
self.flowlines_clipped = None
self.waterbodies_clipped = None
self.routed_mp = None
self.farfield_mp = None
self.preprocessed_lines = None
self.preprocdir = None
self.wb_centroids_w_elevations = None # elevations extracted during preprocessing routine
self.elevs_field = None # field in wb_centroids_w_elevations containing elevations
# outputs
self.outfile_basename = None
self.error_reporting = 'error_reporting.txt'
# attributes
self.from_lss_xml = False
self.df = pd.DataFrame() # working dataframe for translating NHDPlus data to linesink strings
self.lss = pd.DataFrame() # dataframe of GFLOW linesink strings (with attributes)
self.outsegs = pd.DataFrame()
self.confluences = pd.DataFrame()
# read in the configuration file
if infile is not None and infile.endswith('.xml'):
self.read_lsmaker_xml(infile)
if infile is not None:
for extension in 'yml', 'yaml':
if infile.endswith(extension):
self.read_lsmaker_yaml(infile)
# or create instance from a GFLOW LSS XML file
elif GFLOW_lss_xml is not None:
self.from_lss_xml = True
self.df = self.read_lss(GFLOW_lss_xml)
# nearfield can't be coarser than the routed area
if infile is not None:
self.min_nearfield_order = min((self.min_nearfield_order,
self.min_routed_area_order))
# create a pyproj CRS instance
# set the CRS (basemap) length units
self.set_crs(prjfile=self.prj)
# logging/diagnostics
# todo: more robust/detail logging
if Path(self.error_reporting).exists():
try:
Path(self.error_reporting).unlink()
except:
pass
with open(self.error_reporting, 'w') as efp:
efp.write('Linesink-maker version {}\n'.format(lsmaker.__version__))
self.dg = Diagnostics(lsm_object=self, logfile=self.error_reporting)
def __eq__(self, other):
"""Test for equality to another linesink object."""
if not isinstance(other, self.__class__):
return False
exclude_attrs = ['_lsmaker_config_file_path',
'crs',
'crs_str',
'inpars',
'cfg',
'efp',
]
# LinesinkData instances from lss xml won't have some attributes
# or some df columns that came from NHDPlus
compare_df_columns = slice(None)
if self.from_lss_xml or other.from_lss_xml:
# todo: expose default values that are being set on write of lss_xml
# (many of the columns in LinesinkData instance from lss xml aren't in
# LinesinkData instance that was created from scratch, because the variables
# are only being set in LinesinkData.write_lss method)
compare_df_columns = set(self.df.columns).intersection(other.df.columns)
# the geometries and coordinates won't be exactly the same
# explicitly compare the coordinates separately
compare_df_columns = compare_df_columns.difference({'geometry',
'ls_coords',
'width'
})
for k, v in self.__dict__.items():
# items to skip
# todo: implement pyproj.CRS class to robustly compare CRSs
if k in exclude_attrs:
continue
elif self.from_lss_xml or other.from_lss_xml:
if k not in ('df', 'ComputationalUnits', 'BasemapUnits'):
continue
elif k not in other.__dict__:
return False
elif type(v) == bool:
if not v == other.__dict__[k]:
return False
elif k == 'df':
if len(v) == 0 and len(other.__dict__[k]) == 0:
continue
try:
df1 = v[compare_df_columns]
df2 = other.__dict__[k][compare_df_columns]
#
|
pd.testing.assert_frame_equal(df1, df2)
|
pandas.testing.assert_frame_equal
|
import pandas as pd
import datatable as dt
import zipfile
import re
import os
import time
from datetime import timedelta
import sys
def pd_options():
desired_width = 300
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', None)
import warnings
warnings.filterwarnings('ignore')
def directory(directory_path):
"""Puts you in the right directory. Gives you list of files in path"""
os.chdir(re.findall("^(.*[\\\/])", directory_path)[0])
csv_files = os.listdir(directory_path)
return csv_files
def get_csv_names_from_list(paths):
if not isinstance(paths, list):
raise TypeError('We need a list of csv file paths here')
dfs = []
for i in paths:
if i.endswith('.csv'):
df_name = re.findall("\w+(?=\.)", i)[0]
dfs.append(df_name)
print(str(",".join(dfs)))
print(str(".shape,".join(dfs)), ".shape", sep='')
def read_data(path_ending_with_filename=None, return_df=False, method=None, dataframes=None):
"""
e.g.
read_data(path)
sample_submission, test, train = read_data(path, True)
---
Reads single csv or list of csvs or csvs in zip.
Available methods:
'dt' = Datatable fread
TODO: Add more read methods, e.g., parquet, pickle, arrow, etc.
"""
dt.options.progress.enabled = True
if isinstance(path_ending_with_filename, str):
if path_ending_with_filename.endswith('.zip'):
zf = zipfile.ZipFile(path_ending_with_filename)
if dataframes:
dataframes = [x.strip(" ") for x in dataframes.split(",")]
if len(dataframes) == 1:
x = dataframes[0] + '.csv'
dfs = {}
start_time = time.monotonic()
if method == 'dt':
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(zf.open(x)).to_pandas()
else:
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(zf.open(x))
end_time = time.monotonic()
print(timedelta(seconds=end_time - start_time))
keys = list(dfs.keys())
values = list(dfs.values())
for i, k in enumerate(dfs):
print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
f"{values[i].shape[1]:,}", ")",
sep="")
if return_df:
return pd.DataFrame.from_dict(values[0])
else:
files = [x + '.csv' for x in dataframes]
else:
files = zf.namelist()
if return_df:
dfs = {}
start_time = time.monotonic()
for x in files:
if x.endswith('.csv'):
if method == 'dt':
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(zf.open(x)).to_pandas()
else:
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(zf.open(x))
end_time = time.monotonic()
print(timedelta(seconds=end_time - start_time))
keys = list(dfs.keys())
values = list(dfs.values())
for i, k in enumerate(dfs):
print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
f"{values[i].shape[1]:,}", ")",
sep="")
return dfs.values()
else:
if not dataframes:
csv_file_names = [format(re.findall("\w+(?=\.)", zf.namelist()[i])[0]) for i in
range(len(zf.namelist())) if zf.namelist()[i].endswith('.csv')]
# if dataframes:
#
# file_pos = [i for i, x in enumerate(csv_file_names)]
# else:
file_pos = [i for i, x in enumerate(zf.namelist()) if x.endswith('.csv')]
uncompressed_dir = [f"{(zf.filelist[i].file_size / 1024 ** 2):.2f} Mb" for i in file_pos]
compressed = [f"{(zf.filelist[i].compress_size / 1024 ** 2):.2f} Mb" for i in file_pos]
print(pd.concat([pd.Series(csv_file_names), pd.Series(uncompressed_dir), pd.Series(compressed)],
axis=1,
keys=["file_names", "uncompressed", "compressed"]))
print()
print(*csv_file_names, sep=",")
else:
# SINGLE FILE
if path_ending_with_filename.endswith(".csv"):
df_name = re.findall("\w+(?=\.)", path_ending_with_filename)[0]
if method == 'dt':
df = dt.fread(path_ending_with_filename)
df = df.to_pandas()
else:
df = pd.read_csv(path_ending_with_filename)
if return_df:
return df
else:
print(df_name, df.shape)
else:
# CSVS IN DIRECTORY
dfs = {}
os.chdir(path_ending_with_filename)
if dataframes:
dataframes = [x.strip(" ") for x in dataframes.split(",")]
csvs_in_directory = [x for x in os.listdir(path_ending_with_filename) if x.endswith('.csv')]
files = list(set(csvs_in_directory) & set([x + '.csv' for x in dataframes]))
else:
files = [x for x in os.listdir(path_ending_with_filename) if x.endswith('.csv')]
for x in files:
if method == 'dt':
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(x).to_pandas()
else:
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(x)
keys = list(dfs.keys())
values = list(dfs.values())
if return_df:
for i, k in enumerate(dfs):
print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
f"{values[i].shape[1]:,}", ")",
sep="")
return dfs.values()
else:
uncompressed_dir = [f"{(sys.getsizeof(dfs[i]) / 1024 ** 2):.2f} Mb" for i in dfs]
print(pd.concat([pd.Series(keys),
|
pd.Series(uncompressed_dir)
|
pandas.Series
|
"""
inserting data with pandas
C - CREATE
R - READ
U - UPDATE
D - DELETE
"""
import pandas as pd
BASE_PATH = 'base.csv'
# CREATE
def post(dados: dict):
df_antigo = pd.DataFrame(get())
df_novo = pd.DataFrame(dados, index=[0])
df = df_antigo.append(df_novo)
df.to_csv(BASE_PATH, sep=',', index=False)
# READ
def get(id: int=None):
try:
df = pd.read_csv(BASE_PATH, sep=',')
lista_dados = df.to_dict('records')
if not id:
return df.to_dict('records') # [{"id":1 ...}, {"id": 2 ...}, ...]
for dado in lista_dados:
if dado['id'] == id:
return [dado]
except:
return []
# UPDATE
def put(id: int, dados_alterar):
lista_antiga = get()
lista_dados_novos = []
for dado in lista_antiga:
if dado["id"] == id:
dado = dados_alterar
lista_dados_novos.append(dado)
df = pd.DataFrame(lista_dados_novos)
df.to_csv(BASE_PATH, sep=',', index=False)
return
# DELETE
def delete(id: int):
lista_antiga = get()
lista_dados_novos = []
for dado in lista_antiga:
if not dado["id"] == id:
lista_dados_novos.append(dado)
df = pd.DataFrame(lista_dados_novos)
df.to_csv(BASE_PATH, sep=',', index=False)
# DELETE ALL
def clear():
df =
|
pd.DataFrame([])
|
pandas.DataFrame
|
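# Usage sketch for the CSV-backed CRUD helpers above (record values invented here):
#   post({'id': 1, 'nome': 'Ana'})          # append a row to base.csv
#   get(1)                                  # -> [{'id': 1, 'nome': 'Ana'}]
#   put(1, {'id': 1, 'nome': 'Ana Maria'})  # rewrite the row with id 1
#   delete(1)                               # drop the row with id 1
# Note: DataFrame.append used in post() is deprecated in recent pandas; pd.concat is the
# usual replacement.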
from flask import *
import pandas as pd
from textblob import Word
from textblob import TextBlob
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack
import numpy as np
from sklearn import model_selection
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import LogisticRegression
import nltk
from nltk.corpus import wordnet as wn
import ssl
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/predict', methods=['GET'])
def predict():
input_text = [request.args.get('REVIEW')]
input_text = pd.DataFrame(input_text, columns=['a'])
# Set to lowercase
input_text['a'] = input_text['a'].str.lower()
# Remove symbols
input_text['a'] = input_text['a'].str.replace('[^\w\s]', '')
# Add Sentiment column
input_text['sentiment'] = input_text['a'].apply(
lambda x: TextBlob(x).sentiment[0])
# Vectorize
input_transform = tfidf.transform(input_text['a'])
input_transform = hstack((input_transform, np.array(input_text['sentiment'])[:,
None]))
prediction = model.predict(input_transform)[0]
return str(prediction)
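# Usage sketch (host/port are assumptions, not from the original code): with the app
# running, a prediction can be requested via a plain GET, e.g.
#   curl "http://localhost:5000/predict?REVIEW=the%20food%20was%20great"
# The response body is the model's prediction (presumably the star rating learned from
# the Yelp data below) returned as a string.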
if __name__ == '__main__':
# Setting up stop words for data preprocessing
nltk.download('wordnet')
stop = stopwords.words('english')
food_sets = wn.synsets('food')
food_stop_words = list()
for food_set in food_sets:
food_stop_words += list(
set([w.replace('_', ' ') for s in food_set.closure(lambda s: s.hyponyms()) for w in s.lemma_names()]))
# Load Yelp dataset
df = pd.read_json('review0.json', lines=True)
# Create new dataframe consisting of just stars and text from the Yelp dataset
col = ['stars', 'text']
cleaned_df = df[col]
cleaned_df = cleaned_df[
|
pd.notnull(cleaned_df['text'])
|
pandas.notnull
|
"""
birdspotter is a python package providing a toolkit to measure the social influence and botness of twitter users.
"""
import simplejson
from tqdm import tqdm
import wget
import zipfile
import pandas as pd
import pickle as pk
import numpy as np
from birdspotter.utils import *
import traceback
import collections
from xgboost.sklearn import XGBClassifier
import xgboost as xgb
import os
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import dateutil
from birdspotter.user_influence import P, influence
from itertools import islice
import ijson
class BirdSpotter:
"""Birdspotter measures the social influence and botness of twitter users.
This class takes a twitter dump (in json or jsonl format) and extracts bot and influence metrics for the users.
The class will download word2vec embeddings if they are not specified.
It exposes processed data from the tweet dumps.
Attributes:
cascadeDataframe (:class:`pandas.DataFrame`): A dataframe of tweets ordered by cascades and time (the column casIndex denotes which cascade each tweet belongs to)
featureDataframe (:class:`pandas.DataFrame`): A dataframe of users with their respective botness and influence scores.
hashtagDataframe (:class:`pandas.DataFrame`): A dataframe of the text features for hashtags.
"""
def __init__(self, path, tweetLimit = None, embeddings='download', quiet=False):
"""Birdspotter measures the social influence and botness of twitter users.
Parameters
----------
path : str
The path to a tweet json or jsonl file containing the tweets for analysis.
tweetLimit : int, optional
A limit on the number of tweets to process if the tweet dump is too large; if None, all tweets are processed. By default None
embeddings : collections.Mapping or str, optional
A method for loading word2vec embeddings, which accepts a path to embeddings, a mapping object or a pickle object. Refer to setWord2VecEmbeddings for details. By default 'download'
quiet : bool, optional
Determines if debug statements will be printed or not, by default False
"""
self.word2vecEmbeddings = None
self.quiet = quiet
self.extractTweets(path, tweetLimit = tweetLimit, embeddings=embeddings)
def __pprint(self, message):
if not self.quiet:
print(message)
def setWord2VecEmbeddings(self, embeddings='download', forceReload=True):
"""Sets the word2vec embeddings. The embeddings can be a path to a pickle or txt file, a mapping object or the string 'download' which will automatically download and use the FastText 'wiki-news-300d-1M.vec' if not available in the current path.
Parameters
----------
embeddings : collections.Mapping or str or None, optional
A method for loading word2vec embeddings. A path to a embeddings pickle or txt file, a mapping object, the string 'download', by default 'download'. If None, it does nothing.
forceReload : bool, optional
If the embeddings are already set, forceReload determines whether to update them, by default True
"""
if not forceReload and self.word2vecEmbeddings is not None:
return
if embeddings is None:
return
elif isinstance(embeddings, str) and embeddings == 'download':
if os.path.isfile('./wiki-news-300d-1M.vec'):
self.__pprint("Loading Fasttext wiki-news-300d-1M.vec Word2Vec Embeddings...")
with open('./wiki-news-300d-1M.vec',"r") as f:
model = {}
if not self.quiet:
pbar = tqdm(total=1000000)
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
if not self.quiet:
pbar.update(1)
if not self.quiet:
pbar.close()
self.word2vecEmbeddings = model
self.__pprint("Finished loading Word2Vec Embeddings")
else:
try:
self.__pprint("Downloading Fasttext embeddings")
filename = wget.download('https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip')
self.__pprint('\n')
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall('./')
self.__pprint("Loading downloaded Fasttext wiki-news-300d-1M.vec Word2Vec Embeddings...")
with open('./wiki-news-300d-1M.vec',"r") as f:
model = {}
if not self.quiet:
pbar = tqdm(total=1000000)
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
if not self.quiet:
pbar.update(1)
if not self.quiet:
pbar.close()
self.word2vecEmbeddings = model
self.__pprint("Finished loading Word2Vec Embeddings")
except Exception as e:
print(e)
elif isinstance(embeddings, str):
embeddingsPath = embeddings
_,fileextension = os.path.splitext(embeddingsPath)
if fileextension == '.pickle':
self.__pprint("Loading Word2Vec Embeddings...")
with open(embeddingsPath,"rb") as f:
self.word2vecEmbeddings = pk.load(f)
self.__pprint("Finished loading Word2Vec Embeddings")
elif fileextension == '.txt':
self.__pprint("Loading Word2Vec Embeddings...")
with open(embeddingsPath,"r") as f:
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
self.word2vecEmbeddings = model
self.__pprint("Finished loading Word2Vec Embeddings")
elif isinstance(embeddings, collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
self.word2vecEmbeddings = embeddings
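# Accepted forms, as documented above (paths are illustrative):
#   self.setWord2VecEmbeddings('download')             # fetch wiki-news-300d-1M.vec if absent
#   self.setWord2VecEmbeddings('./embeddings.pickle')  # pickled mapping of word -> vector
#   self.setWord2VecEmbeddings('./embeddings.txt')     # plain-text word2vec format
#   self.setWord2VecEmbeddings({'a': np.zeros(300)})   # any mapping object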
def extractTweets(self, filePath, tweetLimit = None, embeddings='download'):
"""Extracts tweets from a json or jsonl file and generates cascade, feature and hashtag dataframes as class attributes.
Note that we use the file extension to determine how to handle the file.
Parameters
----------
filePath : str
The path to a tweet json or jsonl file containing the tweets for analysis.
tweetLimit : int, optional
A limit on the number of tweets to process if the tweet dump is too large; if None, all tweets are processed. By default None
embeddings : collections.Mapping or str or None, optional
A method for loading word2vec embeddings. A path to an embeddings pickle or txt file, a mapping object, or the string 'download'; by default 'download'. If None, it does nothing.
Returns
-------
DataFrame
A dataframe of users' botness and influence scores (and other features).
"""
# Appending to a DataFrame row by row is inefficient, because it creates a
# new dataframe each time. It is better to collect everything in a list and then concat.
user_list = []
tweet_list = []
w2v_content_list = []
w2v_description_list = []
cascade_list = []
self.__pprint("Starting Tweet Extraction")
_,fileextension = os.path.splitext(filePath)
raw_tweets = []
with open(filePath, encoding="utf-8") as f:
if fileextension == '.jsonl':
raw_tweets = map(simplejson.loads, list(islice(f, tweetLimit)))
elif fileextension == '.json':
raw_tweets = list(islice(ijson.items(f, 'item'),tweetLimit))
else:
raise Exception('Not a valid tweet dump. Needs to be either jsonl or json, with the extension explicit.')
if not self.quiet:
pbar = tqdm()
for j in raw_tweets:
if not self.quiet:
pbar.update(1)
try:
temp_user = {}
temp_tweet = {}
temp_text = (j['text'] if 'text' in j.keys() else j['full_text'])
temp_content = {'status_text': temp_text, 'user_id' : j['user']['id_str']}
temp_description = {'description':j['user']['description'], 'user_id' : j['user']['id_str']}
temp_cascade = {}
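# A retweet joins the cascade of the status it retweets (its id and original timestamp);
# any other tweet starts a cascade of its own.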
if 'retweeted_status' in j:
temp_cascade['cascade_id'] = j['retweeted_status']['id_str']
temp_cascade['original_created_at'] = j['retweeted_status']['created_at']
temp_cascade['created_at'] = j['created_at']
temp_cascade['retweeted'] = True
else:
temp_cascade['cascade_id'] = j['id_str']
temp_cascade['original_created_at'] = j['created_at']
temp_cascade['created_at'] = j['created_at']
temp_cascade['retweeted'] = False
temp_cascade['follower_count'] = j['user']['followers_count']
temp_cascade['status_text'] = temp_text
temp_cascade['screen_name'] = j['user']['screen_name']
temp_cascade['hashtag_entities'] = [e['text'] for e in j['entities']['hashtags']]
temp_user['screen_name'] = j['user']['screen_name']
temp_user['url'] = j['user']['profile_image_url_https']
temp_user['description'] = j['user']['description']
temp_user['followers_count'] = j['user']['followers_count']
temp_cascade['user_id'] = j['user']['id_str']
temp_user['user_id'] = j['user']['id_str']
temp_tweet['user_id'] = j['user']['id_str']
temp_user.update(getTextFeatures('name',j['user']['name']))
temp_user.update(getTextFeatures('location',j['user']['location']))
temp_user.update(getTextFeatures('description',j['user']['description']))
for key in ['statuses_count', 'listed_count', 'friends_count', 'followers_count']:
temp_user[key] = j['user'][key]
temp_user['verified'] = 1 if j['user']['verified'] else 0
temp_user['ff_ratio'] = (temp_user['followers_count'] + 1)/(temp_user['followers_count'] + temp_user['friends_count'] + 1)
n = datetime.now()
temp_user['years_on_twitter'] = (datetime(n.year, n.month, n.day) - datetime.strptime(j['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')).days/365
temp_user['statuses_rate'] = (temp_user['statuses_count'] + 1)/(temp_user['years_on_twitter'] + .001)
temp_user['tweets_to_followers'] = (temp_user['statuses_count'] + 1)/(temp_user['followers_count'] + 1)
temp_user['retweet_count'] = j['retweet_count']
temp_user['favorite_count'] = j['favorite_count']
temp_user['favourites_count'] = j['user']['favourites_count']
temp_tweet.update(getTextFeatures('status_text',temp_text))
temp_tweet['n_tweets'] = 1 if 'retweeted_status' in j and ('quoted_status_id' in j) else 0
temp_tweet['n_retweets'] = 1 if 'retweeted_status' in j else 0
temp_tweet['n_quotes'] = 1 if 'quoted_status_id' in j else 0
temp_tweet['n_timeofday'] = hourofweekday(j['created_at'])
temp_tweet.update(getSource(j['source']))
user_list.append(temp_user)
tweet_list.append(temp_tweet)
w2v_content_list.append(temp_content)
w2v_description_list.append(temp_description)
cascade_list.append(temp_cascade)
except Exception as err:
traceback.print_tb(err.__traceback__)
if not self.quiet:
pbar.close()
# We are assuming that user data doesn't change much and if it does, we take that 'latest' as our feature
userDataframe = pd.DataFrame(user_list).fillna(0).set_index('user_id')
userDataframe = userDataframe[~userDataframe.index.duplicated(keep='last')]
tweetDataframe = pd.DataFrame(tweet_list).fillna(0).set_index('user_id')
n_retweets = tweetDataframe['n_retweets'].groupby('user_id').sum()
n_quoted = tweetDataframe['n_quotes'].groupby('user_id').sum()
tweetDataframe = tweetDataframe.groupby('user_id').mean()
tweetDataframe['n_retweets'] = n_retweets
tweetDataframe['n_quotes'] = n_quoted
self.cascadeDataframe = pd.DataFrame(cascade_list).fillna(0)
self.__reformatCascadeDataframe()
contentDataframe = pd.DataFrame(w2v_content_list).set_index('user_id')
descriptionDataframe = pd.DataFrame(w2v_description_list).set_index('user_id')
descriptionDataframe = descriptionDataframe[~descriptionDataframe.index.duplicated(keep='last')]
self.setWord2VecEmbeddings(embeddings, forceReload=False)
self.featureDataframe = userDataframe.join(tweetDataframe)
if self.word2vecEmbeddings is not None:
w2vDataframe = self.__computeVectors(contentDataframe, descriptionDataframe)
self.featureDataframe = self.featureDataframe.join(w2vDataframe)
#Computes the features for all the hashtags. Is currently not protected from namespace errors.
self.hashtagDataframe = self.__computeHashtagFeatures(contentDataframe)
if 'influence' in self.hashtagDataframe.columns:
self.hashtagDataframe.drop('influence', axis=1, inplace=True, errors='ignore')
self.featureDataframe = self.featureDataframe.join(self.hashtagDataframe, rsuffix='_hashtag')
self.featureDataframe = self.featureDataframe[~self.featureDataframe.index.duplicated()]
return self.featureDataframe
def getBotAnnotationTemplate(self, filename="annotationTemplate.csv"):
"""Writes a CSV with the list of users and a blank column "isbot" to be annotated.
A helper function which outputs a CSV to be annotated by a human. The output is a list of users with the blank "isbot" column.
Parameters
----------
filename : str
The name of the file to write the CSV
Returns
-------
Dataframe
A dataframe of the users, with their screen names and a blank "isbot" column.
"""
csv_data = (self.cascadeDataframe.groupby(['screen_name', 'user_id']).apply(lambda d: '').reset_index(name='isbot'))
csv_data.to_csv(filename)
return csv_data
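# Typical round-trip (filenames are illustrative): write the template, have a human fill in
# the 'isbot' column, then pass the labelled file to trainClassifierModel below (note that
# trainClassifierModel reads .csv files with a tab separator).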
def __computeHashtagFeatures(self, contentdf):
"""Computes the hashtag tfidf features as a dataframe"""
hashtagSeries = contentdf['status_text'].str.findall(r'(?<!\w)#\w+').str.join(" ").str.replace("#","")
userIndex = hashtagSeries.index
crop = hashtagSeries.tolist()
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(crop)
transformer = TfidfTransformer(smooth_idf=False)
tfidf = transformer.fit_transform(X)
column_names = vectorizer.get_feature_names()
hashtagDataframe = pd.DataFrame(tfidf.toarray(), columns=column_names, index=userIndex)
return hashtagDataframe
def __computeVectors(self, contentdf, descriptiondf):
"""Computes the word2vec features as a dataframe"""
ud = {}
for index,row in contentdf.iterrows():
vec = np.zeros(len(self.word2vecEmbeddings['a']))
tol = 0
for w in parse(row['status_text']):
if w in self.word2vecEmbeddings:
vec = vec + np.array(self.word2vecEmbeddings[w])
tol += 1
if tol != 0 and not np.isnan(tol):
vec = vec/tol
if index in ud:
ud[index].append(vec)
else:
ud[index] = [vec]
for k,v in ud.items():
ud[k] = np.array(v).mean(axis=0)
conw2v = pd.DataFrame(ud)
conw2v = conw2v.T
conw2v.index.name = 'user_id'
conw2v.columns = ["con_w2v_" + str(i) for i in conw2v.columns]
ud = {}
for index,row in descriptiondf.iterrows():
vec = np.zeros(len(self.word2vecEmbeddings['a']))
tol = 0
for w in parse(row['description']):
if w in self.word2vecEmbeddings:
vec = vec + np.array(self.word2vecEmbeddings[w])
tol += 1
if tol != 0 and not np.isnan(tol):
vec = vec/tol
ud[index] = [vec]
for k,v in ud.items():
ud[k] = np.array(v).mean(axis=0)
desw2v = pd.DataFrame(ud)
desw2v = desw2v.T
desw2v.index.name = 'user_id'
desw2v.columns = ["des_w2v_" + str(i) for i in desw2v.columns]
return conw2v.join(desw2v)
def loadClassifierModel(self, fname):
"""Loads the XGB booster model, from the saved XGB binary file
Parameters
----------
fname : str
The path to the XGB binary file
"""
booster = xgb.Booster()
booster.load_model(fname)
self.clf = booster
def trainClassifierModel(self, labelledDataPath, targetColumnName='isbot', saveFileName=None):
"""Trains the bot detection classifier.
Trains the bot detection classifier, using an XGB classifier.
Due to the way XGB works, the features used are the intersection between the features from the tweet dump and the features from the training set.
Parameters
----------
labelledDataPath : str
A path to the data with bot labels, as either a csv or a pickled dataframe
targetColumnName : str, optional
The name of the column describing whether a user is a bot or not, by default 'isbot'
saveFileName : str, optional
The path of the file to save the XGB model binary to, which can be loaded with loadClassifierModel, by default None
"""
params = {
'learning_rate' :0.1,
'n_estimators':80,
'max_depth':5, #16
'subsample':0.6,
'colsample_bytree':1,
'objective': 'binary:logistic',
'n_jobs':10,
'silent':True,
'seed' :27
}
_,fileextension = os.path.splitext(labelledDataPath)
if fileextension == '.csv':
botrnot = pd.read_csv(labelledDataPath, sep ="\t")
elif fileextension == '.pickle':
with open(labelledDataPath,'rb') as f:
botrnot = pk.load(f)
if 'is_bot' in botrnot.columns:
botTarget = botrnot['is_bot']
elif targetColumnName in botrnot.columns:
botTarget = botrnot[targetColumnName]
else:
raise Exception("The target column was not specified and cannot be found in the data. Please specify your target column accordingly.")
self.columnNameIntersection = list(set(self.featureDataframe.columns.values).intersection(set(botrnot.columns.values)))
botrnot = botrnot[self.columnNameIntersection]
train = xgb.DMatrix(botrnot.values, botTarget.values, feature_names=botrnot.columns.values)
self.clf = xgb.train(params, train, 80)
if saveFileName is not None:
self.clf.save_model(saveFileName)
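# Usage sketch (paths are illustrative):
#   bs.trainClassifierModel('labelled_users.csv', targetColumnName='isbot',
#                           saveFileName='botness_model.xgb')
#   bs.loadClassifierModel('botness_model.xgb')   # reload the saved booster later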
def getBotness(self):
"""Adds the botness of users to the feature dataframe.
It requires the tweets to have been extracted and the classifier to be trained; otherwise the corresponding exceptions are raised.
Returns
-------
DataFrame
The current feature dataframe of users, with associated botness scores appended.
Raises
------
Exception
Tweets haven't been extracted yet. Need to run extractTweets.
"""
if not hasattr(self, 'clf'):
self.loadClassifierModel(os.path.join(os.path.dirname(__file__), 'data', 'pretrained_botness_model.xgb'))
if not hasattr(self, 'featureDataframe'):
raise Exception("Tweets haven't been extracted yet")
if not hasattr(self, 'columnNameIntersection'):
with open(os.path.join(os.path.dirname(__file__), 'data', 'standard_bot_features'), 'r') as f:
standard_bot_features = set(f.read().splitlines())
self.columnNameIntersection = list(set(self.featureDataframe.columns.values).intersection(standard_bot_features))
testdf = self.featureDataframe[self.columnNameIntersection]
test = xgb.DMatrix(testdf.values, feature_names=self.columnNameIntersection)
bdf = pd.DataFrame()
bdf['botness'] = self.clf.predict(test)
bdf['user_id'] = testdf.index
__botnessDataframe = bdf.set_index('user_id')
self.featureDataframe = self.featureDataframe.join(__botnessDataframe)
self.featureDataframe = self.featureDataframe[~self.featureDataframe.index.duplicated()]
return self.featureDataframe
def __reformatCascadeDataframe(self):
""" Reformats the cascade dataframe for influence estimation"""
self.cascadeDataframe['magnitude'] = self.cascadeDataframe['follower_count']
cascades = []
groups = self.cascadeDataframe.groupby('cascade_id')
self.__pprint('Reformatting cascades')
if not self.quiet:
pbar = tqdm(total=len(groups))
# Group the tweets by id
for i, g in groups:
g = g.reset_index(drop=True)
min_time = dateutil.parser.parse(g['original_created_at'][0])
g['timestamp'] = pd.to_datetime(g['created_at']).values.astype('datetime64[s]')
g['min_time'] = min_time
g['min_time'] = g['min_time'].values.astype('datetime64[s]')
g['diff'] = g['timestamp'].sub(g['min_time'], axis=0)
g['time'] = g['diff'].dt.total_seconds()
g['time'] = g['time'] - np.min(g['time'])
g = g.sort_values(by=['time'])
cascades.append(g)
if not self.quiet:
pbar.update(1)
if not self.quiet:
pbar.close()
self.cascadeDataframe = pd.concat(cascades)  # pandas.concat
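# An end-to-end sketch of the class above; the dump path is illustrative and, because no
# classifier is trained here, getBotness() falls back to the pretrained model shipped with
# the package.
def _example_birdspotter_run(dump_path='tweets.jsonl'):
    bs = BirdSpotter(dump_path, embeddings='download', quiet=True)
    features = bs.getBotness()  # featureDataframe now carries a 'botness' column
    bs.getBotAnnotationTemplate('to_annotate.csv')
    return features.sort_values('botness', ascending=False).head(10)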
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# TODO: reversed comparisons (scalar on the left) do not behave consistently yet;
# see the commented-out checks below.
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
# invalid axis
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])  # pandas.Series
#!/usr/bin/python3
"""
Calculate MOS scores from a SIP call log and output a csv with the same filename (plus a .csv extension)
"""
import sys
import pandas as pd
import mos_score as mos
import time
from datetime import date
import statistics as st
def txt(filename = "", colname = "MOS Score"):
if (filename):
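# Assumed (hypothetical) filename layout, e.g. "sip_call_log_2021-05-01T120000.txt": four
# underscore-separated parts where the fourth starts with the YYYY-MM-DD log date, so
# split('_')[3][:10] recovers the date; anything else falls back to today's date below.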
if len(filename.split('_')) == 4:
log_date = filename.split('_')[3][:10] #YYYY-MM-DD
else:
# use today
log_date = date.today().strftime("%Y-%m-%d")
with open(filename, 'r') as fp:
line = fp.readline()
with open(filename + ".csv", 'w') as empty_csv:
pass
prevline = ""
cnt = 1
RX = False
latency = ""
jitter = ""
loss = ""
previous_pkt_loss = 0
real_time_pkt_size = 0
KBps = 0
loss_time_list = []
current_loss = 0
current_loss_col = []
current_latency = []
current_jitter = []
col = []
col2 = []
col3 = []
col4 = []
timestamp = 0
last_total_pkt = 0
current_total_pkt = 0
measure_count = 0
short_call_MOS = 0
short_call_MOS_col = []
cumulative_short_call_loss = 0
cumulative_short_call_latency = []
cumulative_short_call_jitter = []
while line:
if ("pjsua_app_common.c !" in line):
log_time = line.split()[0]
time_struct = time.strptime(log_date + "-" + log_time, "%Y-%m-%d-%H:%M:%S.%f")
timestamp = time.mktime(time_struct) + float('0.' + log_time.split('.')[-1])
if ("Call time:" in line):
row = ""
latency = ""
jitter = ""
loss = ""
if ("RX pt" in line):
RX = True
if ("TX " in line):
RX = False
if RX:
if ("total " in line):
split_line = line.split()
if ("Kpkt" in line):
total_pkts = float(split_line[1][0:-4]) * 1000
current_total_pkt = total_pkts
else:
total_pkts = float(split_line[1][0:-3])
current_total_pkt = total_pkts
recv = split_line[2]
if ("KB" in recv):
recv_size = float(recv[0:-2])
else:
recv_size = float(recv[0:-2]) * 1000
real_time_pkt_size = recv_size / total_pkts
KBps_string = split_line[6][5:9]
if KBps_string[3] == "K":
KBps = float(KBps_string[0:3])
else:
KBps = float(KBps_string)
if ("pkt loss" in line):
split_line = line.split()
loss = split_line[2][1:-3]
col2.append(str(loss))
raw_loss = int(split_line[1][5::])
if (raw_loss != previous_pkt_loss):
net_loss = raw_loss - previous_pkt_loss
current_loss = net_loss
current_loss_col.append(str(net_loss))
loss_time = net_loss * real_time_pkt_size / KBps
previous_pkt_loss = raw_loss
else:
loss_time = 0
current_loss = 0
current_loss_col.append("0")
loss_time_list.append(str(loss_time))
if ("jitter" in line):
split_line = line.split()
jitter = split_line[3]
current_jitter.append(str(split_line[5]))
cumulative_short_call_jitter.append(float(split_line[5]))
col3.append(str(jitter))
if ("RTT" in line):
split_line = line.split()
latency = split_line[4]
col4.append(str(latency))
current_latency.append(str(split_line[6]))
cumulative_short_call_latency.append(float(split_line[6]))
mos_value = mos.calculate_mos(latency, jitter, loss, timestamp)
col.append(str(mos_value))
if (measure_count == 0):
short_call_MOS = mos_value
elif measure_count % 100 == 0:
total_pkts = current_total_pkt - last_total_pkt
short_call_loss = cumulative_short_call_loss / total_pkts
short_call_MOS = mos.calculate_mos(st.mean(cumulative_short_call_latency), st.mean(cumulative_short_call_jitter), short_call_loss, timestamp)
short_call_MOS_col.append(str(short_call_MOS))
cumulative_short_call_loss = 0
cumulative_short_call_latency = []
cumulative_short_call_jitter = []
else:
total_pkts = current_total_pkt - last_total_pkt
short_call_loss = cumulative_short_call_loss / total_pkts
short_call_MOS = mos.calculate_mos(st.mean(cumulative_short_call_latency), st.mean(cumulative_short_call_jitter), short_call_loss, timestamp)
short_call_MOS_col.append(str(short_call_MOS))
cumulative_short_call_loss += current_loss
short_call_MOS_col.append(str(short_call_MOS))
measure_count += 1
prevline = line
line = fp.readline()
cnt += 1
try:
csv_input =
|
pd.read_csv(filename + ".csv")
|
pandas.read_csv
|
"""
Two way BANOVA
"""
from __future__ import division
import numpy as np
import pymc3 as pm
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
from scipy.stats import norm
from theano import tensor as tt
# THE DATA.
# Specify data source:
data_source = ["QianS2007" , "Salary" , "Random" , "Ex19.3"][1]
# Load the data:
if data_source == "QianS2007":
data_record = pd.read_csv("QianS2007SeaweedData.txt")
# Logistic transform the COVER value:
# Used by Appendix 3 of QianS2007 to replicate Ramsey and Schafer (2002).
data_record['COVER'] = -np.log((100/data_record['COVER']) -1)
y = data_record['COVER'].values
x1 = pd.Categorical(data_record['TREAT']).codes
x1names = data_record['TREAT'].values
x2 = pd.Categorical(data_record['BLOCK']).codes
x2names = data_record['BLOCK'].values
Ntotal = len(y)
Nx1Lvl = len(set(x1))
Nx2Lvl = len(set(x2))
x1contrastDict = {'f_Effect':[1/2, -1/2, 0, 1/2, -1/2, 0],
'F_Effect':[0, 1/2, -1/2, 0, 1/2, -1/2],
'L_Effect':[1/3, 1/3, 1/3, -1/3, -1/3, -1/3 ]}
x2contrastDict = None # np.zeros(Nx2Lvl)
x1x2contrastDict = None # np.zeros(Nx1Lvl*Nx2Lvl, Nx1Lvl)
if data_source == "Salary":
data_record = pd.read_csv("Salary.csv")
y = data_record['Salary']
x1 = pd.Categorical(data_record['Org']).codes
x1names = data_record['Org'].unique()
x1names.sort()
x2 = pd.Categorical(data_record['Post']).codes
x2names = data_record['Post'].unique()
x2names.sort()
Ntotal = len(y)
Nx1Lvl = len(set(x1))
Nx2Lvl = len(set(x2))
x1contrastDict = {'BFINvCEDP':[1, -1, 0, 0],
'CEDPvTHTR':[0, 1, 0, -1]}
x2contrastDict = {'FT1vFT2':[1, -1, 0],
'FT2vFT3':[0,1,-1]}
x1x2contrastDict = {'CHEMvTHTRxFT1vFT3':np.outer([0, 0, 1, -1], [1,0,-1]),
'BFINvOTHxFT1vOTH':np.outer([1, -1/3, -1/3, -1/3], [1, -1/2, -1/2])}
if data_source == "Random":
np.random.seed(47405)
ysdtrue = 3
a0true = 100
a1true = np.array([2, 0, -2]) # sum to zero
a2true = np.array([3, 1, -1, -3]) # sum to zero
a1a2true = np.array([[1,-1,0, 0], [-1,1,0,0], [0,0,0,0]])
npercell = 8
index = np.arange(len(a1true)*len(a2true)*npercell)
data_record = pd.DataFrame(index=index, columns=["y","x1","x2"])
rowidx = 0
for x1idx in range(0, len(a1true)):
for x2idx in range(0, len(a2true)):
for subjidx in range(0, npercell):
data_record['x1'][rowidx] = x1idx
data_record['x2'][rowidx] = x2idx
data_record['y'][rowidx] = float(a0true + a1true[x1idx] + a2true[x2idx]
+ a1a2true[x1idx, x2idx] + norm.rvs(loc=0, scale=ysdtrue, size=1)[0])
rowidx += 1
y = data_record['y']
x1 =
|
pd.Categorical(data_record['x1'])
|
pandas.Categorical
|
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
import yfinance as yf
def register_callbacks(dashapp):
@dashapp.callback(
Output('main_chart', 'figure'),
[Input('drop_select', 'value'),
Input('time_period_select', 'value'),
Input('linearpercent', 'value'),
Input('prepost', 'value')])
def update_chart(ticker, time_period, lin_per, pre_post):
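# Interval lookup: short look-back periods use finer bars (1m for 1d/5d, 5m for 1mo);
# any other period falls back to daily bars via d.get(time_period, '1d').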
d = {'1d': '1m', '5d': '1m', '1mo': '5m'}
df = pd.DataFrame(columns = ['Close', 'period_open_price', 'Symbol', 'percent_delta'])
for t in ticker:
output = pd.DataFrame(yf.Ticker(t).history(period = time_period, interval = d.get(time_period, '1d'), prepost = pre_post))
output['Symbol'] = t
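# The first Close of the requested period is used as the baseline, so percent_delta is the
# percentage move of each bar relative to the period's opening price.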
output['period_open_price'] = output['Close'][0]
output['percent_delta'] = round((((output['Close'] - output['period_open_price']) / output['period_open_price']) * 100), 2)
output = output.loc[:, ['Close', 'period_open_price', 'Symbol', 'percent_delta']]
df =
|
pd.concat([df, output])
|
pandas.concat
|
from django.shortcuts import render
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from core.models import *
import pandas as pd
class PanelPageView(LoginRequiredMixin, TemplateView):
template_name = 'panel.html'
def get_context_data(self, **kwargs):
context = super(PanelPageView, self).get_context_data(**kwargs)
# qs = list(ClientModel.objects.values_list('name__name', 'task__task',
# 'subtask__subtask', 'time_spent',
# 'date_added',
# 'dec_name').order_by(
# '-date_added'))
qs = list(ClientModel.objects.values_list('name__name', 'task__task',
'subtask__subtask', 'time_spent'))
df = pd.DataFrame(qs, columns=['name', 'task', 'subtask', 'time_spent'])
df.to_csv('./media/recent.csv', sep=';', index=None)
df =
|
pd.read_csv('./media/recent.csv', sep=';')
|
pandas.read_csv
|
import data_manipulation as dm
import pandas as pd
def parse_google_takeout_semantic_location_history(json_filepath):
"""
Parse Google takeout semantic location history
Parameters
----------
json_filepath: str
Json file path
Returns
-------
csv
Return 2 csv, placeVisit & activitySegment
"""
df =
|
pd.read_json(json_filepath)
|
pandas.read_json
|
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in
|
range(index.nlevels)
|
pandas.compat.range
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: talkhiami
"""
import unittest
import pandas as pd
from code.feature_extraction.hashtag_count import HashtagCount
class HashtagCountTest(unittest.TestCase):
def setUp(self):
self.INPUT_COLUMN = "input"
self.hashtag_count = HashtagCount(self.INPUT_COLUMN)
def test_input_columns(self):
self.assertListEqual(self.hashtag_count._input_columns, [self.INPUT_COLUMN])
#testing
def test_exist_hashtag_count(self):
input_text = "['analytics', 'ai', 'datascience', 'bigdata', 'dataethics', 'datascientists', 'orms', 'machinelearning']"
input_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import gc
import argparse
import pandas as pd
import numpy as np
import keras.backend as K
from keras.preprocessing.image import Iterator
from src.data.category_idx import map_categories
from keras.layers.embeddings import Embedding
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import merge
from keras.models import Model
from keras.initializers import Ones
from keras.optimizers import Adam
from keras.models import load_model
from keras.constraints import non_neg
N_CATEGORIES = 5270
CATEGORIES_SPLIT = 2000
MODEL_FILE = 'model.h5'
VALID_PREDICTIONS_FILE = 'valid_predictions.csv'
TOP_K = 10
class SpecialIterator(Iterator):
def __init__(self, images, categories, n_models, batch_size=32, shuffle=True, seed=None):
self.x = images
self.products = images[['product_id', 'img_idx']].drop_duplicates().sort_values(['product_id', 'img_idx'])
self.categories = categories.sort_index()
self.num_classes = N_CATEGORIES
self.samples = self.products.shape[0]
self.n_models = n_models
super(SpecialIterator, self).__init__(self.samples, batch_size, shuffle, seed)
def next(self):
index_array = next(self.index_generator)[0]
prods = self.products.iloc[index_array]
idx_map = {(row.product_id, row.img_idx): i for i, row in enumerate(prods.itertuples())}
cats = self.categories.loc[prods.product_id]
images = prods.merge(self.x, on=['product_id', 'img_idx'], how='left')
p = np.zeros((len(index_array), self.num_classes, self.n_models), dtype=np.float32)
for row in images.itertuples():
p[idx_map[(row.product_id, row.img_idx)], row.category_idx, row.model] = 0 if np.isnan(row.prob) else row.prob
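# Inputs: a matrix of model ids plus each model's probabilities for the two category blocks;
# target: the true category index of each product.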
return [np.repeat(np.arange(self.n_models).reshape(1, self.n_models), len(index_array), axis=0),
p[:, :CATEGORIES_SPLIT, :], p[:, CATEGORIES_SPLIT:, :]], cats['category_idx'].as_matrix()
def train_ensemble_nn(preds_csv_files, prod_info_csv, category_idx_csv, model_dir, lr, seed, batch_size, epochs):
prod_info = pd.read_csv(prod_info_csv)
category_idx = pd.read_csv(category_idx_csv)
all_preds = []
model_inx = {}
for i, csv in enumerate(preds_csv_files):
preds = pd.read_csv(csv)
preds['model'] = i
model_inx[i] = csv
all_preds.append(preds)
print('Assigned indexes to models: ', model_inx)
all_preds = pd.concat(all_preds)
n_models = len(preds_csv_files)
categories = prod_info[prod_info.product_id.isin(all_preds.product_id.unique())][['product_id', 'category_id']]
categories['category_idx'] = map_categories(category_idx, categories.category_id)
categories = categories[['product_id', 'category_idx']]
categories = categories.set_index('product_id')
it = SpecialIterator(all_preds, categories, n_models, batch_size=batch_size, seed=seed, shuffle=True)
model_file = os.path.join(model_dir, MODEL_FILE)
if os.path.exists(model_file):
model = load_model(model_file)
else:
model_inp = Input(shape=(n_models,), dtype='int32')
preds_cat1_inp = Input((CATEGORIES_SPLIT, n_models))
preds_cat2_inp = Input((N_CATEGORIES - CATEGORIES_SPLIT, n_models))
mul_cat1 = Embedding(n_models, 1, input_length=n_models, embeddings_initializer=Ones(),
embeddings_constraint=non_neg())(model_inp)
mul_cat1 = Flatten()(mul_cat1)
mul_cat2 = Embedding(n_models, 1, input_length=n_models, embeddings_initializer=Ones(),
embeddings_constraint=non_neg())(model_inp)
mul_cat2 = Flatten()(mul_cat2)
def op(x):
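# x = [preds_cat1, weights_cat1, preds_cat2, weights_cat2]: scale each model's class scores by its
# learned non-negative weight, concatenate the two category blocks, sum over models, then
# renormalise so every sample's scores form a probability distribution over all categories.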
z_left = x[0].dimshuffle(1, 0, 2) * x[1]
z_right = x[2].dimshuffle(1, 0, 2) * x[3]
z = K.concatenate([z_left, z_right], axis=0)
v = K.sum(z, axis=-1)
p = K.sum(v, axis=-2)
return (v / p).dimshuffle(1, 0)
x = merge([preds_cat1_inp, mul_cat1, preds_cat2_inp, mul_cat2], mode=op, output_shape=(N_CATEGORIES,))
model = Model([model_inp, preds_cat1_inp, preds_cat2_inp], x)
np.random.seed(seed)
model.compile(optimizer=Adam(lr=lr), loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
model.fit_generator(it, steps_per_epoch=it.samples / it.batch_size, epochs=epochs)
print('First {} categories model weights:'.format(CATEGORIES_SPLIT))
print(model.get_layer('embedding_1').get_weights())
print('Remaining {} categories model weights:'.format(N_CATEGORIES - CATEGORIES_SPLIT))
print(model.get_layer('embedding_2').get_weights())
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
model.save(os.path.join(model_dir, MODEL_FILE))
def predict_valid(preds_csv_files, prod_info_csv, category_idx_csv, model_dir, batch_size):
model_file = os.path.join(model_dir, MODEL_FILE)
if os.path.exists(model_file):
model = load_model(model_file)
else:
raise ValueError("Model doesn't exist")
prod_info = pd.read_csv(prod_info_csv)
category_idx = pd.read_csv(category_idx_csv)
all_preds = []
model_inx = {}
for i, csv in enumerate(preds_csv_files):
preds =
|
pd.read_csv(csv)
|
pandas.read_csv
|
import cuenote
import pandas
import io
import os
import sys
os.system(f"{sys.executable} -m pip install -U pytd==1.2.0 td-client")
import pytd
TD_API_KEY = os.environ.get("td_apikey")
TD_API_SERVER = os.environ.get("td_endpoint")
TD_DATABASE = os.environ.get("td_database")
days_refresh_logs = 14
def main():
# Create a TD client instance.
client = pytd.Client(
apikey=TD_API_KEY, endpoint=TD_API_SERVER, database=TD_DATABASE
)
# Download log files from Cuenote, then upload CSVs to TD
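# Only deliveries whose delivery_time falls within the last days_refresh_logs days are refreshed.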
delivery_ids = client.query(
"select delivery_id from jobinfo where TD_INTERVAL(TD_TIME_PARSE(delivery_time), '-{days_refresh_logs}d')".format(days_refresh_logs=days_refresh_logs))
for delivery_id in delivery_ids["data"]:
# Delivery Log
result = cuenote.call_api(
"delivery/{delivery_id}/log".format(delivery_id=delivery_id[0]), {"with_delivlog": "true"})
df = pandas.read_csv(io.BytesIO(result.content),
header=0, encoding="UTF-8")
df["delivery_id"] = delivery_id[0]
df.columns = [
"email_address_id",
"email_address",
"status_updated_at",
"status_loc",
"status",
"mx_host_name",
"connection_ip_port",
"smtp_status_updated_at",
"smtp_status_loc",
"smtp_status",
"smtp_response",
"bounce_received_at",
"bounce_type",
"bounce_summary",
"bounce_content",
"bounce_address",
"bounce_log_id",
"unreachable_at",
"all_retries",
"first_retry",
"last_retry",
"retry_count",
"last_retry_status_loc",
"last_retry_status",
"last_retry_response",
"device",
"content",
"additional_information",
"delivery_id"
]
df["status_updated_at"] = pandas.to_datetime(
df["status_updated_at"]
)
df["smtp_status_updated_at"] = pandas.to_datetime(
df["smtp_status_updated_at"]
)
df["bounce_received_at"] = pandas.to_datetime(
df["bounce_received_at"]
)
df["unreachable_at"] = pandas.to_datetime(df["unreachable_at"])
if len(df) > 0:
client.load_table_from_dataframe(
df, "log_deliv_stg", writer="bulk_import", if_exists="append"
)
# Click Log
result = cuenote.call_api("delivery/{delivery_id}/log/click".format(
delivery_id=delivery_id[0]), {"with_delivlog": "true"})
df = pandas.read_csv(io.BytesIO(result.content),
header=0, encoding="UTF-8")
df["delivery_id"] = delivery_id[0]
df.columns = [
"clicked_at",
"clicked_url",
"email_address",
"type",
"click_count",
"device",
"content",
"additional_information",
"delivery_id"
]
df["clicked_at"] =
|
pandas.to_datetime(df["clicked_at"])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
import json
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pymongo import ASCENDING, DESCENDING
from src.data import conn
from src.data.setting import TRADE_BEGIN_DATE
from src.data.future.setting import NAME2CODE_MAP, COLUMNS_MAP
from src.data.future.utils import get_download_file_index, move_data_files, get_exist_files, \
split_symbol
from src.data.setting import RAW_HQ_DIR, INSTRUMENT_TYPE
from src.util import get_post_text, get_html_text
from log import LogHandler
# TIME_WAITING = 1
log = LogHandler('data.log')
# ----------------------------------download data from web-----------------
def is_data_empty(data):
"""
Check whether the downloaded data exists
:param data: pd.DataFrame or str
:return: True if the data does not exist (empty or invalid)
"""
if isinstance(data, pd.DataFrame):
return data.empty
elif not isinstance(data, str):
return True
elif re.search('doctype', data, re.I):
return True
elif len(data) < 100:
return True
else:
return False
def download_cffex_hq_by_date(date: datetime, category=0):
"""
Fetch daily trading data from the China Financial Futures Exchange (CFFEX); available since datetime(2010, 4, 30)
http://www.cffex.com.cn/sj/hqsj/rtj/201903/13/20190313_1.csv
No options data yet; the option branch is a reserved interface
:param date: datetime
:param category: quote type, 0 = futures or 1 = options
:return str
"""
assert date <= datetime.today()
assert category in [0, 1]
url_template = 'http://www.cffex.com.cn/fzjy/mrhq/{}/{}/{}_1.csv'
url = url_template.format(date.strftime('%Y%m'), date.strftime('%d'), date.strftime('%Y%m%d'))
return get_html_text(url)
def download_czce_hq_by_date(date: datetime, category=0):
"""
Fetch daily trading data from the Zhengzhou Commodity Exchange (CZCE)
http://www.czce.com.cn/cn/DFSStaticFiles/Future/2019/20190314/FutureDataDaily.txt
http://www.czce.com.cn/cn/DFSStaticFiles/Future/2019/20190314/FutureDataDaily.htm
Options since datetime(2017, 4, 19)
http://www.czce.com.cn/cn/DFSStaticFiles/Option/2018/20180816/OptionDataDaily.htm
http://www.czce.com.cn/cn/DFSStaticFiles/Option/2017/20171109/OptionDataDaily.htm
datetime(2015, 10, 8)
http://www.czce.com.cn/cn/exchange/2015/datadaily/20150821.htm
http://www.czce.com.cn/cn/exchange/2015/datadaily/20150930.txt
datetime(2010, 8, 24)
http://www.czce.com.cn/cn/exchange/jyxx/hq/hq20100806.html
datetime(2005, 4, 29)
:param date: datetime
:param category: quote type, 0 = futures or 1 = options
:return pd.DataFrame
"""
assert date <= datetime.today()
assert category in [0, 1]
index = 0
ret = pd.DataFrame()
if date > datetime(2015, 10, 7):
template = ['http://www.czce.com.cn/cn/DFSStaticFiles/Future/{}/{}/FutureDataDaily.htm',
'http://www.czce.com.cn/cn/DFSStaticFiles/Option/{}/{}/OptionDataDaily.htm']
url_template = template[category]
url = url_template.format(date.year, date.strftime('%Y%m%d'))
elif date > datetime(2010, 8, 23):
url_template = 'http://www.czce.com.cn/cn/exchange/{}/datadaily/{}.htm'
url = url_template.format(date.year, date.strftime('%Y%m%d'))
index = 3
elif date > datetime(2005, 4, 28):
url_template = 'http://www.czce.com.cn/cn/exchange/jyxx/hq/hq{}.html'
url = url_template.format(date.strftime('%Y%m%d'))
index = 1
else:
return pd.DataFrame()
text = get_html_text(url)
if is_data_empty(text):
return ret
tables = pd.read_html(text, header=0)
df = tables[index]
bflag = df.empty or len(df.columns) < 10 or len(df.columns) > 20
if not bflag:
return df
# 处理特殊的例外情况 2017-12-27 index=3
for df in tables:
bflag = df.empty or len(df.columns) < 10 or len(df.columns) > 20
if not bflag:
return df
return ret
def download_shfe_hq_by_date(date: datetime, category=0):
"""
Fetch daily trading data from the Shanghai Futures Exchange (SHFE); futures data starts 20020108/20090105 (earlier dates can also be fetched), options data starts 2018921
http://www.shfe.com.cn/data/dailydata/kx/kx20190318.dat
http://www.shfe.com.cn/data/dailydata/option/kx/kx20190315.dat
:param date: datetime
:param category: quote type, 0 = futures or 1 = options
:return str
"""
assert date <= datetime.today()
assert category in [0, 1]
url_template = ['http://www.shfe.com.cn/data/dailydata/kx/kx{}.dat',
'http://www.shfe.com.cn/data/dailydata/option/kx/kx{}.dat']
url = url_template[category].format(date.strftime('%Y%m%d'))
return get_html_text(url)
def download_dce_hq_by_date(date: datetime, code='all', category=0):
"""
Fetch daily trading data from the Dalian Commodity Exchange (DCE); futures data starts 20050104, options data starts 2017331
url = 'http://www.dce.com.cn//publicweb/quotesdata/dayQuotesCh.html'
url = 'http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html'
form.data =
dayQuotes.variety : all/y/m
dayQuotes.trade_type : 0/1  (0: futures, 1: options)
year : 2019
month : 2  (actual month - 1)
day : 14
exportFlag : txt  (excel: xls)
:param code: commodity code
:param date: datetime
:param category: quote type, 0 = futures or 1 = options
:return str
"""
assert date <= datetime.today()
assert category in [0, 1]
url = 'http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html'
form_data = {'dayQuotes.variety': code,
'dayQuotes.trade_type': category,
'year': date.year,
'month': date.month - 1,
'day': date.day,
'exportFlag': 'txt'}
return get_post_text(url, form_data)
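# Minimal usage sketch (illustrative only): fetch all DCE futures quotes for 2019-02-14 as raw text.
# The site expects month - 1 in the form data, which the function already handles.
#   raw_txt = download_dce_hq_by_date(datetime(2019, 2, 14), code='all', category=0)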
def download_hq_by_date(date, file_path, market='dce', category=0):
"""
Download all quote data for one day from the exchange website and save it to disk
:param date: the date for which data is needed
:param file_path: path of the file to write
:param market: exchange code
:param category: 0: futures  1: options
:return: True if the data was downloaded and saved, otherwise False
"""
assert category in [0, 1]
assert market in ['dce', 'czce', 'shfe', 'cffex']
get_exchange_hq_func = {'cffex': download_cffex_hq_by_date,
'czce': download_czce_hq_by_date,
'shfe': download_shfe_hq_by_date,
'dce': download_dce_hq_by_date}
data = get_exchange_hq_func[market](date, category=category)
date_str = date.strftime('%Y%m%d')
if is_data_empty(data):
log.warning('{} {} data:{} is not downloaded! '.format(market, date_str, category))
# time.sleep(np.random.rand() * TIME_WAITING * 3)
return False
if market == 'czce':
data.to_csv(file_path, encoding='gb2312')
else:
file_path.write_text(data)
return True
def download_hq_by_dates(market, start, category=0):
"""
根据日期连续下载交易所日交易数据
:param start:
:param market:
:param category: 行情类型, 0期货 或 1期权
:return True False: 说明不用下载数据
"""
assert category in [0, 1]
assert market in ['dce', 'czce', 'shfe', 'cffex']
target = RAW_HQ_DIR[category] / market
file_index = get_download_file_index(target, start=start)
if file_index.empty:
return False
for dt in file_index:
print('{} downloading {} {} hq:{} data!'.format(
datetime.now().strftime('%H:%M:%S'), market, dt.strftime('%Y-%m-%d'), category))
date_str = dt.strftime('%Y%m%d')
file_path = target / '{}.day'.format(date_str)
download_hq_by_date(dt, file_path, market, category)
# time.sleep(np.random.rand() * TIME_WAITING)
return True
def convert_deliver(symbol, date):
"""
Extract the delivery month from a contract symbol
:param symbol:
:param date:
:return:
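Illustrative examples (assumed symbol formats):
    convert_deliver('CF905', datetime(2019, 3, 14)) -> '1905'   (decade digit taken from the date)
    convert_deliver('CF005', datetime(2019, 3, 14)) -> '2005'   (a leading 0 rolls over to the next decade)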
"""
if not isinstance(symbol, str):
symbol = str(symbol).strip()
m = re.search(r'\d{3,4}$', symbol.strip())[0]
if len(m) == 4:
return m
if m[0] == '0':
y = date.year
y = int(y - np.floor(y / 100) * 100 + 1)
m = str(y) + m[-2:]
else:
m = date.strftime('%y')[0] + m
return m
def data_type_conversion(data, category, columns, names, date, market):
"""
Data type conversion: convert str fields to numeric types
:param market:
:param date:
:param names:
:param columns:
:param category: 行情类型
:param data:
:return:
"""
hq_df = data.copy()
# Keep only the required columns and rename them to the unified standard
hq_df = hq_df[columns]
hq_df.columns = names
hq_df = hq_df.dropna()
hq_df['datetime'] = date
hq_df['market'] = market
hq_df['open'] = pd.to_numeric(hq_df['open'], downcast='float')
hq_df['close'] = pd.to_numeric(hq_df['close'], downcast='float')
hq_df['high'] = pd.to_numeric(hq_df['high'], downcast='float')
hq_df['low'] = pd.to_numeric(hq_df['low'], downcast='float')
hq_df['settle'] = pd.to_numeric(hq_df['settle'], downcast='float')
hq_df['volume'] = pd.to_numeric(hq_df['volume'], downcast='integer')
hq_df['openInt'] = pd.to_numeric(hq_df['openInt'], downcast='integer')
# Exercise volume and delta exist only for options
if category == 1:
hq_df['exevolume'] = pd.to_numeric(hq_df['exevolume'], downcast='integer')
hq_df['delta'] = pd.to_numeric(hq_df['delta'], downcast='float')
return hq_df
def transfer_dce_future_hq(date, file_path, columns_map):
"""
Normalise each day's data to the unified standard
:return: pd.DataFrame with the normalised data
"""
ret = pd.DataFrame()
hq_df = pd.read_csv(file_path, encoding='gb2312', header=0, index_col=False,
sep='\s+', thousands=',')
bflag = hq_df.empty or len(hq_df.columns) < len(columns_map) or len(hq_df.columns) > 20
if bflag:  # the raw data file is null; it is not re-downloaded here, so the program needs to be rerun
print('dce future hq data:{} does not exist, please rerun the program!'.format(file_path.name))
return ret
hq_df = data_type_conversion(hq_df, 0, list(columns_map.values()), list(columns_map.keys()), date, 'dce')
# Convert commodity names to their letter codes
hq_df['code'] = hq_df['code'].transform(lambda x: NAME2CODE_MAP['exchange'][x])
# Build the symbol
hq_df['symbol'] = hq_df['code'] + hq_df['symbol'].transform(lambda x: convert_deliver(x, date))
hq_df['amount'] = pd.to_numeric(hq_df['amount'], downcast='float') * 10000
return hq_df
def transfer_czce_future_hq(date, file_path, columns_map):
"""
Normalise each day's data to the unified standard
:return: pd.DataFrame with the normalised data
"""
ret = pd.DataFrame()
hq_df = pd.read_csv(file_path, encoding='gb2312', header=0, index_col=False)
bflag = hq_df.empty or len(hq_df.columns) < len(columns_map) or len(hq_df.columns) > 20
if bflag:  # the raw data file is null; it is not re-downloaded here, so the program needs to be rerun
print('czce future hq data:{} does not exist, please rerun the program!'.format(file_path.name))
return ret
columns_map = columns_map.copy()
if date < datetime(2010, 8, 24):
columns_map['volume'] = columns_map['volume'][0]
else:
columns_map['volume'] = columns_map['volume'][1]
# Convert commodity name abbreviations to their letter codes
symbol_name = columns_map['symbol']
split_re, index = split_symbol('^[a-zA-Z]{1,2}', hq_df[symbol_name])
hq_df = hq_df if all(index) else hq_df[index]  # drop non-data rows
hq_df = data_type_conversion(hq_df, 0, list(columns_map.values()), list(columns_map.keys()), date, 'czce')
# TODO confirm that keys are not needed: NAME2CODE_MAP['exchange'].keys()
hq_df['code'] = split_re.transform(
lambda x: NAME2CODE_MAP['exchange'][x[0]] if x[0] in NAME2CODE_MAP['exchange'].keys() else x[0])
# Build the symbol
hq_df['symbol'] = hq_df['code'] + hq_df['symbol'].transform(lambda x: convert_deliver(x, date))
hq_df['amount'] =
|
pd.to_numeric(hq_df['amount'], downcast='float')
|
pandas.to_numeric
|
#coding=utf-8
import os
import shutil
import numpy as np
import pandas as pd
class CSZLUtils(object):
"""description of class"""
def mkdir(path):
"""Create the directory at the given path"""
# strip leading and trailing whitespace
path=path.strip()
# strip a trailing \ character
path=path.rstrip("\\")
# check whether the path exists
# exists -> True
# does not exist -> False
isExists=os.path.exists(path)
# act on the result
if not isExists:
os.makedirs(path)
print (path+' created successfully')
return True
else:
# if the directory already exists, do not create it and report that it already exists
print (path+' directory already exists')
return False
def copyfile(srcfile,dstfile):
"""Copy a file"""
if not os.path.isfile(srcfile):
print ("%s does not exist!"%(srcfile))
else:
fpath,fname=os.path.split(dstfile)  # split into directory path and file name
if not os.path.exists(fpath):
os.makedirs(fpath)  # create the directory path
shutil.copyfile(srcfile,dstfile)  # copy the file
print ("copy %s -> %s"%( srcfile,dstfile))
def reduce_mem_usage(df):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
# large numeric codes could be wrongly treated as category here, so only ts_code is converted for now
if(col=='ts_code'):
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
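# Hypothetical usage sketch: shrink a freshly loaded frame before caching it, e.g.
#   df = CSZLUtils.reduce_mem_usage(pd.read_csv('daily.csv'))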
def csvmode():
return False
def Loaddata(path):
if CSZLUtils.csvmode():
changepath=CSZLUtils.pathchange(path)
df=
|
pd.read_csv(changepath,index_col=0,header=0)
|
pandas.read_csv
|
import collections
import logging
import os
import pprint
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as cartif
import core.signal_processing as csigna
import helpers.git as git
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class Test__compute_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_df()
output_df = csigna._compute_lagged_cumsum(input_df, 3)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 3, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test_lag_1(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 1, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_df() -> pd.DataFrame:
df = pd.DataFrame([list(range(10))] * 3).T
df[1] = df[0] + 1
df[2] = df[0] + 2
df.index = pd.date_range(start="2010-01-01", periods=10)
df.rename(columns=lambda x: f"col_{x}", inplace=True)
return df
class Test_correlate_with_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1", "y2"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1"], x_vars=["x"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_arma_df(seed: int = 0) -> pd.DataFrame:
arma_process = cartif.ArmaProcess([], [])
date_range = {"start": "2010-01-01", "periods": 40, "freq": "M"}
srs1 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
).rename("x")
srs2 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 1
).rename("y1")
srs3 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 2
).rename("y2")
return pd.concat([srs1, srs2, srs3], axis=1)
class Test_accumulate(hut.TestCase):
def test1(self) -> None:
srs = pd.Series(
range(0, 20), index=pd.date_range("2010-01-01", periods=20)
)
actual = csigna.accumulate(srs, num_steps=1)
expected = srs.astype(float)
pd.testing.assert_series_equal(actual, expected)
def test2(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test3(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, 3, 6, 9, 12, 15, 18, 21, 24], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test4(self) -> None:
srs = pd.Series(
np.random.randn(100), index=pd.date_range("2010-01-01", periods=100)
)
output = pd.concat([srs, csigna.accumulate(srs, num_steps=5)], axis=1)
output.columns = ["series", "series_accumulated"]
self.check_string(hut.convert_df_to_string(output, index=True))
def test_long_step1(self) -> None:
idx = pd.date_range("2010-01-01", periods=3)
srs = pd.Series([1, 2, 3], index=idx)
actual = csigna.accumulate(srs, num_steps=5)
expected = pd.Series([np.nan, np.nan, np.nan], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test_nans1(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, np.nan, 2, 3, 4, np.nan, 5, 6, 7], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
9,
np.nan,
np.nan,
np.nan,
18,
],
index=idx,
)
pd.testing.assert_series_equal(actual, expected)
def test_nans2(self) -> None:
idx = pd.date_range("2010-01-01", periods=6)
srs = pd.Series([np.nan, np.nan, np.nan, 2, 3, 4], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 9], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test_nans3(self) -> None:
idx = pd.date_range("2010-01-01", periods=6)
srs = pd.Series([np.nan, np.nan, np.nan, 2, 3, 4], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, np.nan, np.nan, np.nan, 5, 7], index=idx)
pd.testing.assert_series_equal(actual, expected)
class Test_get_symmetric_equisized_bins(hut.TestCase):
def test_zero_in_bin_interior_false(self) -> None:
input_ = pd.Series([-1, 3])
expected = np.array([-3, -2, -1, 0, 1, 2, 3])
actual = csigna.get_symmetric_equisized_bins(input_, 1)
np.testing.assert_array_equal(actual, expected)
def test_zero_in_bin_interior_true(self) -> None:
input_ = pd.Series([-1, 3])
expected = np.array([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5])
actual = csigna.get_symmetric_equisized_bins(input_, 1, True)
np.testing.assert_array_equal(actual, expected)
def test_infs(self) -> None:
data = pd.Series([-1, np.inf, -np.inf, 3])
expected = np.array([-4, -2, 0, 2, 4])
actual = csigna.get_symmetric_equisized_bins(data, 2)
np.testing.assert_array_equal(actual, expected)
class Test_compute_rolling_zscore1(hut.TestCase):
def test_default_values1(self) -> None:
"""
Test with default parameters on a heaviside series.
"""
heaviside = cartif.get_heaviside(-10, 252, 1, 1).rename("input")
actual = csigna.compute_rolling_zscore(heaviside, tau=40).rename("output")
output_df = pd.concat([heaviside, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_default_values2(self) -> None:
"""
Test for tau with default parameters on a heaviside series.
"""
heaviside = cartif.get_heaviside(-10, 252, 1, 1).rename("input")
actual = csigna.compute_rolling_zscore(heaviside, tau=20).rename("output")
output_df = pd.concat([heaviside, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_clean1(self) -> None:
"""
Test on a clean arma series.
"""
series = self._get_arma_series(seed=1)
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_nan1(self) -> None:
"""
Test on an arma series with leading NaNs.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_nan2(self) -> None:
"""
Test on an arma series with interspersed NaNs.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df =
|
pd.concat([series, actual], axis=1)
|
pandas.concat
|
import numpy as np
import pandas as pd
import os
from predict_by_model import *
def GetStrn(strg):
s1 = strg.split(";")[-1]
s2 = "_".join(s1.split("__")[1:])
s3 = s2.split(".")[0]
return s3
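# Illustrative (hypothetical) input: GetStrn("k__Bacteria;p__Firmicutes;s__Clostridium_perfringens")
# keeps the last semicolon-separated field, drops the rank prefix before "__", trims any trailing
# ".suffix", and returns "Clostridium_perfringens".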
if __name__ == "__main__":
try:
os.mkdir("LorikeetExperiments")
except:
pass
birdData = pd.read_excel("LorikeetData.xlsx")
Experiments = birdData.iloc[2:].rename({"Taxonomy":"SampleID",'Unnamed: 1':"LorikeetID",'Unnamed: 2':"Date",'Unnamed: 3':"Age",'Unnamed: 4':"Sex",'Unnamed: 5':"Species",'Unnamed: 6':"Enteritis"},axis = 1)
predictions =
|
pd.DataFrame(index = Experiments["SampleID"], columns = ["LorikeetID","Date","Age","Sex","Species","Enteritis","Invasion","CperfDelta","Resistance","CperfResistance","CperfPromotion","InitialCperfGrowth","SpeciesFound","SpeciesListed","ReadProportion"])
|
pandas.DataFrame
|
from math import sqrt
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.5f' % x) # pandas
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
|
pd.set_option('display.width', 600)
|
pandas.set_option
|
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas import testing as tm
import palletjack
class TestGSheetsLoader:
def test_load_specific_worksheet_into_dataframe_by_id(self, mocker):
sheet_mock = mocker.Mock()
client_mock = mocker.Mock()
client_mock.open_by_key.return_value = sheet_mock
gsheet_loader_mock = mocker.Mock()
gsheet_loader_mock.gsheets_client = client_mock
palletjack.GSheetLoader.load_specific_worksheet_into_dataframe(gsheet_loader_mock, 'foobar', 5)
sheet_mock.worksheet.assert_called_once_with('index', 5)
sheet_mock.worksheet_by_title.assert_not_called()
def test_load_specific_worksheet_into_dataframe_by_title(self, mocker):
sheet_mock = mocker.Mock()
client_mock = mocker.Mock()
client_mock.open_by_key.return_value = sheet_mock
gsheet_loader_mock = mocker.Mock()
gsheet_loader_mock.gsheets_client = client_mock
palletjack.GSheetLoader.load_specific_worksheet_into_dataframe(
gsheet_loader_mock, 'foobar', '2015', by_title=True
)
        sheet_mock.worksheet.assert_not_called()
sheet_mock.worksheet_by_title.assert_called_once_with('2015')
def test_load_all_worksheets_into_dataframes_single_worksheet(self, mocker):
worksheet_mock = mocker.Mock()
worksheet_mock.title = 'ws1'
worksheet_mock.get_as_df.return_value = 'df1'
sheet_mock = mocker.Mock()
sheet_mock.worksheets.return_value = [worksheet_mock]
client_mock = mocker.Mock()
client_mock.open_by_key.return_value = sheet_mock
gsheet_loader_mock = mocker.Mock()
gsheet_loader_mock.gsheets_client = client_mock
df_dict = palletjack.GSheetLoader.load_all_worksheets_into_dataframes(gsheet_loader_mock, 'foobar')
test_dict = {'ws1': 'df1'}
assert df_dict == test_dict
def test_load_all_worksheets_into_dataframes_multiple_worksheets(self, mocker):
ws1_mock = mocker.Mock()
ws1_mock.title = 'ws1'
ws1_mock.get_as_df.return_value = 'df1'
ws2_mock = mocker.Mock()
ws2_mock.title = 'ws2'
ws2_mock.get_as_df.return_value = 'df2'
sheet_mock = mocker.Mock()
sheet_mock.worksheets.return_value = [ws1_mock, ws2_mock]
client_mock = mocker.Mock()
client_mock.open_by_key.return_value = sheet_mock
gsheet_loader_mock = mocker.Mock()
gsheet_loader_mock.gsheets_client = client_mock
df_dict = palletjack.GSheetLoader.load_all_worksheets_into_dataframes(gsheet_loader_mock, 'foobar')
test_dict = {'ws1': 'df1', 'ws2': 'df2'}
assert df_dict == test_dict
def test_combine_worksheets_into_single_dataframe_combines_properly(self, mocker):
df1 = pd.DataFrame({
'foo': [1, 2],
'bar': [3, 4],
})
df2 = pd.DataFrame({
'foo': [10, 11],
'bar': [12, 13],
})
df_dict = {'df1': df1, 'df2': df2}
combined_df = palletjack.GSheetLoader.combine_worksheets_into_single_dataframe(mocker.Mock(), df_dict)
test_df = pd.DataFrame({
'worksheet': ['df1', 'df1', 'df2', 'df2'],
'foo': [1, 2, 10, 11],
'bar': [3, 4, 12, 13],
})
test_df.index = [0, 1, 0, 1]
test_df.index.name = 'row'
|
tm.assert_frame_equal(combined_df, test_df)
|
pandas.testing.assert_frame_equal
|
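# Hedged aside (an illustration with pandas alone, not palletjack's actual implementation):
# the combined frame expected in the last test can be built by concatenating the
# per-worksheet frames with the dict keys as an extra index level.
import pandas as pd
_worksheets = {'df1': pd.DataFrame({'foo': [1, 2], 'bar': [3, 4]}),
               'df2': pd.DataFrame({'foo': [10, 11], 'bar': [12, 13]})}
_combined = pd.concat(_worksheets, names=['worksheet', 'row']).reset_index(level='worksheet')
# _combined has columns ['worksheet', 'foo', 'bar'] and an index named 'row' of [0, 1, 0, 1].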
# dedicated to the penguin
import pandas as pd
import numpy as np
import dask.array.gufunc
import dask.array as da
from dask.diagnostics import ProgressBar
from dask.distributed import Client
from . import io, nexus, tools
from .stream_parser import StreamParser
# from .map_image import MapImage
import h5py
from typing import Union, Dict, Optional, List, Tuple, Callable
import copy
from collections import defaultdict
from warnings import warn, catch_warnings, simplefilter
from tables import NaturalNameWarning
from concurrent.futures import ProcessPoolExecutor, wait, FIRST_EXCEPTION
from contextlib import contextmanager
import os
# top-level helper functions for chunking operations
# ...to be refactored into tools or compute later...
def _check_commensurate(init: Union[list, tuple, np.ndarray], final: Union[list, tuple, np.ndarray],
equal_size: bool = False):
    '''Check whether blocks with sizes in init are commensurate with (i.e. have boundaries aligned with)
    blocks in final, and (optionally) whether the final blocks are equally sized within each initial block.
    Useful to check if a dask rechunk operation will act across boundaries of existing chunks,
    which is often something you'll want to avoid (and might be a sign that something is going wrong).
    Blocks in final must hence be smaller than those in init, i.e. len(final) >= len(init),
    and of course: sum(final) == sum(init).
    Returns whether the blocks are commensurate and, if so, the number of
    final blocks in each of the initial blocks.'''
#TODO consider using numba jit
final_inv = list(final)[::-1] # invert for faster popping
init = list(init)
if sum(init) != sum(final):
raise ValueError('Sum of init and final must be identical.')
blocksize = []
if equal_size:
for s0 in init:
# iterate over initial blocks
n_final_in_initial = s0 // final_inv[-1]
for _ in range(n_final_in_initial):
# iterate over final blocks within initial
if (s0 % final_inv.pop()) != 0:
return False, None
blocksize.append(n_final_in_initial)
else:
for s0 in init:
# iterate over initial blocks
# n_rem = copy.copy(s0)
n_rem = s0
b_num = 0
while n_rem != 0:
n_rem -= final_inv.pop()
b_num += 1
if n_rem < 0:
# incommensurate block found!
return False, None
blocksize.append(b_num)
assert len(final_inv) == 0
return True, blocksize
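# Hedged usage sketch (illustrative chunk tuples only): (4, 4) splits cleanly into
# (2, 2, 2, 2), whereas (3, 5) crosses a chunk boundary of (4, 4).
_commensurate_ok = _check_commensurate((4, 4), (2, 2, 2, 2))   # -> (True, [2, 2])
_commensurate_bad = _check_commensurate((4, 4), (3, 5))        # -> (False, None)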
def _agg_groups(stack: np.ndarray, labels: Union[np.ndarray, list, tuple], agg_function: callable, *args, **kwargs):
'''Apply aggregating function to a numpy stack group-by-group, with groups defined by unique labels,
and return the concatenated results; i.e., the length of the result along the aggregation
axis equals the number of unique labels.
'''
res_list = []
labels = labels.squeeze()
for lbl in np.unique(labels):
res_list.append(agg_function(stack[labels == lbl,...], *args, **kwargs))
return np.concatenate(res_list)
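# Hedged usage sketch (made-up array): sum a four-frame stack group-wise, with two
# groups of two frames each identified by the label vector.
_demo_stack = np.arange(16, dtype=float).reshape(4, 2, 2)
_demo_sums = _agg_groups(_demo_stack, np.array([0, 0, 1, 1]),
                         lambda x: x.sum(axis=0, keepdims=True))
# _demo_sums.shape == (2, 2, 2): one aggregated frame per unique label.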
def _map_sub_blocks(stack: da.Array, labels: Union[np.ndarray, list, tuple], func: callable, aggregating: bool = True,
*args, **kwargs):
'''Wrapper for da.map_blocks, which instead of applying the function chunk-by-chunk can apply it also to sub-groups
    within each chunk, as identified by unique labels (e.g. integers). Useful if you want large chunks for fast
    computation but need to apply the function to smaller sub-blocks. Typical example: summing the frames of each
    diffraction movie when many movies are stored in a single chunk (one chunk per movie would make the chunk count too large).
The input stack must be chunked along its 0th axis only, and len(labels) must equal the height of the stack.
If aggregating=True, func is assumed to reduce the sub-block height to 1 (like summing all stack frames), whereas
aggregating=False assumes func to leave the sub-block sizes as is (e.g. for cumulative summing).'''
chunked_labels = da.from_array(labels.reshape((-1,1,1)), chunks=(stack.chunks[0],-1,-1), name='sub_block_label')
cc_out = _check_commensurate(stack.chunks[0], np.unique(labels, return_counts=True)[1], equal_size=False)
if not cc_out[0]:
        raise ValueError('Mismatched chunk structure: each mapping group must lie within a single chunk')
if 'chunks' in kwargs:
final_chunks = kwargs['chunks']
else:
final_chunks = (tuple(cc_out[1]), ) + stack.chunks[1:] if aggregating else stack.chunks
return da.map_blocks(_agg_groups, stack, chunked_labels,
agg_function=func, chunks=final_chunks, *args, **kwargs)
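# Hedged usage sketch (synthetic data; chunk sizes chosen so that each label group fits
# inside a single chunk, as required by the commensurability check above):
_mb_stack = da.from_array(np.arange(24, dtype=float).reshape(6, 2, 2), chunks=(2, 2, 2))
_mb_labels = np.array([0, 0, 1, 1, 2, 2])
_mb_summed = _map_sub_blocks(_mb_stack, _mb_labels,
                             lambda x: x.sum(axis=0, keepdims=True), aggregating=True)
# _mb_summed.compute() has shape (3, 2, 2): one summed sub-block per label.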
class Dataset:
def __init__(self):
self._shots_changed = False
self._peaks_changed = False
self._predict_changed = False
self._features_changed = False
# HDF5 file addresses
self.data_pattern: str = '/%/data'
'''Path to data stacks in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/data'''
self.shots_pattern: str = '/%/shots'
'''Path to shot table data in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/shots'''
self._fallback_shots_pattern: str = '/%/data/shots'
self.result_pattern: str = '/%/results'
'''Path to result data (peaks, predictions) in HDF5 files. % can be used as placeholder (as in CrystFEL).
Default /%/results. **Note that storing results in this way is discouraged and deprecated.**'''
self.map_pattern: str = '/%/map'
'''Path to map and feature data in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/map'''
self.instrument_pattern: str = '/%/instrument'
        '''Path to instrument metadata in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/instrument'''
self.parallel_io: bool = True
        '''Toggles whether parallel I/O is attempted for datasets spanning many files. Note that this is independent
        of `dask.distributed`-based parallelization as in `store_stack_fast`. Default True, which is overridden
if the Dataset comprises a single file only.'''
# internal stuff
self._file_handles = {}
self._stacks = {}
self._shot_id_cols = ['file', 'Event']
self._feature_id_cols = ['crystal_id', 'region', 'sample']
self._diff_stack_label = ''
# tables: accessed via properties!
self._shots = pd.DataFrame(columns=self._shot_id_cols + self._feature_id_cols + ['selected'])
self._peaks = pd.DataFrame(columns=self._shot_id_cols)
self._predict =
|
pd.DataFrame(columns=self._shot_id_cols)
|
pandas.DataFrame
|
import datetime as dt
from math import sqrt
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.graphics.tsaplots as tpl
import tqdm
from bokeh.io import export_png, export_svgs
from bokeh.models import DatetimeTickFormatter, Range1d
from bokeh.plotting import figure
from pytz import timezone
from statsmodels.tsa.tsatools import detrend
import mise.data as data
from mise.constants import SEOUL_STATIONS
seoultz = timezone("Asia/Seoul")
HOURLY_DATA_PATH = "/input/python/input_seoul_imputed_hourly_pandas.csv"
DAILY_DATA_PATH = "/input/python/input_seoul_imputed_daily_pandas.csv"
def stats_ou(station_name="종로구"):
"""Model by OU process
Args:
station_name (str, optional): [description]. Defaults to "종로구".
"""
print("Data loading start...")
_df_h = data.load_imputed([1], filepath=HOURLY_DATA_PATH)
df_h = _df_h.query('stationCode == "' + str(SEOUL_STATIONS[station_name]) + '"')
if (
station_name == "종로구"
and not Path("/input/python/input_jongno_imputed_hourly_pandas.csv").is_file()
):
# load imputed result
df_h.to_csv("/input/python/input_jongno_imputed_hourly_pandas.csv")
print("Data loading complete")
targets = ["PM10", "PM25"]
intT = {"PM10": 19.01883611948326, "PM25": 20.4090132600871}
sample_size = 48
output_size = 24
train_fdate = dt.datetime(2008, 1, 5, 0).astimezone(seoultz)
train_tdate = dt.datetime(2018, 12, 31, 23).astimezone(seoultz)
test_fdate = dt.datetime(2019, 1, 1, 0).astimezone(seoultz)
test_tdate = dt.datetime(2020, 10, 31, 23).astimezone(seoultz)
    # consecutive dates between train and test
assert train_tdate + dt.timedelta(hours=1) == test_fdate
for target in targets:
output_dir = Path("/mnt/data/OU/" + station_name + "/" + target + "/")
png_dir = output_dir / Path("png/")
svg_dir = output_dir / Path("svg/")
data_dir = output_dir / Path("csv/")
Path.mkdir(data_dir, parents=True, exist_ok=True)
Path.mkdir(png_dir, parents=True, exist_ok=True)
Path.mkdir(svg_dir, parents=True, exist_ok=True)
# numeric_pipeline_X = Pipeline(
# [('seasonalitydecompositor',
# data.SeasonalityDecompositor_AWH(smoothing=True, smoothingFrac=0.05)),
# ('standardtransformer', data.StandardScalerWrapper(scaler=StandardScaler()))])
# scaler = ColumnTransformer(
# transformers=[
# ('num', numeric_pipeline_X, [target])])
# prepare dataset
train_set = data.UnivariateRNNMeanSeasonalityDataset(
station_name=station_name,
target=target,
filepath=HOURLY_DATA_PATH,
features=[
"SO2",
"CO",
"O3",
"NO2",
"PM10",
"PM25",
"temp",
"u",
"v",
"pres",
"humid",
"prep",
"snow",
],
features_1=[
"SO2",
"CO",
"O3",
"NO2",
"PM10",
"PM25",
"temp",
"v",
"pres",
"humid",
"prep",
"snow",
],
features_2=["u"],
fdate=train_fdate,
tdate=train_tdate,
sample_size=sample_size,
output_size=output_size,
train_valid_ratio=0.8,
)
train_set.preprocess()
test_set = data.UnivariateRNNMeanSeasonalityDataset(
station_name=station_name,
target=target,
filepath=HOURLY_DATA_PATH,
features=[
"SO2",
"CO",
"O3",
"NO2",
"PM10",
"PM25",
"temp",
"u",
"v",
"pres",
"humid",
"prep",
"snow",
],
features_1=[
"SO2",
"CO",
"O3",
"NO2",
"PM10",
"PM25",
"temp",
"v",
"pres",
"humid",
"prep",
"snow",
],
features_2=["u"],
fdate=test_fdate,
tdate=test_tdate,
sample_size=sample_size,
output_size=output_size,
scaler_X=train_set.scaler_X,
scaler_Y=train_set.scaler_Y,
)
test_set.transform()
test_set.plot_seasonality(data_dir, png_dir, svg_dir)
df_test = test_set.ys.loc[test_fdate:test_tdate, :].copy()
df_test_org = test_set.ys_raw.loc[test_fdate:test_tdate, :].copy()
print("Simulate by Ornstein–Uhlenbeck process for " + target + "...")
def run_OU(_intT):
"""Run OU process
Args:
_intT (float): Time Scale
"""
df_obs = mw_df(df_test_org, output_size, test_fdate, test_tdate)
dates = df_obs.index
df_sim = sim_OU(
df_test,
dates,
target,
np.mean(df_test.to_numpy()),
np.std(df_test.to_numpy()),
_intT[target],
test_set.scaler_Y,
output_size,
)
assert df_obs.shape == df_sim.shape
# join df
plot_OU(
df_sim,
df_obs,
target,
data_dir,
png_dir,
svg_dir,
test_fdate,
test_tdate,
station_name,
output_size,
)
# save to csv
csv_fname = "df_test_obs.csv"
df_obs.to_csv(data_dir / csv_fname)
csv_fname = "df_test_sim.csv"
df_sim.to_csv(data_dir / csv_fname)
run_OU(intT)
def mw_df(df_org, output_size, fdate, tdate):
"""
moving window
"""
cols = [str(i) for i in range(output_size)]
df_obs = pd.DataFrame(columns=cols)
df = df_org.loc[fdate:tdate, :]
cols = [str(t) for t in range(output_size)]
df_obs =
|
pd.DataFrame(columns=cols)
|
pandas.DataFrame
|
from django.shortcuts import render,redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic.edit import FormView
from django.views.generic import TemplateView
from django.contrib.auth import login,logout,get_user_model,authenticate
from django.contrib.auth.models import Group,User
from apps.login.models import Patient as patientUser
from django.http import HttpResponseRedirect
import requests as requis
from .forms import FormularioLogin
import fitbit
import django.http.response
import pandas as pd
from django.http import JsonResponse
from fontawesome.fields import IconField
import datetime
import sys
import threading
from datetime import timedelta, date
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
import math
import pandas as pd #tratamiento de datos
import seaborn as sns
from sklearn.svm import OneClassSVM
import random
import warnings
warnings.filterwarnings('ignore')
sys.path.insert(1, '/python_fitbit_wapi/')
from .python_fitbit_wapi import gather_keys_oauth2 as Oauth2
# Create your views here.
auth2_client = fitbit.api.Fitbit
userpatient = User
urlCSV =""
fit_statsHR=[]
fit_statsSteps=[]
contadorHR=0
contadorAnomaliasHRreposo=0
contadorAnomaliasHRejercicio=0
contadorAnomaliasStesp=0
accuracyHRreposo=False
accuracyHRejercicio=False
accuracyActividadFisica=False
especificidadHRreposo=False
especificidadHRejercicio=False
especificidadActividadFisica=False
profileFitUsr=
|
pd.DataFrame({})
|
pandas.DataFrame
|
from __future__ import unicode_literals, division, print_function
import numpy as np
import pandas as pd
from pymatgen.core import Structure, Lattice
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.local_env import VoronoiNN, JmolNN, CrystalNN
from matminer.featurizers.site import AGNIFingerprints, \
OPSiteFingerprint, CrystalNNFingerprint, \
EwaldSiteEnergy, \
VoronoiFingerprint, IntersticeDistribution, ChemEnvSiteFingerprint, \
CoordinationNumber, ChemicalSRO, GaussianSymmFunc, \
GeneralizedRadialDistributionFunction, AngularFourierSeries, \
LocalPropertyDifference, SOAP, BondOrientationalParameter, \
SiteElementalProperty, AverageBondLength, AverageBondAngle
from matminer.featurizers.deprecated import CrystalSiteFingerprint
from matminer.featurizers.utils.grdf import Gaussian
class FingerprintTests(PymatgenTest):
def setUp(self):
self.sc = Structure(
Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al", ],
[[0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[0.45, 0.5, 0.5], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.b1 = Structure(
Lattice([[0,1,1],[1,0,1],[1,1,0]]),
["H", "He"], [[0,0,0],[0.5,0.5,0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.ni3al = Structure(
Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al", ] + ["Ni"] * 3,
[[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
def test_simple_cubic(self):
"""Test with an easy structure"""
# Make sure direction-dependent fingerprints are zero
agni = AGNIFingerprints(directions=['x', 'y', 'z'])
features = agni.featurize(self.sc, 0)
self.assertEqual(8 * 3, len(features))
self.assertEqual(8 * 3, len(set(agni.feature_labels())))
self.assertArrayAlmostEqual([0, ] * 24, features)
# Compute the "atomic fingerprints"
agni.directions = [None]
agni.cutoff = 3.75 # To only get 6 neighbors to deal with
features = agni.featurize(self.sc, 0)
self.assertEqual(8, len(features))
self.assertEqual(8, len(set(agni.feature_labels())))
self.assertEqual(0.8, agni.etas[0])
self.assertAlmostEqual(6 * np.exp(-(3.52 / 0.8) ** 2) * 0.5 * (np.cos(np.pi * 3.52 / 3.75) + 1), features[0])
self.assertAlmostEqual(6 * np.exp(-(3.52 / 16) ** 2) * 0.5 * (np.cos(np.pi * 3.52 / 3.75) + 1), features[-1])
# Test that passing etas to constructor works
new_etas = np.logspace(-4, 2, 6)
agni = AGNIFingerprints(directions=['x', 'y', 'z'], etas=new_etas)
self.assertArrayAlmostEqual(new_etas, agni.etas)
def test_off_center_cscl(self):
agni = AGNIFingerprints(directions=[None, 'x', 'y', 'z'], cutoff=4)
# Compute the features on both sites
site1 = agni.featurize(self.cscl, 0)
site2 = agni.featurize(self.cscl, 1)
# The atomic attributes should be equal
self.assertArrayAlmostEqual(site1[:8], site2[:8])
# The direction-dependent ones should be equal and opposite in sign
self.assertArrayAlmostEqual(-1 * site1[8:], site2[8:])
# Make sure the site-ones are as expected.
right_dist = 4.209 * np.sqrt(0.45 ** 2 + 2 * 0.5 ** 2)
right_xdist = 4.209 * 0.45
left_dist = 4.209 * np.sqrt(0.55 ** 2 + 2 * 0.5 ** 2)
left_xdist = 4.209 * 0.55
self.assertAlmostEqual(4 * (
right_xdist / right_dist * np.exp(-(right_dist / 0.8) ** 2) * 0.5 * (np.cos(np.pi * right_dist / 4) + 1) -
left_xdist / left_dist * np.exp(-(left_dist / 0.8) ** 2) * 0.5 * (np.cos(np.pi * left_dist / 4) + 1)),
site1[8])
def test_dataframe(self):
data = pd.DataFrame({'strc': [self.cscl, self.cscl, self.sc], 'site': [0, 1, 0]})
agni = AGNIFingerprints()
agni.featurize_dataframe(data, ['strc', 'site'])
def test_op_site_fingerprint(self):
opsf = OPSiteFingerprint()
l = opsf.feature_labels()
t = ['sgl_bd CN_1', 'L-shaped CN_2', 'water-like CN_2', \
'bent 120 degrees CN_2', 'bent 150 degrees CN_2', \
'linear CN_2', 'trigonal planar CN_3', \
'trigonal non-coplanar CN_3', 'T-shaped CN_3', \
'square co-planar CN_4', 'tetrahedral CN_4', \
'rectangular see-saw-like CN_4', 'see-saw-like CN_4', \
'trigonal pyramidal CN_4', 'pentagonal planar CN_5', \
'square pyramidal CN_5', 'trigonal bipyramidal CN_5', \
'hexagonal planar CN_6', 'octahedral CN_6', \
'pentagonal pyramidal CN_6', 'hexagonal pyramidal CN_7', \
'pentagonal bipyramidal CN_7', 'body-centered cubic CN_8', \
'hexagonal bipyramidal CN_8', 'q2 CN_9', 'q4 CN_9', 'q6 CN_9', \
'q2 CN_10', 'q4 CN_10', 'q6 CN_10', \
'q2 CN_11', 'q4 CN_11', 'q6 CN_11', \
'cuboctahedral CN_12', 'q2 CN_12', 'q4 CN_12', 'q6 CN_12']
for i in range(len(l)):
self.assertEqual(l[i], t[i])
ops = opsf.featurize(self.sc, 0)
self.assertEqual(len(ops), 37)
self.assertAlmostEqual(ops[opsf.feature_labels().index(
'octahedral CN_6')], 0.9995, places=7)
ops = opsf.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[opsf.feature_labels().index(
'body-centered cubic CN_8')], 0.8955, places=7)
opsf = OPSiteFingerprint(dist_exp=0)
ops = opsf.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[opsf.feature_labels().index(
'body-centered cubic CN_8')], 0.9555, places=7)
# The following test aims at ensuring the copying of the OP dictionaries work.
opsfp = OPSiteFingerprint()
cnnfp = CrystalNNFingerprint.from_preset('ops')
self.assertEqual(len([1 for l in opsfp.feature_labels() if l.split()[0] == 'wt']), 0)
def test_crystal_site_fingerprint(self):
with self.assertWarns(FutureWarning):
csf = CrystalSiteFingerprint.from_preset('ops')
l = csf.feature_labels()
t = ['wt CN_1', 'wt CN_2', 'L-shaped CN_2', 'water-like CN_2',
'bent 120 degrees CN_2', 'bent 150 degrees CN_2', 'linear CN_2',
'wt CN_3', 'trigonal planar CN_3', 'trigonal non-coplanar CN_3',
'T-shaped CN_3', 'wt CN_4', 'square co-planar CN_4',
'tetrahedral CN_4', 'rectangular see-saw-like CN_4',
'see-saw-like CN_4', 'trigonal pyramidal CN_4', 'wt CN_5',
'pentagonal planar CN_5', 'square pyramidal CN_5',
'trigonal bipyramidal CN_5', 'wt CN_6', 'hexagonal planar CN_6',
'octahedral CN_6', 'pentagonal pyramidal CN_6', 'wt CN_7',
'hexagonal pyramidal CN_7', 'pentagonal bipyramidal CN_7',
'wt CN_8', 'body-centered cubic CN_8',
'hexagonal bipyramidal CN_8', 'wt CN_9', 'q2 CN_9', 'q4 CN_9',
'q6 CN_9', 'wt CN_10', 'q2 CN_10', 'q4 CN_10', 'q6 CN_10',
'wt CN_11', 'q2 CN_11', 'q4 CN_11', 'q6 CN_11', 'wt CN_12',
'cuboctahedral CN_12', 'q2 CN_12', 'q4 CN_12', 'q6 CN_12']
for i in range(len(l)):
self.assertEqual(l[i], t[i])
ops = csf.featurize(self.sc, 0)
self.assertEqual(len(ops), 48)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'wt CN_6')], 1, places=4)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'octahedral CN_6')], 1, places=4)
ops = csf.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'wt CN_8')], 0.5575257, places=4)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'body-centered cubic CN_8')], 0.5329344, places=4)
def test_crystal_nn_fingerprint(self):
cnnfp = CrystalNNFingerprint.from_preset(
'ops', distance_cutoffs=None, x_diff_weight=None)
l = cnnfp.feature_labels()
t = ['wt CN_1', 'sgl_bd CN_1', 'wt CN_2', 'L-shaped CN_2',
'water-like CN_2', 'bent 120 degrees CN_2',
'bent 150 degrees CN_2', 'linear CN_2', 'wt CN_3',
'trigonal planar CN_3', 'trigonal non-coplanar CN_3',
'T-shaped CN_3', 'wt CN_4', 'square co-planar CN_4',
'tetrahedral CN_4', 'rectangular see-saw-like CN_4',
'see-saw-like CN_4', 'trigonal pyramidal CN_4', 'wt CN_5',
'pentagonal planar CN_5', 'square pyramidal CN_5',
'trigonal bipyramidal CN_5', 'wt CN_6', 'hexagonal planar CN_6',
'octahedral CN_6', 'pentagonal pyramidal CN_6', 'wt CN_7',
'hexagonal pyramidal CN_7', 'pentagonal bipyramidal CN_7',
'wt CN_8', 'body-centered cubic CN_8',
'hexagonal bipyramidal CN_8', 'wt CN_9', 'q2 CN_9', 'q4 CN_9',
'q6 CN_9', 'wt CN_10', 'q2 CN_10', 'q4 CN_10', 'q6 CN_10',
'wt CN_11', 'q2 CN_11', 'q4 CN_11', 'q6 CN_11', 'wt CN_12',
'cuboctahedral CN_12', 'q2 CN_12', 'q4 CN_12', 'q6 CN_12',
'wt CN_13', 'wt CN_14', 'wt CN_15', 'wt CN_16', 'wt CN_17',
'wt CN_18', 'wt CN_19', 'wt CN_20', 'wt CN_21', 'wt CN_22',
'wt CN_23', 'wt CN_24']
for i in range(len(l)):
self.assertEqual(l[i], t[i])
ops = cnnfp.featurize(self.sc, 0)
self.assertEqual(len(ops), 61)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'wt CN_6')], 1, places=7)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'octahedral CN_6')], 1, places=7)
ops = cnnfp.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'wt CN_8')], 0.498099, places=3)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'body-centered cubic CN_8')], 0.47611, places=3)
op_types = {6: ["wt", "oct_max"], 8: ["wt", "bcc"]}
cnnfp = CrystalNNFingerprint(
op_types, distance_cutoffs=None, \
x_diff_weight=None)
labels = ['wt CN_6', 'oct_max CN_6', \
'wt CN_8', 'bcc CN_8']
for l1, l2 in zip(cnnfp.feature_labels(), labels):
self.assertEqual(l1, l2)
feats = cnnfp.featurize(self.sc, 0)
self.assertEqual(len(feats), 4)
chem_info = {"mass": {"Al": 26.9, "Cs+": 132.9,"Cl-": 35.4}, \
"Pauling scale": {"Al": 1.61, "Cs+": 0.79, "Cl-": 3.16}}
cnnchemfp = CrystalNNFingerprint(
op_types, chem_info=chem_info, distance_cutoffs=None, \
x_diff_weight=None)
labels = labels + ['mass local diff', \
'Pauling scale local diff']
for l1, l2 in zip(cnnchemfp.feature_labels(), labels):
self.assertEqual(l1, l2)
feats = cnnchemfp.featurize(self.sc, 0)
self.assertEqual(len(feats), 6)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'wt CN_6')], 1, places=7)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'oct_max CN_6')], 1, places=7)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'mass local diff')], 0, places=7)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'Pauling scale local diff')], 0, places=7)
feats = cnnchemfp.featurize(self.cscl, 0)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'bcc CN_8')], 0.4761107, places=3)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'mass local diff')], 97.5, places=3)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'Pauling scale local diff')], -2.37, places=3)
def test_chemenv_site_fingerprint(self):
cefp = ChemEnvSiteFingerprint.from_preset('multi_weights')
l = cefp.feature_labels()
cevals = cefp.featurize(self.sc, 0)
self.assertEqual(len(cevals), 66)
self.assertAlmostEqual(cevals[l.index('O:6')], 1, places=7)
self.assertAlmostEqual(cevals[l.index('C:8')], 0, places=7)
cevals = cefp.featurize(self.cscl, 0)
self.assertAlmostEqual(cevals[l.index('C:8')], 0.9953721, places=7)
self.assertAlmostEqual(cevals[l.index('O:6')], 0, places=7)
cefp = ChemEnvSiteFingerprint.from_preset('simple')
l = cefp.feature_labels()
cevals = cefp.featurize(self.sc, 0)
self.assertEqual(len(cevals), 66)
self.assertAlmostEqual(cevals[l.index('O:6')], 1, places=7)
self.assertAlmostEqual(cevals[l.index('C:8')], 0, places=7)
cevals = cefp.featurize(self.cscl, 0)
self.assertAlmostEqual(cevals[l.index('C:8')], 0.9953721, places=7)
self.assertAlmostEqual(cevals[l.index('O:6')], 0, places=7)
def test_voronoifingerprint(self):
df_sc= pd.DataFrame({'struct': [self.sc], 'site': [0]})
vorofp = VoronoiFingerprint(use_symm_weights=True)
vorofps = vorofp.featurize_dataframe(df_sc, ['struct', 'site'])
self.assertAlmostEqual(vorofps['Voro_index_3'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_4'][0], 6.0)
self.assertAlmostEqual(vorofps['Voro_index_5'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_6'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_7'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_8'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_9'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_10'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_3'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_4'][0], 1.0)
self.assertAlmostEqual(vorofps['Symmetry_index_5'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_6'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_7'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_8'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_9'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_10'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_3'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_4'][0], 1.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_5'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_6'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_7'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_8'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_9'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_10'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_vol_sum'][0], 43.614208)
self.assertAlmostEqual(vorofps['Voro_area_sum'][0], 74.3424)
self.assertAlmostEqual(vorofps['Voro_vol_mean'][0], 7.269034667)
self.assertAlmostEqual(vorofps['Voro_vol_std_dev'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_vol_minimum'][0], 7.269034667)
self.assertAlmostEqual(vorofps['Voro_vol_maximum'][0], 7.269034667)
self.assertAlmostEqual(vorofps['Voro_area_mean'][0], 12.3904)
self.assertAlmostEqual(vorofps['Voro_area_std_dev'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_area_minimum'][0], 12.3904)
self.assertAlmostEqual(vorofps['Voro_area_maximum'][0], 12.3904)
self.assertAlmostEqual(vorofps['Voro_dist_mean'][0], 3.52)
self.assertAlmostEqual(vorofps['Voro_dist_std_dev'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_dist_minimum'][0], 3.52)
self.assertAlmostEqual(vorofps['Voro_dist_maximum'][0], 3.52)
def test_interstice_distribution_of_crystal(self):
bcc_li = Structure(Lattice([[3.51, 0, 0], [0, 3.51, 0], [0, 0, 3.51]]),
["Li"] * 2, [[0, 0, 0], [0.5, 0.5, 0.5]])
df_bcc_li= pd.DataFrame({'struct': [bcc_li], 'site': [1]})
interstice_distribution = IntersticeDistribution()
intersticefp = interstice_distribution.featurize_dataframe(
df_bcc_li, ['struct', 'site'])
self.assertAlmostEqual(intersticefp['Interstice_vol_mean'][0], 0.32, 2)
self.assertAlmostEqual(intersticefp['Interstice_vol_std_dev'][0], 0)
self.assertAlmostEqual(intersticefp['Interstice_vol_minimum'][0], 0.32, 2)
self.assertAlmostEqual(intersticefp['Interstice_vol_maximum'][0], 0.32, 2)
self.assertAlmostEqual(intersticefp['Interstice_area_mean'][0], 0.16682, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_std_dev'][0], 0)
self.assertAlmostEqual(intersticefp['Interstice_area_minimum'][0], 0.16682, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_maximum'][0], 0.16682, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_mean'][0], 0.06621, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_std_dev'][0], 0.07655, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_minimum'][0], 0, 3)
self.assertAlmostEqual(intersticefp['Interstice_dist_maximum'][0], 0.15461, 5)
def test_interstice_distribution_of_glass(self):
cuzr_glass = Structure(Lattice([[25, 0, 0], [0, 25, 0], [0, 0, 25]]),
["Cu", "Cu", "Cu", "Cu", "Cu", "Zr", "Cu", "Zr",
"Cu", "Zr", "Cu", "Zr", "Cu", "Cu"],
[[11.81159679, 16.49480537, 21.69139442],
[11.16777208, 17.87850033, 18.57877144],
[12.22394796, 15.83218325, 19.37763412],
[13.07053548, 14.34025424, 21.77557646],
[10.78147725, 19.61647494, 20.77595531],
[10.87541011, 14.65986432, 23.61517624],
[12.76631002, 18.41479521, 20.46717947],
[14.63911675, 16.47487037, 20.52671362],
[14.2470256, 18.44215167, 22.56257566],
[9.38050168, 16.87974592, 20.51885879],
[10.66332986, 14.43900833, 20.545186],
[11.57096832, 18.79848982, 23.26073408],
[13.27048138, 16.38613795, 23.59697472],
[9.55774984, 17.09220537, 23.1856528]],
coords_are_cartesian=True)
df_glass=
|
pd.DataFrame({'struct': [cuzr_glass], 'site': [0]})
|
pandas.DataFrame
|
from scipy.stats import binom
import numpy as np
from joblib import delayed,Parallel
from scipy.optimize import minimize
from scipy.optimize import Bounds
from scipy.stats import norm
def Q(n,p,epsilon):
'''
Q function of non-asymptotic method
'''
k1 = np.floor(n*p+epsilon*n)
k2 = np.ceil(n*p-epsilon*n)-1
return binom.cdf(k1,n,p)-binom.cdf(k2,n,p)
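# Hedged numeric example (illustrative values): with n = 1000 draws, p = 0.5 and
# epsilon = 0.03, Q is the probability that the sample proportion falls within
# +/- 0.03 of p; it comes out close to 0.95.
_q_demo = Q(1000, 0.5, 0.03)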
def min_n(p,alpha,epsilon,expand_int=1000):
'''
    Function M of the non-asymptotic method
    Find the optimal value of n restricted to Q >= 1 - alpha
'''
n = 1
q = Q(n,p,epsilon)
i = 0
condition = q<1-alpha
if not condition:
return n
while condition:
r = range(1+i*expand_int,1+(i+1)*expand_int)
r = np.array(r)
q_list = np.apply_along_axis(lambda n: Q(n,p,epsilon),0,r)
condition = all(q_list<1-alpha)
i += 1
return r[q_list>=1-alpha].min()
def sample_size_prop(alpha,epsilon):
'''
Function S of non-asymptotic method
'''
min_n_ = lambda p: -min_n(p,alpha,epsilon)
bounds = Bounds([0],[1])
res = minimize(min_n_,0.5,method='trust-constr',bounds=bounds)
return -res.fun
def sample_size_prop_normal(alpha,epsilon,p=0.5):
'''
Optimal sample size of asymptotic method
'''
z = norm.ppf(1-alpha/2)
n = ((z/epsilon)**2)*p*(1-p)
n = int(np.ceil(n))
return n
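# Hedged worked example of the asymptotic formula: alpha = 0.05 gives z ~ 1.96, so with
# epsilon = 0.05 and p = 0.5, n = ceil((1.96 / 0.05)**2 * 0.25) = 385.
_n_demo = sample_size_prop_normal(0.05, 0.05)   # -> 385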
def simulated_guarantee(n,epsilon,n_sim=1000,p_true=0.5):
'''
    Simulation guarantee for a specific value of p
'''
x = np.random.binomial(n=n,p=p_true,size=int(n_sim))
p_hat = x/n
return (np.abs(p_hat-p_true)<=epsilon).mean()
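# Hedged sanity check (stochastic, so the exact value varies from run to run): with the
# asymptotic sample size computed above, the simulated coverage at p_true = 0.5 should
# land near the nominal 95%.
_coverage_demo = simulated_guarantee(385, 0.05)   # typically around 0.95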
def general_simulated_guarantee(n,epsilon,n_sim=10000,p_min=0.001,p_max=0.999,n_ps=200,measure=np.min):
'''
Simulation guarantee for a set of values of p
'''
ps = np.linspace(p_min,p_max,n_ps)
g = Parallel(n_jobs=-1)(
delayed(simulated_guarantee)(n,epsilon,n_sim,p) for p in ps
)
return measure(g)
if __name__ == '__main__':
import pandas as pd
from itertools import product
from tabulate import tabulate
alpha_list = [0.05,0.02,0.01]
epsilon_list = [0.05,0.02,0.01]
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from .hume import Hume
import pandas as pd
import numpy as np
from io import BytesIO
class SciKitHume:
"Implements the sklearn interface for hume"
def __init__(self, image, params=None, target_label='target'):
self._hume = Hume(image, params=params)
self.target_label = target_label
def fit(self, X, y):
X = pd.DataFrame(X)
X[self.target_label] = y
data = pd.melt(X.reset_index(), id_vars="index")
self._hume = self._hume.fit(BytesIO(data.to_csv(header=None, index=None)),
target=self.target_label)
def predict(self, X):
data =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
from pandas.compat import range, lrange
import numpy as np
import pandas as pd
import pandas.core.common as com
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
def pivot_annual(series, freq=None):
"""
Group a series by years, taking leap years into account.
The output has as many rows as distinct years in the original series,
and as many columns as the length of a leap year in the units corresponding
to the original frequency (366 for daily frequency, 366*24 for hourly...).
    The first column of the output corresponds to Jan. 1st, 00:00:00,
while the last column corresponds to Dec, 31st, 23:59:59.
Entries corresponding to Feb. 29th are masked for non-leap years.
For example, if the initial series has a daily frequency, the 59th column
of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
and the 60th column is masked for non-leap years.
    With an hourly initial frequency, the (59*24)th column of the output always
    corresponds to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
the 24 columns between (59*24) and (61*24) are masked.
If the original frequency is less than daily, the output is equivalent to
``series.convert('A', func=None)``.
Parameters
----------
series : TimeSeries
freq : string or None, default None
Returns
-------
annual : DataFrame
"""
index = series.index
year = index.year
years =
|
nanops.unique1d(year)
|
pandas.core.nanops.unique1d
|
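# Hedged aside (a simplified illustration, not the pivot_annual implementation above): a daily
# series can be pivoted into one row per year and one column per day of year with plain pandas.
# Unlike the docstring's description, this simple version does not realign Feb 29 / Mar 1
# between leap and non-leap years.
import numpy as np
import pandas as pd
_daily_idx = pd.date_range("2019-01-01", "2020-12-31", freq="D")
_daily = pd.Series(np.arange(len(_daily_idx), dtype=float), index=_daily_idx)
_annual = _daily.groupby([_daily_idx.year, _daily_idx.dayofyear]).first().unstack()
# _annual has index [2019, 2020] and columns 1..366; column 366 is NaN for the non-leap year 2019.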
import numpy as np
def main():
import sys
import pandas as pd
if len(sys.argv)!=2:
print("ERROR! WRONG NUMBER OF PARAMETERS")
print("USAGES: remove-outlier <dataset>")
print("EXAMPLE: remove-outlier data.csv")
exit(1)
dataset = pd.read_csv(sys.argv[1]) # importing the dataset
new_dataset = remove_outlier(dataset)
new=
|
pd.DataFrame(new_dataset)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Scientific Computing and Visualization with Spyder
Created on Thu May 20 10:17:27 2021
@author: <NAME>
"""
# %% Import libraries
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
import seaborn as sns
# %% Load raw data (parquet)
data = pd.read_parquet("parsed_data_public.parquet")
# %% Let's explore age
print(data.d_age.describe())
age = data.d_age.tolist()
# %% Save some variables and display them in the Variable Explorer
max_age = data.d_age.max()
min_age = data.d_age.min()
# %% Plot age with pandas
data.d_age.plot.hist(bins=25, alpha=0.5)
# %% Plot age with seaborn (and search for help from IPython Console)
sns.histplot(data.d_age, kde=True, bins=25)
plt.show()
# %% Plot age and mean
sns.histplot(data.d_age, kde=True, bins=25)
plt.xlabel('Age')
plt.axvline(data.d_age.mean(), color='k', linestyle='dashed', linewidth=1)
min_ylim, max_ylim = plt.ylim()
plt.text(data.d_age.mean()*1.1, max_ylim*0.9,
'Mean: {:.2f}'.format(data.d_age.mean()))
plt.show()
# %% Demographic variables list
demograph = [v for v in list(data.columns) if v.startswith("d_")]
# %% Cognitive ability questions
# Select the questions for the cognitive ability test (14 questions).
# Add the correct answers in a new column.
test_items = pd.read_csv("test_items.csv")
ca_test = data.copy() # Make a copy of the original dataframe
right_answers = []
for ID, ROW in test_items.iterrows():
right_answers.append(ROW.iloc[ROW["option_correct"] + 2])
test_items["right_answer"] = right_answers
for ID, ROW in test_items.iterrows():
QUESTION = "q" + str(ROW["ID"])
ANSWER = str(ROW["right_answer"])
try:
ca_test.dropna(subset=[QUESTION], inplace=True)
ca_test["resp_" + QUESTION] = ca_test.apply(lambda row: row[QUESTION] == ANSWER, axis=1)
except KeyError:
print(f"{QUESTION} not found.")
# The identification of some answers failed due to formal discrepancies.
ca_test.q18154 = pd.Series(ca_test.q18154, dtype="int")
ca_test.q18154 = pd.Series(ca_test.q18154, dtype="string")
ca_test.resp_q18154 = ca_test.apply(lambda row: row["q18154"] == "26", axis=1)
ca_test.q255 = pd.Series(ca_test.q255, dtype="int")
ca_test.q255 =
|
pd.Series(ca_test.q255, dtype="string")
|
pandas.Series
|
import pandas as pd
from datetime import datetime
import numpy as np
import csv
import sys
from os import path, listdir
from pathlib import Path
from progressbar import progressbar as pbar
df_results = pd.DataFrame(None)
na_values = [""]
number_of_files = len(listdir("../data/model_build_outputs/scores"))
print(number_of_files)
randomization_list = [2,3,4,5]
for i in pbar(range(number_of_files)):
try:
randomization = randomization_list[i]
df = pd.read_csv("../data/model_build_outputs/scores/df_results_prob_random_"+str(randomization)+".csv", na_values=na_values, keep_default_na=False)
df_results =
|
pd.concat([df], axis=0, ignore_index=True)
|
pandas.concat
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
    --inFile: Path to the CSV file containing the time series data values
    --outFile: Path to the INI configuration file where the time series data values are written
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean using a multiplication (weighted-average) formulation, since direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication formulation, since direct division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if meanValue == numpy.nan:
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if float(sigmaValue) is float(numpy.nan):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if float(sigmaRangeValue) is float(numpy.nan):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
        Cleans the data frame of invalid data values, i.e. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40":
|
pandas.StringDtype()
|
pandas.StringDtype
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 15 15:58:53 2022
@author: <NAME>
"""
# 1. Clean the functions
# 2. Make it production ready
# 3. Make it modular
# Evaluate_IV
# Is_Coarse_Class
# Coarse_Class
# Create_Dummies
import pandas as pd
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
def Evaluate_IV_new(inputs_,outputs_):
inputs_.reset_index(drop=True,inplace=True)
outputs_.reset_index(drop=True,inplace=True)
try:
assert (isinstance(inputs_,pd.core.series.Series) and
isinstance(outputs_,pd.core.series.Series)), 'ERROR: Must provide series input'
        logging.info('SUCCESS: input type criteria satisfied')
except AssertionError:
logging.error('ERROR: accepts only series inputs')
return 0
    if len(inputs_) != len(outputs_):
        raise Exception('ERROR: input and output should be of same length; input {} whereas output {}'
                        .format(len(inputs_), len(outputs_)))
    try:
        assert isinstance(outputs_.tolist()[0], int), 'output should take 1/0 as values'
    except (IndexError, AssertionError):
        raise Exception('ERROR: Output is either empty or does not take 1 or 0')
try:
        assert outputs_.sum() > 0, 'ERROR: output has no event'
except AssertionError:
raise Exception('NO EVENT CAPTURED: Output has no event')
    logging.info('Calculating IV for {}'.format(inputs_.name))
dt_New = pd.concat([inputs_,outputs_],axis=1)
    df_table = pd.DataFrame(dt_New.groupby([inputs_.name])
                            .agg({outputs_.name: ['sum', 'count']})).reset_index(drop=False)
    # Flatten the MultiIndex columns produced by the aggregation
    df_table.columns = [inputs_.name, 'goods', 'total']
df_table['bads']=df_table['total'] - df_table['goods']
df_table['total_goods'] = df_table.goods.sum()
df_table['total_bads'] = df_table.bads.sum()
df_table.loc[df_table.goods==0,'goods']=0.5
df_table.loc[df_table.bads==0,'bads']=0.5
df_table['perc_goods'] = df_table['goods']/df_table['total_goods']
df_table['perc_bads'] = df_table['bads']/df_table['total_bads']
df_table['perc_total'] = df_table['total']/df_table['total'].sum()
    df_table['woe'] = np.log(df_table['perc_goods'] / df_table['perc_bads'])
df_table['perc_diff'] = df_table['perc_goods']-df_table['perc_bads']
df_table['IV'] = df_table['perc_diff'] * df_table['woe']
df_table['variable'] = inputs_.name
df_table.rename(columns={inputs_.name:'level'},inplace=True)
return(df_table)
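# Hedged usage sketch for Evaluate_IV_new; the toy series below are hypothetical
# and only illustrate the expected call shape: a categorical predictor and a
# binary 0/1 target of equal length. The returned frame has one row per level
# with its WOE and IV contribution.
def _demo_evaluate_iv():
    grade = pd.Series(['A', 'A', 'B', 'B', 'C', 'C', 'C', 'A'], name='grade')
    default = pd.Series([1, 0, 0, 1, 0, 0, 1, 1], name='default')
    iv_table = Evaluate_IV_new(grade, default)
    # The total information value of 'grade' is the sum of the per-level IV column
    return iv_table.IV.sum()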
def chunks(l, n):
    # Yield successive n-sized chunks from list l
    for i in range(0, len(l), n):
        yield l[i:i + n]
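# Hedged illustration of chunks(); the WOE-ordered level list is hypothetical.
# With n=3 a seven-element list is split into slices [0:3], [3:6] and [6:7].
def _demo_chunks():
    woe_ordered_levels = ['D', 'F', 'A', 'C', 'G', 'B', 'E']
    return list(chunks(woe_ordered_levels, 3))  # [['D', 'F', 'A'], ['C', 'G', 'B'], ['E']]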
def Is_Coarse_Class_New(IV_Category_List):
try:
        assert isinstance(IV_Category_List, list), 'IV calculation issue'
    except AssertionError:
        logging.error('ERROR: IV calculation encountered an issue, please check input/output')
return None
Coarse_Class = {}
try:
coarse_df = [i for i in IV_Category_List if i.shape[0]>5]
except AttributeError:
        logging.error('ERROR: IV calculation had an issue, provide valid inputs')
return None
for df_temp in coarse_df:
a = df_temp.sort_values('woe')
Var = df_temp.variable[0]
step_,Remainder = np.divmod(a.shape[0],5)
if(Remainder!=0):
Coarse_Class[Var] = list(chunks(a.level.tolist(), step_+1))
else:
Coarse_Class[Var] = list(chunks(a.level.tolist(), step_))
return(Coarse_Class)
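# Hedged sketch of Is_Coarse_Class_New with a hypothetical six-level predictor:
# only IV tables with more than five levels are picked up, and the result maps
# the variable name to groups of levels taken in WOE order.
def _demo_is_coarse_class():
    state = pd.Series(list('ABCDEF') * 4, name='state')
    churn = pd.Series([1, 0, 0, 1, 0, 1,
                       0, 0, 1, 1, 0, 0,
                       1, 1, 0, 0, 1, 0,
                       0, 1, 1, 0, 0, 1], name='churn')
    iv_tables = [Evaluate_IV_new(state, churn)]
    return Is_Coarse_Class_New(iv_tables)  # e.g. {'state': [[...], [...], [...]]}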
# Identify the columns which need to be coarse classed and store the coarse categories as lists keyed by variable name
def Coarse_Class_New(Train_Category,Coarse_Class):
try:
        assert isinstance(Train_Category, pd.core.frame.DataFrame), 'ERROR: Issues in inputs provided'
    except AssertionError:
        logging.error('ERROR: Input must be a pandas DataFrame')
        return None
try:
assert len(Coarse_Class.keys())>0,'no columns to coarse class'
Coarse_Class_Cols = list(Coarse_Class.keys())
df_Coarse_Class = Train_Category[Coarse_Class_Cols]
for i in Coarse_Class_Cols:
cnt=1
for j in Coarse_Class[i]:
df_Coarse_Class = df_Coarse_Class.apply(pd.Series.replace,to_replace=j,value=str(cnt))
cnt = cnt+1
return(df_Coarse_Class)
except AssertionError:
        logging.info('INFO: variables with 5 or fewer levels cannot be coarse classed')
return None
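# Hedged sketch of Coarse_Class_New with a hypothetical frame and grouping:
# every level in the k-th group (1-based) of the mapping is replaced by the
# string str(k), so the six raw levels below collapse into classes '1'-'3'.
def _demo_coarse_class():
    frame = pd.DataFrame({'state': list('ABCDEF')})
    mapping = {'state': [['A', 'B'], ['C', 'D'], ['E', 'F']]}
    return Coarse_Class_New(frame, mapping)  # 'state' becomes '1', '1', '2', '2', '3', '3'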
# Applying Coarse Class to The Categories
def applycoarseclass(data,Y):
if len(data) == 0:
raise ValueError("Empty data frame provided")
IV_Category_List = []
Cols_to_Coarse = []
try:
for i in data.columns:
if(i!=Y):
get_IV = Evaluate_IV_new(data.loc[:,i],data[Y])
IV_Category_List.append(get_IV)
Cols_to_Coarse.append(i)
except (AttributeError,ValueError):
        logging.error('ERROR: expected a DataFrame input; check the expected arguments')
        logging.error("ERROR: can't perform coarse classing")
return(None,None,None)
Coarse_Cls = Is_Coarse_Class_New(IV_Category_List)
df_Coarse_Class = Coarse_Class_New(data,Coarse_Cls)
Coarse_Columns_IV = [Evaluate_IV_new(df_Coarse_Class.loc[:,i],data[Y]).IV.sum() for i in df_Coarse_Class.columns]
Coarse_Columns = pd.DataFrame({'Col':df_Coarse_Class.columns.tolist(),'IV':Coarse_Columns_IV})
Coarse_Columns = Coarse_Columns.loc[Coarse_Columns.IV>0.1,:]
df_Coarse_Class = df_Coarse_Class[Coarse_Columns.Col]
data.drop(list(Coarse_Cls.keys()),inplace=True,axis=1)
    data = pd.concat([data, df_Coarse_Class], axis=1)
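# Hedged end-to-end sketch with hypothetical data. Note that the visible body of
# applycoarseclass ends right after the final pd.concat, and that the
# data.drop(...) above runs in place, so the frame passed in is modified.
if __name__ == '__main__':
    demo = pd.DataFrame({
        'state': list('ABCDEF') * 4,
        'grade': ['X', 'Y', 'Z'] * 8,
        'churn': [1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0,
                  1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1],
    })
    applycoarseclass(demo, 'churn')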