import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from pathlib import Path
from datetime import datetime, date
# Preprocess terms for TF-IDF
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from num2words import num2words
# end of preprocess
# LDA
from gensim import corpora, models
import pyLDAvis.gensim
# print in color
from termcolor import colored
# end LDA
import pandas as pd
import geopandas
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.corpus import wordnet
# SPARQL
import sparql
# progress bar
from tqdm import tqdm
# plotting
import matplotlib.pyplot as plt
from matplotlib_venn_wordcloud import venn3_wordcloud
# multiprocessing
# BERT
from transformers import pipeline
# LOG
import logging
from logging.handlers import RotatingFileHandler
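# --- Hedged sketch (added for illustration, not part of the original script): the logging
# imports above suggest a rotating file logger roughly like the one below; the log file
# name, size limit and backup count are assumptions, not values from the source.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(RotatingFileHandler('analyse.log', maxBytes=1000000, backupCount=3))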
def biotexInputBuilder(tweetsofcity):
"""
Build and save a file formatted for Biotex analysis
:param tweetsofcity: dictionary {city: [{tweet, created_at}, ...]}
:return: none
"""
biotexcorpus = []
for city in tweetsofcity:
# Get all tweets for a city :
listOfTweetsByCity = [tweets['tweet'] for tweets in tweetsofcity[city]]
# convert this list in a big string of tweets by city
document = '\n'.join(listOfTweetsByCity)
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
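# --- Hedged usage sketch (illustration only, not part of the original script) ---
# Minimal shape of the input expected by biotexInputBuilder; the city key and the
# tweet texts below are made-up example values.
_demo_tweetsofcity = {"London_England_UK": [{"tweet": "first example tweet"}, {"tweet": "second example tweet"}]}
# biotexInputBuilder(_demo_tweetsofcity)  # would write elasticsearch/analyse/elastic-UK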
def preprocessTerms(document):
"""
Pre-process terms according to
https://towardsdatascience.com/tf-idf-for-document-ranking-from-scratch-in-python-on-real-world-dataset-796d339a4089
/!\ Be careful: it has a long execution time
:param document: raw text (document) to pre-process
:return: pre-processed text
"""
def lowercase(t):
return np.char.lower(t)
def removesinglechar(t):
words = word_tokenize(str(t))
new_text = ""
for w in words:
if len(w) > 1:
new_text = new_text + " " + w
return new_text
def removestopwords(t):
stop_words = stopwords.words('english')
words = word_tokenize(str(t))
new_text = ""
for w in words:
if w not in stop_words:
new_text = new_text + " " + w
return new_text
def removeapostrophe(t):
return np.char.replace(t, "'", "")
def removepunctuation(t):
symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
data = t
for symbol in symbols:
data = np.char.replace(data, symbol, ' ')  # accumulate replacements instead of restarting from t each time
data = np.char.replace(data, "  ", " ")
data = np.char.replace(data, ',', '')
return data
def convertnumbers(t):
tokens = word_tokenize(str(t))
new_text = ""
for w in tokens:
try:
w = num2words(int(w))
except Exception:
pass  # leave non-numeric tokens unchanged
new_text = new_text + " " + w
new_text = np.char.replace(new_text, "-", " ")
return new_text
doc = lowercase(document)
doc = removesinglechar(doc)
doc = removestopwords(doc)
doc = removeapostrophe(doc)
doc = removepunctuation(doc)
doc = removesinglechar(doc) # apostrophe create new single char
return doc
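# --- Hedged usage sketch (illustration only, not part of the original script) ---
# Assumes the NLTK 'punkt' and 'stopwords' resources have already been downloaded;
# the sample tweet is a made-up value.
_demo_doc = preprocessTerms("It's 3 degrees colder in Glasgow today!")
# Expected: lowercased text with apostrophes, punctuation, stop words and single
# characters removed, roughly " degrees colder glasgow today"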
def biotexAdaptativeBuilderAdaptative(listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Build a input biotex file well formated at the level wanted by concatenate cities's tweets
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return:
"""
matrixAggDay = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv")
# concat date with city
matrixAggDay['city'] = matrixAggDay[['city', 'day']].agg('_'.join, axis=1)
del matrixAggDay['day']
## change index
matrixAggDay.set_index('city', inplace=True)
matrixFiltred = spatiotemporelFilter(matrix=matrixAggDay, listOfcities=listOfcities,
spatialLevel='state', period=period)
## Pre-process :Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixFiltred["city"], matrixFiltred["state"], matrixFiltred["country"], matrixFiltred["date"] = \
zip(*matrixFiltred.index.map(splitindex))
# Aggregate by level
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state':
matrixFiltred = matrixFiltred.groupby('state')['tweetsList'].apply('.\n'.join).reset_index()
elif spatialLevel == 'country':
matrixFiltred = matrixFiltred.groupby('country')['tweetsList'].apply('.\n'.join).reset_index()
# Format biotex input file
biotexcorpus = []
for index, row in matrixFiltred.iterrows():
document = row['tweetsList']
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK-adaptativebiotex"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
def ldHHTFIDF(listOfcities):
""" /!\ for testing only !!!!
Only work if nb of states = nb of cities
i.e for UK working on 4 states with their capitals...
"""
print(colored("------------------------------------------------------------------------------------------", 'red'))
print(colored(" - UNDER DEV !!! - ", 'red'))
print(colored("------------------------------------------------------------------------------------------", 'red'))
tfidfwords = pd.read_csv("elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv", index_col=0)
texts = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv", index_col=1)
listOfStatesTopics = []
for i, citystate in enumerate(listOfcities):
city = str(listOfcities[i].split("_")[0])
state = str(listOfcities[i].split("_")[1])
# print(str(i) + ": " + str(state) + " - " + city)
# tfidfwords = [tfidfwords.iloc[0]]
dictionary = corpora.Dictionary([tfidfwords.loc[state]])
textfilter = texts.loc[texts.index.str.startswith(city + "_")]
corpus = [dictionary.doc2bow(text.split()) for text in textfilter.tweetsList]
# Find the best number of topics:
## Coherence measure C_v: Normalised Pointwise Mutual Information (NPMI: co-occurrence probability)
## i.e. degree of semantic similarity between high-scoring words in the topic
## and cosine similarity
nbtopics = range(2, 35)
coherenceScore = pd.Series(index=nbtopics, dtype=float)
for n in nbtopics:
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=n)
# Compute coherence score
## Split each row values
textssplit = textfilter.tweetsList.apply(lambda x: x.split()).values
coherence = models.CoherenceModel(model=lda, texts=textssplit, dictionary=dictionary, coherence='c_v')
coherence_result = coherence.get_coherence()
coherenceScore[n] = coherence_result
# print("level: " + str(state) + " - NB: " + str(n) + " - coherence LDA: " + str(coherenceScore[n]))
# Relaunch LDA with the best nbtopic
nbTopicOptimal = coherenceScore.idxmax()
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=nbTopicOptimal)
# save and visualisation
## save
for topic, listwords in enumerate(lda.show_topics()):
stateTopic = {'state': state}
ldaOutput = str(listwords).split(" + ")[1:]
for iword, word in enumerate(ldaOutput):
# reformat the LDA output for each word of the topic
stateTopic[iword] = ''.join(x for x in word if x.isalpha())
listOfStatesTopics.append(stateTopic)
## Visualisation
try:
vis = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
pyLDAvis.save_html(vis, "elasticsearch/analyse/lda/lda-tfidf_" + str(state) + ".html")
except Exception:
print("saving pyLDAvis failed. Nb of topics for " + state + ": " + str(nbTopicOptimal))
# Save file
listOfStatesTopicsCSV = pd.DataFrame(listOfStatesTopics)
listOfStatesTopicsCSV.to_csv("elasticsearch/analyse/lda/topicBySate.csv")
def wordnetCoverage(pdterms):
"""
Add an additional boolean column: True if the term is in WordNet
:param pdterms: pd.DataFrame of terms. Must have a column named "terms"
:return: pdterms with an additional boolean column: True if the term is in WordNet
"""
# Add a wordnet column boolean type : True if word is in wordnet, False otherwise
pdterms['wordnet'] = False
# Loop on terms and check if they are in wordnet
for index, row in pdterms.iterrows():
if len(wordnet.synsets(row['terms'])) != 0:
pdterms.at[index, 'wordnet'] = True
return pdterms
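# --- Hedged usage sketch (illustration only, not part of the original script) ---
# Assumes the NLTK 'wordnet' corpus is available; the terms below are example values.
_demo_terms = wordnetCoverage(pd.DataFrame({'terms': ['drought', 'flood', 'qxzzqy']}))
# The added 'wordnet' column is True for 'drought' and 'flood' (both have synsets)
# and stays False for the nonsense token.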
def sparqlquery(thesaurus, term):
"""
SPARQL query. This method has been factored out so that it can be used with multiprocessing
:param thesaurus: which thesaurus to query: 'agrovoc' or 'mesh'
:param term: term to align with the thesaurus
:return: SPARQL query result
"""
# Define MeSH sparql endpoint and query
endpointmesh = 'http://id.nlm.nih.gov/mesh/sparql'
qmesh = (
'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>'
'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>'
'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
'PREFIX owl: <http://www.w3.org/2002/07/owl#>'
'PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>'
'PREFIX mesh: <http://id.nlm.nih.gov/mesh/>'
'PREFIX mesh2020: <http://id.nlm.nih.gov/mesh/2020/>'
'PREFIX mesh2019: <http://id.nlm.nih.gov/mesh/2019/>'
'PREFIX mesh2018: <http://id.nlm.nih.gov/mesh/2018/>'
''
'ask '
'FROM <http://id.nlm.nih.gov/mesh> '
'WHERE { '
' ?meshTerms a meshv:Term .'
' ?meshTerms meshv:prefLabel ?label .'
' FILTER(lang(?label) = "en").'
' filter(REGEX(?label, "^' + str(term) + '$", "i"))'
''
'}'
)
# Define agrovoc sparql endpoint and query
endpointagrovoc = 'http://agrovoc.uniroma2.it/sparql'
qagrovoc = ('PREFIX skos: <http://www.w3.org/2004/02/skos/core#> '
'PREFIX skosxl: <http://www.w3.org/2008/05/skos-xl#> '
'ask WHERE {'
'?myterm skosxl:literalForm ?labelAgro.'
'FILTER(lang(?labelAgro) = "en").'
'filter(REGEX(?labelAgro, "^' + str(term) + '(s)*$", "i"))'
'}')
# query mesh
if thesaurus == "agrovoc":
q = qagrovoc
endpoint = endpointagrovoc
elif thesaurus == "mesh":
q = qmesh
endpoint = endpointmesh
else:
raise Exception('Wrong thesaurus given')
try:
result = sparql.query(endpoint, q, timeout=30)
# Sometimes the endpoint fails on a request:
# a SparqlException is raised by sparql-client if the timeout is reached;
# other exceptions (not identified yet) occur when the endpoint sends a malformed answer
except Exception:
result = "endpoint error"
return result
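# --- Hedged usage sketch (illustration only, not part of the original script) ---
# Note: this performs a live request against the Agrovoc endpoint; 'wheat' is an example term.
_demo_result = sparqlquery('agrovoc', 'wheat')
print("'wheat' in Agrovoc:", _demo_result != "endpoint error" and _demo_result.hasresult())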
def agrovocCoverage(pdterms):
"""
Add an additional boolean column: True if the term is in Agrovoc
:param pdterms: same as wordnetCoverage
:return: same as wordnetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a agrovoc column boolean type : True if terms is in Agrovoc
pdterms['agrovoc'] = False
# Loop on term
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="agrovoc"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('agrovoc', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'agrovoc'] = "Error"
elif result.hasresult():
pdterms.at[index, 'agrovoc'] = True
print("Agrovoc number of error: " + str(endpointerror))
return pdterms
def meshCoverage(pdterms):
"""
Add an additional boolean column: True if the term is in MeSH
:param pdterms: same as wordnetCoverage
:return: same as wordnetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a MeSH column boolean type : True if terms is in Mesh
pdterms['mesh'] = False
# Loop on terms with a progress bar
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="mesh"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('mesh', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'mesh'] = "Error"
elif result.hasresult():
pdterms.at[index, 'mesh'] = True
print("Mesh number of error: " + str(endpointerror))
return pdterms
def compareWithHTFIDF(number_of_term, dfToCompare, repToSave):
"""
Only used for ECIR2020 not for NLDB2021
:param number_of_term:
:param dfToCompare:
:param repToSave:
:return:
"""
# Stack / concatenate all terms from all states in one column
HTFIDFUniquedf = concatenateHTFIDFBiggestscore()[:number_of_term]
# select N first terms
dfToCompare = dfToCompare[:number_of_term]
common = pd.merge(dfToCompare, HTFIDFUniquedf, left_on='terms', right_on='terms', how='inner')
# del common['score']
common = common.terms.drop_duplicates()
common = common.reset_index()
del common['index']
common.to_csv("elasticsearch/analyse/" + repToSave + "/common.csv")
# Get what terms are specific to Adapt-TF-IDF
print(dfToCompare)
HTFIDFUniquedf['terms'][~HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])].dropna()
condition = HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])
specificHTFIDF = HTFIDFUniquedf.drop(HTFIDFUniquedf[condition].index)
specificHTFIDF = specificHTFIDF.reset_index()
del specificHTFIDF['index']
specificHTFIDF.to_csv("elasticsearch/analyse/" + repToSave + "/specific-H-TFIDF.csv")
# Get what terms are specific to dfToCompare
dfToCompare['terms'][~dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])].dropna()
condition = dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])
specificdfToCompare = dfToCompare.drop(dfToCompare[condition].index)
specificdfToCompare = specificdfToCompare.reset_index()
del specificdfToCompare['index']
specificdfToCompare.to_csv("elasticsearch/analyse/" + repToSave + "/specific-reference.csv")
# Print stats
percentIncommon = len(common) / len(HTFIDFUniquedf) * 100
percentOfSpecificHTFIDF = len(specificHTFIDF) / len(HTFIDFUniquedf) * 100
print("Percent in common " + str(percentIncommon))
print("Percent of specific at H-TFIDF : " + str(percentOfSpecificHTFIDF))
def HTFIDF_comparewith_TFIDF_TF():
"""
Only used for ECIR2020 not for NLDB2021
.. warnings:: /!\ under dev !!!. See TODO below
.. todo::
- Remove filter and pass it as args :
- period
- list of Cities
- Pass files path in args
- Pass number of term to extract for TF-IDF and TF
Gives common and specific terms between H-TFIDF and the classical TF & TF-IDF measures
Creates 6 csv files: 3 for each classical measure:
- Common.csv : list of common terms
- specific-htfidf : terms only in H-TF-IDF
- specific-reference : terms only in the classical measure
"""
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
# %matplotlib widget
# import IPython
from datetime import datetime, timedelta
import os
import matplotlib.pyplot as plt
import matplotlib.colors as clrs
from matplotlib.lines import Line2D
import cartopy.crs as ccrs # projection: Plate Carree - WGS84
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing
# ------------------------------------------------------------------------------
# import data
# ------------------------------------------------------------------------------
df = pd.read_csv('Data/ucdp_ged.csv', low_memory=False)
df['date'] = pd.to_datetime(df['date_start'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 01 10:00:58 2021
@author: <NAME>
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
from math import e
import numpy as np
import pandas as pd
import os
import time
import glob
import itertools
from joblib import Parallel, delayed
from generate_files import GenerateFiles
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import seaborn as sns
import matplotlib.style as style
style.use('seaborn-poster') #sets the size of the charts
style.use('ggplot')
from scipy import ndimage
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.stats import mad_std
import astrotools.healpytools as hpt
import astropy_healpix as ahp
from astropy.coordinates import ICRS
from tqdm import tqdm
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import healpy as hp
from hpproj import CutSky, to_coord
import logging
cs_logger = logging.getLogger('cutsky')
cs_logger.setLevel(logging.WARNING)
cs_logger.propagate = False
hpproj_logger = logging.getLogger('hpproj')
hpproj_logger.setLevel(logging.WARNING)
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
#------------------------------------------------------------------#
# # # # # Functions # # # # #
#------------------------------------------------------------------#
class MakeData(object):
"""Class to create and preprocess input/output files from full sky-maps.
"""
def __init__(self, dataset, npix, loops, planck_path, milca_path, disk_radius=None, output_path=None):
"""
Args:
dataset (str): file name for the cluster catalog that will used.
Options are 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'.
npix (int): size of the patches, in pixels.
bands (list): list of full sky-maps that will be used for the input file.
Options are '100GHz', '143GHz', '217GHz', '353GHz', '545GHz', '857GHz', and 'y-map'.
More full sky-maps will be added later on (e.g. CO2, X-ray, density maps).
loops (int): number of times the dataset containing patches with at least one cluster within will be added
again to the training set with random variations (translations/rotations).
planck_path (str): path to directory containing planck HFI 6 frequency maps.
Files should be named as following
'HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_545-field-Int_2048_R3.00_full.fits', 'HFI_SkyMap_857-field-Int_2048_R3.00_full.fits'.
milca_path (str): path to directory containing MILCA full sky map. File should be named 'milca_ymaps.fits'.
disk_radius (float, optional): Disk radius that will be used to create segmentation masks for output files.
Defaults to None.
output_path (str, optional): Path to output directory. Output directory needs be created beforehand using
'python xcluster.py -m True' selecting same output directory in 'params.py'.
If None, xcluster path will be used. Defaults to None.
"""
self.path = os.getcwd() + '/'
self.dataset = dataset # 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'
self.bands = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz','y-map','CO','p-noise']
self.loops = loops
self.n_labels = 2
maps = []
self.freq = 1022
self.planck_freq = 126
if '100GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 100', 'docontour': True}))
# self.freq += 2
# self.planck_freq += 2
if '143GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 143', 'docontour': True}))
# self.freq += 4
# self.planck_freq += 4
if '217GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 217', 'docontour': True}))
# self.freq += 8
# self.planck_freq += 8
if '353GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 353', 'docontour': True}))
# self.freq += 16
# self.planck_freq += 16
if '545GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_545-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 545', 'docontour': True}))
# self.freq += 32
# self.planck_freq += 32
if '857GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_857-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 857', 'docontour': True}))
# self.freq += 64
# self.planck_freq += 64
if 'y-map' in self.bands:
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True}))
# self.freq += 128
if 'CO' in self.bands:
maps.append((planck_path + "COM_CompMap_CO21-commander_2048_R2.00.fits", {'legend': 'CO', 'docontour': True}))
# self.freq += 256
if 'p-noise' in self.bands:
maps.append((planck_path + 'COM_CompMap_Compton-SZMap-milca-stddev_2048_R2.00.fits', {'legend': 'noise', 'docontour': True}))
# self.freq += 512
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True})) #used for plots only
self.maps = maps
self.temp_path = self.path + 'to_clean/'
self.disk_radius = disk_radius
self.npix = npix #in pixels
self.pixsize = 1.7 #in arcmin
self.ndeg = (self.npix * self.pixsize)/60 #in deg
self.nside = 2
if output_path is None:
self.output_path = self.path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
else:
self.output_path = output_path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
self.dataset_path = self.path + 'datasets/' + self.dataset + '/'
self.planck_path = planck_path
self.milca_path = milca_path
self.test_regions = [[0, 360, 90, 70],
[0, 120, 70, 40], [120, 240, 70, 40], [240, 360, 70, 40],
[0, 120, 40, 18], [120, 240, 40, 18], [240, 360, 40, 18],
[0, 120, -18, -40], [120, 240, -18, -40], [240, 360, -18, -40],
[0, 120, -40, -70], [120, 240, -40, -70], [240, 360, -40, -70],
[0, 360, -70, -90]]
self.val_regions = [[0, 180, -20, -40],
[0, 180, -20, -40], [0, 180, -20, -40], [0, 180, -20, -40],
[0, 360, -40, -60], [0, 360, -40, -60], [0, 360, -40, -60],
[0, 360, 60, 40], [0, 360, 60, 40], [0, 360, 60, 40],
[0, 180, 40, 20], [0, 180, 40, 20], [0, 180, 40, 20],
[0, 180, 40, 20]]
def plot_psz2_clusters(self, healpix_path):
"""Saves plots containing patches for planck frequency maps and y-map.
Function is deprecated and will be removed in later versions.
Args:
healpix_path (str): output path for plots (deprecated).
"""
maps = self.maps
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
glon = PSZ2[1].data['GLON']
glat = PSZ2[1].data['GLAT']
freq = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', 'y-map']
for j in range(len(glon)):
fig = plt.figure(figsize=(21,14), tight_layout=False)
fig.suptitle(r'$glon=$ {:.2f} $^\circ$, $glat=$ {:.2f} $^\circ$'.format(glon[j], glat[j]), y=0.92, fontsize=20)
cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False)
coord = to_coord([glon[j], glat[j]])
result = cutsky.cut_fits(coord)
for i,nu in enumerate(freq):
ax = fig.add_subplot(3,4,1+i)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
HDU = result[i]['fits']
im = ax.imshow(HDU.data, origin="lower")
w = WCS(HDU.header)
sky = w.world_to_pixel_values(glon[j], glat[j])
segmentation = plt.Circle((sky[0], sky[1]), 2.5/1.7, color='white', alpha=0.1)
ax.add_patch(segmentation)
ax.axvline(sky[0], ymin=0, ymax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axvline(sky[0], ymin=(self.npix//2+10)/self.npix, ymax=1, color='white', linestyle='--')
ax.axhline(sky[1], xmin=0, xmax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axhline(sky[1], xmin=(self.npix//2+10)/self.npix, xmax=1, color='white', linestyle='--')
# ax.scatter(sky[0], sky[1], color='red')
ax.set_title(r'%s'%nu)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.savefig(healpix_path + 'PSZ2/PSZ2_skycut_%s.png'%j, bbox_inches='tight', transparent=False)
plt.show()
plt.close()
def create_catalogs(self, plot=False):
"""Creates the following catalogs using 'PSZ2v1.fits', 'MCXC-Xray-clusters.fits', and 'redmapper_dr8_public_v6.3_catalog.fits'
(see <NAME> 2018 for more details):
planck_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with known redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC', 'Z'
planck_no_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with unknown redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC'
MCXC_no_planck (pd.DataFrame): dataframe with the following columns for MCXC clusters:
'RA', 'DEC', 'R500', 'M500', 'Z'
RM50_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>50:
'RA', 'DEC', 'LAMBDA', 'Z'
RM30_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>30:
'RA', 'DEC', 'LAMBDA', 'Z'
Catalogs are saved in output_path + /catalogs/. Input catalogs are in planck_path.
Args:
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
df_psz2 = pd.DataFrame(data={'RA': PSZ2[1].data['RA'].tolist(), 'DEC': PSZ2[1].data['DEC'].tolist(), 'GLON': PSZ2[1].data['GLON'].tolist(), 'GLAT':PSZ2[1].data['GLAT'].tolist(),
'M500': PSZ2[1].data['MSZ'].tolist(), 'R500': PSZ2[1].data['Y5R500'].tolist(), 'REDMAPPER': PSZ2[1].data['REDMAPPER'].tolist(), 'MCXC': PSZ2[1].data['MCXC'].tolist(),
'Z': PSZ2[1].data['REDSHIFT'].tolist()})
df_psz2 = df_psz2.replace([-1, -10, -99], np.nan)
planck_no_z = df_psz2.query('Z.isnull()', engine='python')
planck_z = df_psz2.query('Z.notnull()', engine='python')
# planck_no_z = planck_no_z[['RA', 'DEC']].copy()
# planck_z = planck_z[['RA', 'DEC']].copy()
planck_no_z.to_csv(self.path + 'catalogs/planck_no-z' + '.csv', index=False)
planck_z.to_csv(self.path + 'catalogs/planck_z' + '.csv', index=False)
MCXC = fits.open(self.planck_path + 'MCXC-Xray-clusters.fits')
MCXC_skycoord = SkyCoord(ra=MCXC[1].data['RA'].tolist(), dec=MCXC[1].data['DEC'].tolist(), unit=u.degree)
MCXC_GLON = list(MCXC_skycoord.galactic.l.degree)
MCXC_GLAT = list(MCXC_skycoord.galactic.b.degree)
df_MCXC = pd.DataFrame(data={'RA': MCXC[1].data['RA'].tolist(), 'DEC': MCXC[1].data['DEC'].tolist(), 'R500': MCXC[1].data['RADIUS_500'].tolist(), 'M500': MCXC[1].data['MASS_500'].tolist(),
'GLON': MCXC_GLON, 'GLAT': MCXC_GLAT, 'Z': MCXC[1].data['REDSHIFT'].tolist()})
REDMAPPER = fits.open(self.planck_path + 'redmapper_dr8_public_v6.3_catalog.fits')
REDMAPPER_skycoord = SkyCoord(ra=REDMAPPER[1].data['RA'].tolist(), dec=REDMAPPER[1].data['DEC'].tolist(), unit=u.degree)
REDMAPPER_GLON = list(REDMAPPER_skycoord.galactic.l.degree)
REDMAPPER_GLAT = list(REDMAPPER_skycoord.galactic.b.degree)
df_REDMAPPER = pd.DataFrame(data={'RA': REDMAPPER[1].data['RA'].tolist(), 'DEC': REDMAPPER[1].data['DEC'].tolist(), 'LAMBDA': REDMAPPER[1].data['LAMBDA'].tolist(),
'GLON': REDMAPPER_GLON, 'GLAT': REDMAPPER_GLAT, 'Z': REDMAPPER[1].data['Z_SPEC'].tolist()})
df_REDMAPPER_30 = df_REDMAPPER.query("LAMBDA > 30")
df_REDMAPPER_50 = df_REDMAPPER.query("LAMBDA > 50")
ACT = fits.open(self.planck_path + 'sptecs_catalog_oct919_forSZDB.fits')
SPT = fits.open(self.planck_path + 'DR5_cluster-catalog_v1.1_forSZDB.fits')
df_act = pd.DataFrame(data={'RA': list(ACT[1].data['RA']), 'DEC': list(ACT[1].data['DEC']), 'GLON': list(ACT[1].data['GLON']), 'GLAT': list(ACT[1].data['GLAT'])})
df_spt = pd.DataFrame(data={'RA': list(SPT[1].data['RA']), 'DEC': list(SPT[1].data['DEC']), 'GLON': list(SPT[1].data['GLON']), 'GLAT': list(SPT[1].data['GLAT'])})
self.remove_duplicates_on_radec(df_MCXC, df_psz2, output_name='MCXC_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_30, df_psz2, output_name='RM30_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_50, df_psz2, output_name='RM50_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_act, df_psz2, output_name='ACT_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_spt, df_psz2, output_name='SPT_no_planck', plot=plot)
PSZ2.close()
MCXC.close()
REDMAPPER.close()
ACT.close()
SPT.close()
def create_fake_source_catalog(self):
PGCC = fits.open(self.planck_path + 'HFI_PCCS_GCC_R2.02.fits')
df_pgcc = pd.DataFrame(data={'RA': list(PGCC[1].data['RA']), 'DEC': list(PGCC[1].data['DEC']), 'GLON': list(PGCC[1].data['GLON']), 'GLAT': list(PGCC[1].data['GLAT'])})
PGCC.close()
df_pgcc.to_csv(self.path + 'catalogs/' + 'PGCC' + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
bands = ['100GHz', '143GHz', '217GHz', '353GHz', '545GHz', '857GHz']
cs_100 = fits.open(self.planck_path + 'COM_PCCS_100_R2.01.fits')
cs_143 = fits.open(self.planck_path + 'COM_PCCS_143_R2.01.fits')
cs_217 = fits.open(self.planck_path + 'COM_PCCS_217_R2.01.fits')
cs_353 = fits.open(self.planck_path + 'COM_PCCS_353_R2.01.fits')
cs_545 = fits.open(self.planck_path + 'COM_PCCS_545_R2.01.fits')
cs_857 = fits.open(self.planck_path + 'COM_PCCS_857_R2.01.fits')
df_cs_100 = pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})
df_cs_100.to_csv(self.path + 'catalogs/' + 'cs_100' + '.csv', index=False)
df_cs_143 = pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})
df_cs_143.to_csv(self.path + 'catalogs/' + 'cs_143' + '.csv', index=False)
df_cs_217 = pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})
df_cs_217.to_csv(self.path + 'catalogs/' + 'cs_217' + '.csv', index=False)
df_cs_353 = pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})
df_cs_353.to_csv(self.path + 'catalogs/' + 'cs_353' + '.csv', index=False)
df_cs_545 = pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})
df_cs_545.to_csv(self.path + 'catalogs/' + 'cs_545' + '.csv', index=False)
df_cs_857 = pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})
df_cs_857.to_csv(self.path + 'catalogs/' + 'cs_857' + '.csv', index=False)
freq = 0
if '100GHz' in bands:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in bands:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in bands:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in bands:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in bands:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in bands:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
for L in range(1, len(bands)):
for subset in tqdm(itertools.combinations(bands, L)):
freq = 0
if '100GHz' in subset:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in subset:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in subset:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in subset:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in subset:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in subset:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
cs_100.close()
cs_143.close()
cs_217.close()
cs_353.close()
cs_545.close()
cs_857.close()
def remove_duplicates_on_radec(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=5, plot=False):
""""Takes two different dataframes with columns 'RA' & 'DEC' and performs a spatial
coordinate match with a tol=5 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 5.
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_main.query("ismatched == False", inplace=True)
df_main.drop(columns=['ismatched', 'ID'], inplace=True)
df_main = df_main.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
elif with_itself == False:
assert df_with_dup is not None
ID = np.arange(0, len(df_with_dup))
df_with_dup = df_with_dup[['RA', 'DEC']].copy()
df_with_dup.insert(loc=0, value=ID, column='ID')
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
pcatalog_sub = SkyCoord(ra=df_with_dup['RA'].values, dec=df_with_dup['DEC'].values, unit='deg')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_with_dup.drop(columns=['RA', 'DEC'], inplace=True)
df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1)
df_wo_dup.query("ismatched == False", inplace=True)
df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True)
df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
df_main = df_wo_dup.copy()
if plot == True and output_name is not None:
fig = plt.figure(figsize=(8,8), tight_layout=False)
ax = fig.add_subplot(111)
ax.set_facecolor('white')
ax.grid(True, color='grey', lw=0.5)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20)
ax.set_ylabel(output_name, fontsize=20)
ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400)
ax.axvline(tol, color='k', linestyle='--')
ax.set_xlim(0, 2*tol)
plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False)
plt.show()
plt.close()
return df_main
def remove_duplicates_on_lonlat(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=2, plot=False):
""""Takes two different dataframes with columns 'GLON' & 'GLAT' and performs a spatial
coordinate match with a tol=2 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 2.
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
df_main['ismatched'], df_main['ID'] = ismatched, idx
df_main.query("ismatched == False", inplace=True)
df_main.drop(columns=['ismatched', 'ID'], inplace=True)
df_main = df_main.replace([-1, -10, -99], np.nan)
if output_name is not None:
df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
elif with_itself == False:
assert df_with_dup is not None
ID = np.arange(0, len(df_with_dup))
df_with_dup = df_with_dup[['GLON', 'GLAT']].copy()
df_with_dup.insert(loc=0, value=ID, column='ID')
scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
pcatalog_sub = SkyCoord(df_with_dup['GLON'].values, df_with_dup['GLAT'].values, unit='deg', frame='galactic')
idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
# Importing the basic libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
# Reading our dataset
data = pd.read_csv('kc_house_data.csv')
# cleaning some data
data = data.drop(['date', 'zipcode'], axis = 1)
# Plotting the correlation matrix
corr = data.corr()
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(12, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})
# Building our models
features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'grade', 'sqft_above', 'sqft_basement', 'condition', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_living15', 'sqft_lot15']
X = data[features]
y = data.price
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
# Results
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
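# Hedged addition (illustration only): the 'metrics' import above suggests an evaluation
# step; a typical one for this regression could look like the lines below.
print('MAE :', metrics.mean_absolute_error(y_test, y_pred))
print('MSE :', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))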
import pytest
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture(params=[True, False])
def by_blocks_fixture(request):
return request.param
@pytest.fixture(params=["DataFrame", "Series"])
def obj_fixture(request):
return request.param
def _assert_frame_equal_both(a, b, **kwargs):
"""
Check that two DataFrame equal.
This check is performed commutatively.
Parameters
----------
a : DataFrame
The first DataFrame to compare.
b : DataFrame
The second DataFrame to compare.
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
tm.assert_frame_equal(a, b, **kwargs)
tm.assert_frame_equal(b, a, **kwargs)
def _assert_not_frame_equal(a, b, **kwargs):
"""
Check that two DataFrame are not equal.
Parameters
----------
a : DataFrame
The first DataFrame to compare.
b : DataFrame
The second DataFrame to compare.
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
try:
tm.assert_frame_equal(a, b, **kwargs)
msg = "The two DataFrames were equal when they shouldn't have been"
pytest.fail(msg=msg)
except AssertionError:
pass
def _assert_not_frame_equal_both(a, b, **kwargs):
"""
Check that two DataFrame are not equal.
This check is performed commutatively.
Parameters
----------
a : DataFrame
The first DataFrame to compare.
b : DataFrame
The second DataFrame to compare.
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
_assert_not_frame_equal(a, b, **kwargs)
_assert_not_frame_equal(b, a, **kwargs)
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_row_order_mismatch(check_like, obj_fixture):
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
df2 = DataFrame({"A": [3, 2, 1], "B": [6, 5, 4]}, index=["c", "b", "a"])
if not check_like: # Do not ignore row-column orderings.
msg = "{obj}.index are different".format(obj=obj_fixture)
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)
else:
_assert_frame_equal_both(df1, df2, check_like=check_like, obj=obj_fixture)
@pytest.mark.parametrize(
"df1,df2",
[
(DataFrame({"A": [1, 2, 3]}), DataFrame({"A": [1, 2, 3, 4]})),
(DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), DataFrame({"A": [1, 2, 3]})),
],
)
def test_frame_equal_shape_mismatch(df1, df2, obj_fixture):
msg = "{obj} are different".format(obj=obj_fixture)
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, obj=obj_fixture)
@pytest.mark.parametrize(
"df1,df2,msg",
[
# Index
(
DataFrame.from_records({"a": [1, 2], "c": ["l1", "l2"]}, index=["a"]),
DataFrame.from_records({"a": [1.0, 2.0], "c": ["l1", "l2"]}, index=["a"]),
"DataFrame\\.index are different",
),
# MultiIndex
(
DataFrame.from_records(
{"a": [1, 2], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
),
DataFrame.from_records(
{"a": [1.0, 2.0], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
),
"MultiIndex level \\[0\\] are different",
),
],
)
def test_frame_equal_index_dtype_mismatch(df1, df2, msg, check_index_type):
kwargs = dict(check_index_type=check_index_type)
if check_index_type:
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, **kwargs)
else:
tm.assert_frame_equal(df1, df2, **kwargs)
def test_empty_dtypes(check_dtype):
columns = ["col1", "col2"]
df1 = DataFrame(columns=columns)
df2 = DataFrame(columns=columns)
kwargs = dict(check_dtype=check_dtype)
df1["col1"] = df1["col1"].astype("int64")
if check_dtype:
msg = r"Attributes of DataFrame\..* are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, **kwargs)
else:
tm.assert_frame_equal(df1, df2, **kwargs)
def test_frame_equal_index_mismatch(obj_fixture):
msg = """{obj}\\.index are different
{obj}\\.index values are different \\(33\\.33333 %\\)
\\[left\\]: Index\\(\\['a', 'b', 'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\['a', 'b', 'd'\\], dtype='object'\\)""".format(
obj=obj_fixture
)
df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
df2 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "d"])
import json
import os
import csv
import pandas as pd
import re
import pickle
import collections
import subprocess
import spacy
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
# global variable settings
supported_languages = ['DE', 'EN', 'ES', 'FR']
lexicon_languages = {lang: False for lang in supported_languages}
supported_word_types = ['PROPN','NOUN']
# ensure the models are downloaded in the first place by 'python -m spacy download <language_model>'
language_models = {'EN': 'en_core_web_sm', 'DE': 'de_core_news_sm', 'FR': 'fr_core_news_sm', 'ES': 'es_core_news_sm',
'IT': 'it_core_news_sm', 'LT': 'lt_core_news_sm', 'NB': 'nb_core_news_sm', 'nl': 'nl_core_news_sm',
'PT': 'pt_core_news_sm'}
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
if port == outports[1]['name']:
api.queue.append(msg)
else:
# print('{}: {}'.format(port, msg))
pass
def set_config(config):
api.config = config
class config:
## Meta data
config_params = dict()
tags = {'sdi_utils': '', 'spacy': ''}
version = "0.0.18"
operator_name = "text_words"
operator_description = "Words from Text"
operator_description_long = "Extracts words from text for further analysis."
add_readme = dict()
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
language = 'None'
config_params['language'] = {'title': 'Language', 'description': 'Filter for language of media.',
'type': 'string'}
types = 'PROPN, NOUN'
config_params['types'] = {'title': 'Types',
'description': 'Define the kind of data extraction.', 'type': 'string'}
entity_types = 'ORG, PER, LOC'
config_params['entity_types'] = {'title': 'Entity types',
'description': 'List of entity types', 'type': 'string'}
max_word_len = 80
config_params['max_word_len'] = {'title': 'Maximum word lenght', 'description': 'Maximum word length.',
'type': 'integer'}
min_word_len = 3
config_params['min_word_len'] = {'title': 'Minimum word length', 'description': 'Minimum word length.',
'type': 'integer'}
# global variables
id_set = set()
def process(msg):
global id_set
operator_name = 'text_words'
logger, log_stream = slog.set_logging(operator_name, api.config.debug_mode)
logger.info("Main Process started. Logging level: {}".format(logger.level))
time_monitor = tp.progress()
df = msg.body
att_dict = msg.attributes
# Remove ID that has been processed
df = df.loc[~df['ID'].isin(id_set)]
id_set.update(df['ID'].unique().tolist())
# Languages
language_filter = tfp.read_value(api.config.language)
logger.info('Language filter: {}'.format(language_filter))
if not language_filter :
language_filter = df['LANGUAGE'].unique().tolist()
language_filter = [ lang for lang in language_filter if lang in language_models.keys()]
nlp = dict()
for lc in language_filter :
nlp[lc] = spacy.load(language_models[lc])
df = df.loc[df['LANGUAGE'].isin(language_filter)]
# Warning for languages not supported
languages_not_supported = [ lang for lang in language_filter if not lang in language_models.keys()]
if languages_not_supported :
logger.warning('Texts in the following languages were not analysed (unsupported language): {}'.format(languages_not_supported))
# word types
types = tfp.read_list(api.config.types)
logger.info('Word types to be extracted: {}'.format(types))
entity_types = tfp.read_list(api.config.entity_types)
logger.info('Entity types to be extracted: {}'.format(entity_types))
# Create doc for all
word_bag_list = list()
def get_words(id, language, text) :
if not isinstance(text,str) :
logger.warning(('Record with error - ID: {} - {}'.format(id,text) ))
return -1
doc = nlp[language](text)
words = list()
for t in types:
words.extend([[id,language,t,token.lemma_[:api.config.max_word_len]] for token in doc if token.pos_ == t])
for et in entity_types:
words.extend([[id,language,et,ent.text[:api.config.max_word_len]] for ent in doc.ents if ent.label_ == et])
word_bag_list.append(pd.DataFrame(words,columns = ['ID','LANGUAGE','TYPE','WORD']))
df.apply(lambda x : get_words(x['ID'],x['LANGUAGE'],x['TEXT']),axis=1)
word_bag = pd.concat(word_bag_list)
word_bag = word_bag.loc[word_bag['WORD'].str.len() >= api.config.min_word_len ]
word_bag['COUNT'] = 1
word_bag = word_bag.groupby(['ID','LANGUAGE','TYPE','WORD'])['COUNT'].sum().reset_index()
# test for duplicates
dup_s = word_bag.duplicated(subset=['ID','LANGUAGE','TYPE','WORD']).value_counts()
num_duplicates = dup_s[True] if True in dup_s else 0
logger.info('Duplicates: {} / {}'.format(num_duplicates, word_bag.shape[0]))
att_dict['message.lastBatch'] = True
table_msg = api.Message(attributes=att_dict, body=word_bag)
logger.info('Labels in document: {}'.format(word_bag['TYPE'].unique().tolist()))
api.send(outports[0]['name'], log_stream.getvalue())
api.send(outports[1]['name'], table_msg)
inports = [{'name': 'docs', 'type': 'message.DataFrame', "description": "Message with body as dictionary."}]
outports = [{'name': 'log', 'type': 'string', "description": "Logging data"}, \
{'name': 'data', 'type': 'message.DataFrame', "description": "Table with word index"}]
#api.set_port_callback(inports[0]['name'], process)
def test_operator():
config = api.config
config.debug_mode = True
config.types = 'PROPN'
config.entity_types = 'PER, ORG, LOC'
config.language = 'None'
config.max_word_len = 80
config.min_word_len = 3
config.batch_size = 10
api.set_config(config)
doc_file = '/Users/Shared/data/onlinemedia/data/doc_data_cleansed.csv'
df = pd.read_csv(doc_file,sep=',',nrows=1000000000)
msg = api.Message(attributes={'file': {'path': doc_file},'format':'pandas'}, body=df)
process(msg)
out_file = '/Users/Shared/data/onlinemedia/data/word_extraction.csv'
df_list = [d.body for d in api.queue]
pd.concat(df_list)
import pandas as pd
from scipy import sparse
from itertools import repeat
import pytest
import anndata as ad
from anndata.utils import make_index_unique
from anndata.tests.helpers import gen_typed_df
def test_make_index_unique():
index = pd.Index(["val", "val", "val-1", "val-1"])
with pytest.warns(UserWarning):
result = make_index_unique(index)
expected = pd.Index(["val", "val-2", "val-1", "val-1-1"])
assert list(expected) == list(result)
assert result.is_unique
def test_adata_unique_indices():
m, n = (10, 20)
obs_index = pd.Index(repeat("a", m), name="obs")
var_index = pd.Index(repeat("b", n), name="var")
adata = ad.AnnData(
X=sparse.random(m, n, format="csr"),
obs=gen_typed_df(m, index=obs_index),
var=gen_typed_df(n, index=var_index),
obsm={"df": gen_typed_df(m, index=obs_index)},
varm={"df": gen_typed_df(n, index=var_index)},
)
pd.testing.assert_index_equal(adata.obsm["df"].index, adata.obs_names)
pd.testing.assert_index_equal(adata.varm["df"].index, adata.var_names)
adata.var_names_make_unique()
adata.obs_names_make_unique()
assert adata.obs_names.name == "obs"
assert adata.var_names.name == "var"
assert len(pd.unique(adata.obs_names)) == m
assert len(pd.unique(adata.var_names)) == n
import pandas as pd
import numpy as np
from collections import namedtuple
def myiter(d, cols=None):
if cols is None:
v = d.values.tolist()
cols = d.columns.values.tolist()
else:
j = [d.columns.get_loc(c) for c in cols]
v = d.values[:, j].tolist()
n = namedtuple('MyTuple', cols)
for line in iter(v):
yield n(*line)
conditions = ['Cardio', 'Smoking','Alcohol','Diabetes','Alzheimers','Cancer','Obesity','Arthritis','Asthma','Stroke']
data = {'Cardio':'317', 'Smoking':'300','Alcohol':'249','Diabetes':'245','Alzheimers':'223','Cancer':'171','Obesity':'147','Arthritis':'128','Asthma':'56','Stroke':'33'}
pd.DataFrame.from_dict(data, orient='index')
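# --- Hedged usage sketch (illustration only, not part of the original snippet) ---
# Demonstrates myiter() on a DataFrame built from the dict above; the column names
# 'Condition' and 'Count' are chosen here for the example.
_demo_df = pd.DataFrame({'Condition': [k for k in data], 'Count': [int(v) for v in data.values()]})
_demo_rows = [row for row in myiter(_demo_df)]
print(_demo_rows[0].Condition, _demo_rows[0].Count)  # -> Cardio 317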
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import re
import pandas as pd
import numpy as np
import sys
import glob
import traceback
import json
import folium
import datetime
from ast import literal_eval
sys.path.append('/lib')
import lib.helper as helper
# %load_ext folium_magic
CURRENT_DIR = os.getcwd()
DATA_DIR = CURRENT_DIR + '/data'
RAW_DATA_DIR = DATA_DIR + '/raw'
ANALYSES_DIR = "data/analyses/"
MAPS_DIR = DATA_DIR + "/maps"
UK_REGIONS_FILE = CURRENT_DIR + '/lib/UK-regions.json'
# In[35]:
#
# # Absolute or relative path to processed instructor and workshop data that we want to analyse/map (extracted from the Carpentries REDASH)
# instructors_file = "data/processed/processed_carpentry_instructors_UK_2020-12-10_redash.csv"
# workshops_file = "data/processed/processed_carpentry_workshops_UK_2020-12-10_redash.csv"
# In[ ]:
# For executing from command line after converting to python script
args = helper.parse_command_line_parameters_redash()
instructors_file = args.processed_instructors_file
workshops_file = args.processed_workshops_file
# In[5]:
instructors_df = pd.read_csv(instructors_file, encoding = "utf-8")
# instructors_df = instructors_df.drop(labels=['first_name', 'last_name'], axis=1)
# load 'taught_workshops_per_year' column as dictionary
instructors_df.loc[~instructors_df['taught_workshops_per_year'].isnull(),['taught_workshops_per_year']] = instructors_df.loc[~instructors_df['taught_workshops_per_year'].isnull(),'taught_workshops_per_year'].apply(lambda x: literal_eval(x))
instructors_df.loc[instructors_df['taught_workshops_per_year'].isnull(),['taught_workshops_per_year']] = instructors_df.loc[instructors_df['taught_workshops_per_year'].isnull(),'taught_workshops_per_year'].apply(lambda x: {})
# Let's change type of some columns and do some conversions
# Convert list of strings into list of dates for 'taught_workshop_dates' and 'earliest_badge_awarded' columns (turn NaN into [])
instructors_df['taught_workshop_dates'] = instructors_df['taught_workshop_dates'].str.split(',')
instructors_df.loc[instructors_df['taught_workshop_dates'].isnull(), ['taught_workshop_dates']] = instructors_df.loc[instructors_df['taught_workshop_dates'].isnull(), 'taught_workshop_dates'].apply(lambda x: [])
instructors_df['taught_workshop_dates'] = instructors_df['taught_workshop_dates'].apply(lambda list_str: [datetime.datetime.strptime(date_str, '%Y-%m-%d').date() for date_str in list_str])
# Convert comma-separated strings into lists for the 'taught_workshops' column (turn NaN into [])
instructors_df['taught_workshops'] = instructors_df['taught_workshops'].str.split(',')
instructors_df.loc[instructors_df['taught_workshops'].isnull(), ['taught_workshops']] = instructors_df.loc[instructors_df['taught_workshops'].isnull(), 'taught_workshop_dates'].apply(lambda x: [])
# Convert 'earliest_badge_awarded' column from strings to datetime
instructors_df['earliest_badge_awarded'] = pd.to_datetime(instructors_df['earliest_badge_awarded'], format="%Y-%m-%d").apply(lambda x: x.date())
print(type(instructors_df['earliest_badge_awarded'][0]))
# In[6]:
# Let's inspect our instructors data
instructors_df.head(10)
# In[7]:
# Let's inspect our data a bit more
print("Columns: ")
print(instructors_df.columns)
print("\nData types: ")
print(instructors_df.dtypes)
print("\nExpecting a list for 'taught_workshop_dates' column: ")
print(type(instructors_df['taught_workshop_dates'][0]))
print("\nExpecting dates in the list in 'taught_workshop_dates' column: ")
print(instructors_df['taught_workshop_dates'][0])
print("\nExpecting datetime for 'earliest_badge_awarded' column: ")
print(instructors_df['earliest_badge_awarded'][0])
print("\n'earliest_badge_awarded' column should not have nulls:")
print(instructors_df[instructors_df['earliest_badge_awarded'].isnull()])
print("\nWhich instructors have null for institution?")
print(instructors_df[instructors_df['institution'].isna()].index)
print("\nWhich instructors have null for region?")
print(instructors_df[instructors_df['region'].isna()].index)
print("\nWhich instructors have null for geo-coordinates?")
print(instructors_df[instructors_df['longitude'].isna()].index)
# In[8]:
# How many instructors are there in total?
instructors_df.index.size
# In[9]:
# Get the date of the last taught workshop
instructors_workshops_df = pd.DataFrame(instructors_df[['taught_workshops', 'taught_workshop_dates', 'taught_workshops_per_year', 'earliest_badge_awarded']])
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import os
from pathlib import Path
import sys
import Imitate
from Imitate import *
import RegressionImitator
from RegressionImitator import *
import pandas as pd
import seaborn as sns
colors = [
"tab:blue",
"tab:orange",
"tab:purple",
"tab:red",
"tab:green",
"tab:brown",
"tab:pink",
]
def get_network_name(m):
name = m.split("-")[1]
return (
name.capitalize()
.replace("net", "Net")
.replace("resnext", "ResNext")
.replace("deep", "Deep")
.replace("deeper", "Deeper")
.replace("_res", "_Res")
)
def get_clean_names(m):
return m.split("-")[1]
def index_one(num):
return num[0]
def get_df(data_dict, mazes):
df = pd.DataFrame.from_dict(data_dict)
df = df.assign(Maze=mazes)
df = df.T
df.columns = df.iloc[-1]
df = df.drop(df.index[-1])
# df['Network'] = df.index
df = df.reset_index()
df.rename(columns={'index':'Network'}, inplace=True)
df['std'] = df[df.columns[1:]].std(axis=1)
df['mean'] = df[df.columns[1:-1]].mean(axis=1)
# df = df.assign(lower_error=1.96*df['std'])
df = df.assign(error=1.96*df['std']/np.sqrt(len(mazes)))
return df
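# Note on the 'error' column built above: 1.96 * std / sqrt(n_mazes) is the half-width
# of an approximate 95% confidence interval for the per-network mean (normal approximation).
# Illustrative call, assuming a dict of per-maze scores keyed by network name:
#   df = get_df({"classification-resnet18-0": [91.2, 88.0]}, mazes=["maze_a", "maze_b"])
#   df[["Network", "mean", "error"]]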
def get_error(df):
ci_bounds = df['error'].to_numpy()
return ci_bounds
def get_colors(num_unique_mazes) -> list:
color_labels = []
for i in range(num_unique_mazes):
color = colors[i % len(colors)]
for i in range(4):
color_labels.append(color)
return color_labels
def plot_bars(df, metric):
matplotlib.rcParams.update({'font.size': 12})
full_names = list(df["Network"])
clean_names = list(map(get_network_name, full_names))
unique_names = list(pd.unique(clean_names))
sparse_labels = []
for i in range(0, len(clean_names)):
if i % 4 == 0:
sparse_labels.append(clean_names[i])
else:
sparse_labels.append("")
color_labels = get_colors(len(unique_names))
ci_bounds = get_error(df)
max_error = max(df["error"])
increment = 5
fig, ax = plt.subplots(figsize=(12, 12))
y_lab = "Average Steps Needed Over Averaged Mazes"
x = np.arange(len(clean_names))
width = 0.65
vals = list(df["mean"])
if metric == "Completion":
ax.xaxis.set_major_formatter(mtick.PercentFormatter())
y_lab = "Average Completion Over Averaged Mazes"
ax.set_xticks(np.arange(0, 100, 10))
ax.set_xlim(left = 0.0, right = 100.0)
else:
ax.set_xticks(np.arange(0, max(vals), 200))
ax.set_xlim(left = 0.0, right = max(vals))
ax.barh(
x,
vals,
xerr=ci_bounds,
color=color_labels,
align="center",
alpha=0.75,
ecolor="grey",
capsize=2,
)
ax.set_yticks(x)
# ax.set_yticks(np.arange(0, max(vals) + max_error, increment))
ax.set_yticklabels(labels=sparse_labels)
# ax.legend()
# Axis styling.
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_color("#DDDDDD")
ax.tick_params(bottom=False, left=False)
ax.set_axisbelow(True)
ax.xaxis.grid(True, color="#EEEEEE")
ax.yaxis.grid(False)
ax.set_xlabel("Percentage Completed", labelpad=15)
ax.set_ylabel("Architecture", labelpad=15)
ax.set_title(f"{metric} Navigated per Network Replicate")
return ax
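# Illustrative use of plot_bars, assuming `df` came from get_df() above (so it has
# 'Network', per-maze, 'std', 'mean' and 'error' columns):
#   ax = plot_bars(df, "Completion")
#   plt.savefig("completion_per_network.png", bbox_inches="tight")
# Passing "Completion" switches the x-axis to a 0-100% scale; any other metric string
# scales the axis to the largest mean value instead.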
# Scatter Plot Code
def get_csv(file):
return "csv" in file
def get_petrained(file):
return "-pretrained" in file
def get_nonpetrained(file):
return "-notpretrained" in file
def get_losses(csv_files, data_dir, loss_type):
training_losses = []
for c in csv_files:
if c == "classification-resnet18-pretrained-trainlog-0.csv":
training_losses.append(0.0)
continue
df = pandas.read_csv(data_dir + "/" + c)
if len(df) == 0:
training_losses.append(0.0)
else:
if loss_type == "time":
training_losses.append(min(df[loss_type]))
else:
training_losses.append(min(df[loss_type]))
return training_losses
def merge_loss_data(data_dir, df, loss_type, model_type, average=False):
data_files = os.listdir(data_dir)
data_files.sort()
csvs = list(filter(get_csv, data_files))
if model_type == "pretrained":
csvs = list(filter(get_petrained, csvs))
else:
csvs = list(filter(get_nonpetrained, csvs))
losses = get_losses(csvs, data_dir, loss_type)
means = df['mean']
names = list(map(get_network_name, df['Network']))
if average:
losses = np.array(losses).reshape(-1,4).mean(axis=1)
means = np.array(means).reshape(-1,4).mean(axis=1)
names = list(pd.unique(names))
df = | pandas.DataFrame() | pandas.DataFrame |
"""Kusto helper functions"""
def dataframe_from_result_table(table, raise_errors=True):
    """Returns Pandas data frame."""
import pandas
import json
from six import text_type
kusto_to_dataframe_data_types = {
"bool": "bool",
"uint8": "int64",
"int16": "int64",
"uint16": "int64",
"int": "int64",
"uint": "int64",
"long": "int64",
"ulong": "int64",
"float": "float64",
"real": "float64",
"decimal": "float64",
"string": "object",
"datetime": "datetime64[ns]",
"guid": "object",
"timespan": "timedelta64[ns]",
"dynamic": "object",
# Support V1
"DateTime": "datetime64[ns]",
"Int32": "int32",
"Int64": "int64",
"Double": "float64",
"String": "object",
"SByte": "object",
"Guid": "object",
"TimeSpan": "object",
}
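    # The mapping above pairs each Kusto column type with the pandas dtype the
    # corresponding DataFrame column is presumably cast to (e.g. "long" -> "int64",
    # "datetime" -> "datetime64[ns]"); the names at the bottom cover the older V1 API.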
"""Returns Pandas data frame."""
if not table.columns or not table.rows:
return | pandas.DataFrame() | pandas.DataFrame |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import json
import seaborn as sns
import datetime
import numpy as np
import pandas as pd
from glob import glob
import os
import sys
import pytz
from IPython import embed
from opendrift.readers.reader_current_from_drifter import Reader as DrifterReader
from opendrift.readers.reader_current_from_track import Reader as TrackReader
from opendrift.readers.reader_netCDF_CF_generic import Reader as GenericReader
from opendrift.readers.reader_netCDF_CF_unstructured import Reader as UnstructuredReader
from opendrift.readers.reader_grib2 import Reader as Grib2Reader
from opendrift.readers.reader_ROMS_native import Reader as ROMSReader
from download_fft_data import download_data
comp_start_time = datetime.datetime(2021, 11, 1, 17, 0, 0, 0, pytz.UTC)
comp_end_time = datetime.datetime(2021, 12, 4, 17, 0, 0, 0, pytz.UTC)
#comp_eval_time = datetime.datetime(2021, 11, 1, 17, 0, 0, 0, pytz.UTC)
def make_datetimes_from_args(args):
start_time = datetime.datetime(args.start_year, args.start_month, args.start_day, args.start_hour, 0, 0, 0, pytz.UTC)
end_time = start_time + datetime.timedelta(days=args.future_days)
start_str = start_time.strftime("%Y%m%d-%H%M")
end_str = end_time.strftime("%Y%m%d-%H%M")
return start_time, start_str, end_time, end_str
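# Illustrative call, assuming an argparse-style namespace with the attributes read above:
#   args = argparse.Namespace(start_year=2021, start_month=11, start_day=1,
#                             start_hour=17, future_days=3)
#   start_time, start_str, end_time, end_str = make_datetimes_from_args(args)
#   # start_str -> "20211101-1700", end_str -> "20211104-1700"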
def load_hindcast_environment_data(start=datetime.datetime(2021,12,2), end=datetime.datetime(2021,11,22), load_path='data/hindcast'):
dataset_filenames = []
#start_date = datetime.datetime.strptime(start.strftime('%Y-%m-%d'),'%Y-%m-%d')
#end_date = datetime.datetime.strptime(end.strftime('%Y-%m-%d'),'%Y-%m-%d') + datetime.timedelta(days=1)
wind_filenames = glob('{load_path}/wind/*.nc'.format(load_path=load_path))
current_filenames = glob('{load_path}/current/*.nc4'.format(load_path=load_path))
readers = []
remap = {'u-component_of_wind_planetary_boundary':'x_wind',
'v-component_of_wind_planetary_boundary':'y_wind',
'water_u':'x_sea_water_velocity',
'water_v':'y_sea_water_velocity',
'Mean_period_of_wind_waves_surface':'sea_surSignificant_height_of_wind_waves_surface',
'Significant_height_of_wind_waves_surface':'sea_surface_wave_significant_height',
'u-component_of_wind_surface':'x_wind',
'v-component_of_wind_surface':'y_wind',
}
for wfile in wind_filenames:
dataset_filenames.append(wfile)
readers.append(GenericReader(wfile, standard_name_mapping=remap))
for cfile in current_filenames:
readers.append(GenericReader(cfile, standard_name_mapping=remap))
return readers
def load_environment_data(data_dir, start_time=comp_start_time, use_gfs=True, use_ncep=False, use_ww3=True, use_rtofs=True):
# https://polar.ncep.noaa.gov/global/examples/usingpython.shtml
# 'https://tds.hycom.org/thredds/dodsC/GLBy0.08/latest'
# //nomads.ncep.noaa.gov/pub/data/nccf/com/rtofs/prod/rtofs.20211120/rtofs
remap_gfs = {'u-component_of_wind_planetary_boundary':'x_wind',
'v-component_of_wind_planetary_boundary':'y_wind'}
remap_ww3 = { 'Mean_period_of_wind_waves_surface':'sea_surSignificant_height_of_wind_waves_surface',
'Significant_height_of_wind_waves_surface':'sea_surface_wave_significant_height',
'u-component_of_wind_surface':'x_wind',
'v-component_of_wind_surface':'y_wind',
}
remap_ncep = {
'reftime1':'reftime',
'time1':'time',
'lat':'lat',
'lon':'lon',
'u-component_of_wind_surface':'x_wind',
'v-component_of_wind_surface':'y_wind',
}
readers = []
# Several models to choose from
if use_ncep:
# p5 degree (56km)
# start: 2021-10-27 06:00:00 end: 2021-11-28 00:00:00 step: 3:00:00
ncep_wind_data = GenericReader(os.path.join(data_dir, 'ncep_global_best_2021-10-27_2021-11-28.nc'), standard_name_mapping=remap_ncep)
readers.append(ncep_wind_data)
if use_ww3:
# https://thredds.ucar.edu/thredds/ncss/grib/NCEP/WW3/Global/Best/dataset.html
ww3_wave_data1 = GenericReader(os.path.join(data_dir, 'ww3_11-01_11-11.nc'), standard_name_mapping=remap_ww3)
# ww3 is only 7 days in advance
# 2021-11-10 03:00:00 end: 2021-11-28 00:00:00 step: 3:00:00
ww3_wave_data2 = GenericReader(os.path.join(data_dir, 'ww3_11-10_12-03.nc'), standard_name_mapping=remap_ww3)
readers.append(ww3_wave_data1)
readers.append(ww3_wave_data2)
if use_rtofs:
# 8 day hourly forecast of current
rtofs_data = GenericReader(os.path.join(data_dir, 'rtofs_11-22.nc'))
readers.append(rtofs_data)
if use_gfs:
# GFS 0.5 degree (56km) (higher res than 1 deg)
# https://thredds.ucar.edu/thredds/gfsp5 # 14 day
# start: 2021-10-21 00:00:00 end: 2021-12-06 12:00:00 step: 3:00:00
# gfs should go last in case ww3 is used
gfsp5_wind_data = GenericReader(os.path.join(data_dir, 'gfs_0p5deg_10-20_12-06.nc'), standard_name_mapping=remap_gfs)
readers.append(gfsp5_wind_data)
return readers
def get_rtofs_currents(data_dir, start_time=comp_start_time):
pred_dir = os.path.join(data_dir, 'rtofs')
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
avail_dates = glob(os.path.join(pred_dir, '2021*'))
# for days older than today, use nowcast date
today = pytz.utc.localize(datetime.datetime.utcnow())
assert start_time <= today
date = start_time
weather_files = []
today_str = "%s%s%s"%(today.year, today.month, today.day)
while date < today:
date_str = "%s%s%s"%(date.year, date.month, date.day)
# get hindcast data
if date <= today:
print('getting hindcast data for %s'%date)
search_path = os.path.join(pred_dir, date_str, 'rtofs_glo_2ds_n0*progremap.nc')
files = glob(search_path)
print('found %s files' %len(files))
if not len(files):
print("WARNING no files found")
print(search_path)
weather_files.extend(sorted(files))
date = date+ datetime.timedelta(days=1)
# try looking for today data
search_path = os.path.join(pred_dir, today_str, 'rtofs_glo_2ds_f*progremap.nc')
files = glob(search_path)
print('found %s today files' %len(files))
if not len(files):
print('using yesterdays forecast data')
yes = today + datetime.timedelta(days=-1)
yes_str = "%s%s%s"%(yes.year, yes.month, yes.day)
search_path = os.path.join(pred_dir, yes_str, 'rtofs_glo_2ds_*progremap.nc')
files = glob(search_path)
print('found %s yesteday files' %len(files))
weather_files.extend(sorted(files))
return weather_files
def download_predictions(data_dir):
"""
Download the RTOFS forecast
Home > RTOFS model forecast documentation
RTOFS (Atlantic) is a basin-scale ocean forecast system based on the HYbrid Coordinate Ocean Model (HYCOM).
The model is run once a day, completing at about 1400Z. Each run starts with a 24 hour assimiliation hindcast and produces ocean surface forecasts every hour and full volume forecasts every 24 hours from the 0000Z nowcast out to 120 hours.
For example for the 20211118 model data there are 138 files:
** future **
20211118/rtofs_glo_2ds_f003_prog.nc
- 2021-11-18 03:00:00
20211118/rtofs_glo_2ds_f023_prog.nc
- 2021-11-18 23:00:00
20211118/rtofs_glo_2ds_f123_prog.nc
- 2021-11-23 03:00:00
** nowcast (hindcast) **
20211118/rtofs_glo_2ds_n014_prog.nc
- 2021-11-17 14:00:00
Latitude-longitude points on the native grid for the Atlantic RTOFS model are on a curvilinear orthogonal grid. The latitude-longitude point files for correct interpretation of the data files as well as other data files and software for processing Atlantic RTOFS data is available here. See the README and the Readme.RTOFS files for more information on the files and software. Vertical coordinates are interpolated to 40 fixed-depth positions, as specified here.
# prog files look like:
root group (NETCDF4 data model, file format HDF5):
Conventions: CF-1.0
title: HYCOM ATLb2.00
institution: National Centers for Environmental Prediction
source: HYCOM archive file
experiment: 92.8
history: archv2ncdf2d
dimensions(sizes): MT(1), Y(3298), X(4500), Layer(1)
variables(dimensions): float64 MT(MT), float64 Date(MT), int32 Layer(Layer), int32 Y(Y), int32 X(X), float32 Latitude(Y, X), float32 Longitude(Y, X), float32 u_velocity(MT, Layer, Y, X), float32 v_velocity(MT, Layer, Y, X), float32 sst(MT, Y, X), float32 sss(MT, Y, X), float32 layer_density(MT, Layer, Y, X)
groups:
"""
today = datetime.datetime.now()
yes = today + datetime.timedelta(days=-1)
tom = today + datetime.timedelta(days=1)
for date in [yes, today, tom]:
date_str = "%s%s%s"%(date.year, date.month, date.day)
print('starting date', date)
pred_dir = os.path.join(data_dir, 'rtofs', date_str)
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
# search for old ls files
old_files = glob(os.path.join(pred_dir, 'ls-l*'))
for old in old_files:
os.remove(old)
wget_ls = 'wget https://nomads.ncep.noaa.gov/pub/data/nccf/com/rtofs/prod/rtofs.%s/ls-l -P %s'%(date_str, pred_dir)
os.system(wget_ls)
ls_fpath = os.path.join(pred_dir, 'ls-l')
# if day is ready, read it
if os.path.exists(ls_fpath):
ls = open(ls_fpath, 'r')
get_files = []
for ff in ls:
if 'prog.nc' in ff:
# TODO prefer nowcast
# rtofs_glo_2ds_f000_prog.nc # future
# rtofs_glo_2ds_n000_prog.nc # nowcast
get_files.append(ff.split(' ')[-1].strip())
target_path = os.path.join(pred_dir, get_files[-1])
print(target_path)
if not os.path.exists(target_path):
wget_ls = 'wget https://nomads.ncep.noaa.gov/pub/data/nccf/com/rtofs/prod/rtofs.%s/%s -P %s'%(date_str, get_files[-1], pred_dir)
print('downloading', get_files[-1])
os.system(wget_ls)
nn_path = target_path.replace('.nc', 'remap.nc')
if not os.path.exists(nn_path):
# remap colinear - should check this!
cmd = 'cdo remapnn,global_.08 %s %s'%(target_path, nn_path)
os.system(cmd)
def load_drifter_data(search_path='data/challenge_*day*.json', start_date=comp_start_time, end_date=comp_end_time):
# load sorted dates
dates = sorted(glob(search_path))
bad_spots = []
track_columns = ['latitude', 'longitude', 'timestamp', 'spotterId', 'day', 'date']
wave_columns = ['significantWaveHeight', 'peakPeriod', 'meanPeriod', 'peakDirection',
'peakDirectionalSpread', 'meanDirection', 'meanDirectionalSpread',
'timestamp', 'latitude', 'longitude', 'spotterId', 'day', 'date']
wave_df = pd.DataFrame(columns=wave_columns)
track_df = pd.DataFrame(columns=track_columns)
start_day = start_date+datetime.timedelta(days=-2)
for cnt, date in enumerate(dates):
st = date.index('sofar')+len('sofar_')
en = st + 8
date_str = date[st:en]
date_ts = datetime.datetime(int(date_str[:4]), int(date_str[4:6]), int(date_str[6:]), 0, 0, 0, 0, pytz.UTC)
if date_ts >= start_day:
print(date_ts, start_day)
print('loading date: %s'% date)
day_data = json.load(open(date))['all_data']
for spot in range(len(day_data)):
spot_day_data = day_data[spot]['data']
this_track_df = pd.DataFrame(spot_day_data['track'])
this_wave_df = pd.DataFrame(spot_day_data['waves'])
this_track_df['spotterId'] = spot_day_data['spotterId']
this_wave_df['spotterId'] = spot_day_data['spotterId']
this_track_df['day'] = cnt
this_wave_df['day'] = cnt
# get date from filename
this_track_df['date'] = date_str
this_wave_df['date'] = date_str
if len(this_track_df.columns) != len(track_columns):
# some spots have no data
bad_spots.append((date, spot, spot_day_data))
else:
track_df = track_df.append(this_track_df)
if len(this_wave_df.columns) != len(wave_columns):
# some spots have no data
bad_spots.append((date, spot, spot_day_data))
else:
wave_df = wave_df.append(this_wave_df)
track_df = track_df.drop_duplicates()
track_df['real_sample'] = 1
track_df['sample_num'] = 1
track_df.index = np.arange(track_df.shape[0])
for spot in track_df['spotterId'].unique():
spot_indexes = track_df[track_df['spotterId'] == spot].index
track_df.loc[spot_indexes, 'sample_num'] = np.arange(len(spot_indexes), dtype=np.int64)
track_df['scaled_sample_num'] = track_df['sample_num'] / track_df['sample_num'].max()
track_df['ts'] = track_df['timestamp']
track_df['ts_utc'] = | pd.to_datetime(track_df['ts']) | pandas.to_datetime |
#!/usr/bin/env python3
# This script assumes that the non-numerical column headers
# in train and predi files are identical.
# Thus the sm header(s) in the train file must be numeric (day/month/year).
import sys
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA #TruncatedSVD as SVD
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def mask(df, f):
return df[f(df)]
def is_int(val):
try:
int(val)
return True
except:
return False
def remove_sparse_rows(data, error=-99999.0):
    data_matrix = data.values
    data_matrix = [row for row in data_matrix if error not in row]
return pd.DataFrame(data_matrix, columns=data.columns)
def fit_data(train_data, num_comps="mle"):
# Build pipeline and fit it to training data.
scaler = StandardScaler()
# https://github.com/scikit-learn/scikit-learn/issues/9884
pca = PCA(n_components=num_comps, svd_solver="full")
pipeline = Pipeline([("scaler", scaler), ("pca", pca)])
pipeline.fit(train_data)
return pipeline
#Select the target number of components.
# Uses Avereage Eigenvalue technique from:
# http://pubs.acs.org/doi/pdf/10.1021/ie990110i
def choose_num_comps(train_data, bound=1):
model = fit_data(train_data)
eigenvals = model.named_steps['pca'].explained_variance_
#print(f"eigenvals:\n{eigenvals}\n")
return len([ev for ev in eigenvals if (ev >= bound)])
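# Illustrative use of the average-eigenvalue rule above: only components whose eigenvalue
# on the standardized data is at least `bound` (default 1) are retained.
#   n_comps = choose_num_comps(train_data[params])
#   model = fit_data(train_data[params], n_comps)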
# Assumes the first two columns are x/y-coordinates
# and integer-headed columns are sm data, not covariates.
def get_params(data):
columns = list(data.columns)[2:]
return [col for col in columns if not is_int(col)]
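# Illustrative behaviour, assuming the first two columns are x/y-coordinates and the
# integer-headed columns are the sm data mentioned in the header comment:
#   columns ['x', 'y', '2001', '2002', 'slope', 'aspect']  ->  ['slope', 'aspect']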
# Apply to {df} pca transformatio {model}
# that maps {params}-headed data to {num_comps} new columns.
def apply_model(df, model, params, num_comps):
pre_model = df[params]
post_model = model.transform(pre_model)
#print(f"one row of post_model:\n{post_model[0]}")
new_cols = [f"Component{i}" for i in range(num_comps)]
post_model = pd.DataFrame(post_model, columns=new_cols)
#print(f"one row of post_model:\n{post_model.iloc[0]}")
pre_base = df.drop(params, axis=1)
#print(f"one row of pre_base:\n{pre_base.iloc[0]}")
post_model.reset_index(drop=True, inplace=True)
pre_base.reset_index(drop=True, inplace=True)
post_full = pd.concat([pre_base, post_model], axis=1)
#print(f"one row of post_fill:\n{post_full.iloc[0]}")
#print(f"sizes:\npost_model: {post_model.shape}\npre_base: {pre_base.shape}\npost_full: {post_full.shape}\n")
return post_full
def joint_pca(train_data, predi_data, params):
# Run PCA on train_data to create a dimension-reduction model.
pca_train = train_data[params]
num_comps = choose_num_comps(pca_train)
#print(f"num_comps:\n{num_comps}\n")
model = fit_data(pca_train, num_comps)
#print(f"model:\n{model}\n")
#print(f"one row of train_data before:\n{train_data.iloc[1]}")
#print(f"one row of predi_data before:\n{predi_data.iloc[1]}")
# Apply the same model to both train and predi data.
train_data = apply_model(train_data, model, params, num_comps)
predi_data = apply_model(predi_data, model, params, num_comps)
#print(f"one row of train_data after:\n{train_data.iloc[1]}")
#print(f"one row of predi_data after:\n{predi_data.iloc[1]}")
components = model.named_steps["pca"].components_
return train_data, predi_data, components
if __name__ == "__main__":
train_in = sys.argv[1]
predi_in = sys.argv[2]
train_out = sys.argv[3]
predi_out = sys.argv[4]
log_file = sys.argv[5]
# Read in data files.
train_data = pd.read_csv(train_in, header=0)
predi_data = | pd.read_csv(predi_in, header=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import pdb
import sys
import os
from sklearn.ensemble import GradientBoostingRegressor
from joblib import dump, load
import re
##################################################################3
# (Sept 2020 - Jared) - PG-MTL training script on 145 source lake
# Features and hyperparamters must be manually specified below
# (e.g. feats = ['dif_max_depth', ....]; n_estimators = 5500, etc)
####################################################################3
#file to save model to
save_file_path = '../../models/metamodel_pgdl_RMSE_GBR.joblib'
#########################################################################################
#paste features found in "pbmtl_feature_selection.py" here
feats = ['n_obs_sp', 'n_obs_su', 'dif_max_depth', 'dif_surface_area',
'dif_glm_strat_perc', 'perc_dif_max_depth', 'perc_dif_surface_area',
'perc_dif_sqrt_surface_area']
###################################################################################
#######################################################################3
#paste hyperparameters found in "pbmtl_hyperparameter_search.py" here
#
n_estimators = 5500
lr = .05
#####################################################################
ids = pd.read_csv('../../metadata/pball_site_ids.csv', header=None)
ids = ids[0].values
glm_all_f = pd.read_csv("../../results/glm_transfer/RMSE_transfer_glm_pball.csv")
train_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in np.unique(glm_all_f['target_id'].values)]
train_lakes_wp = np.unique(glm_all_f['target_id'].values) #with prefix
#compile training data
train_df = pd.DataFrame()
for _, lake_id in enumerate(train_lakes):
new_df = pd.DataFrame()
#get performance results (metatargets), filter out target as source
lake_df_res = | pd.read_csv("../../results/transfer_learning/target_"+lake_id+"/resultsPGRNNbasic_pball",header=None,names=['source_id','rmse']) | pandas.read_csv |
import pandas as pd
import requests
import datetime
import configparser
#maximum 10 days from current date or upgrade to premium
today = datetime.date.today()
start = today - datetime.timedelta(days=10)
config = configparser.ConfigParser()
config.read('config.ini')
# the last two entries in the 'left' list below are centre-left rather than left
left = ['msnbc','the-huffington-post','cnn','mashable','new-york-magazine','abc-news', 'vice-news']
right = ['the-telegraph','national-review','daily-mail','fox-news','breitbart-news','the-american-conservative']
df_left = | pd.DataFrame() | pandas.DataFrame |
'''
Created on Nov 12, 2018
@author: <NAME> (<EMAIL>)
'''
import os
import glob
import argparse
import time
import pandas as pd
import numpy as np
import scipy.io as io
from keras.models import Model
from keras.layers import GRU, Dense, Dropout, Input
from keras import optimizers
from keras.utils import multi_gpu_model
import keras
import ipyparallel as ipp
# Constant.
MODEL_FILE_NAME = 'yaw_misalignment_calibrator.h5'
RESULT_FILE_NAME = 'ymc_result.csv'
dt = pd.Timedelta(10.0, 'm')
testTimeRanges = [(pd.Timestamp('2018-05-19'), pd.Timestamp('2018-05-26') - dt)
, (pd.Timestamp('2018-05-26'), pd.Timestamp('2018-06-02') - dt)
, (pd.Timestamp('2018-06-02'), pd.Timestamp('2018-06-09') - dt)
, (pd.Timestamp('2018-06-09'), pd.Timestamp('2018-06-16') - dt)
, (pd.Timestamp('2018-08-24'), pd.Timestamp('2018-08-31') - dt)
, ( | pd.Timestamp('2018-08-28') | pandas.Timestamp |
import os
from fbprophet import Prophet
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def add_stf(x,y):
return(x+y)
def run_all_forecasts(spp, num_of_days=30):
for csv in passage_csvs():
run_forecast(csv, spp, num_of_days)
def run_forecast(csv, spp, num_of_days=30, display_count=False):
df_opts = create_dataframe(csv, spp, display_count)
predict_passage(df_opts['dataframe'],df_opts['dam'],df_opts['spp'],num_of_days)
def passage_csvs():
files = [os.path.abspath(f"csv/passage_data/{x}") for x in os.listdir('csv/passage_data')]
return files
def create_dataframe(csv, spp, display_count=False):
print('Formatting the dataframe')
df = pd.read_csv(csv)
# isolate the dam
dam_name = df['dam'][0]
# select columns to remove to isolate a spp
cols_to_remove = [col for col in df.columns if f"{spp}" not in col and 'count_date' not in col]
df = df.drop(cols_to_remove, axis=1)
# create date range to accommodate missing dates
df['count_date'] = | pd.to_datetime(df['count_date']) | pandas.to_datetime |
import numpy as np
import constants as const
import pandas as pd
import re
from sklearn import preprocessing
DESIRED_PARTS_OF_SPEECH = [
'ADJ',
'ADP',
'ADV',
'AUX',
'CONJ',
'CCONJ',
'DET',
'INTJ',
'NOUN',
'NUM',
'PART',
'PRON',
'PROPN',
'SCONJ',
'VERB']
""" Categorical attribute column names """
CATEGORICALS = [
"IssuerDept",
"ReceiverDept",
"IndividualTeam",
"ManagerPeer",
"MonetaryType",
"Program",
"CoreValue"
]
""" Numeric attribute column names """
NUMERICS = [
"Award Value",
"TenureRecAtIssuance",
"TenureIssAtIssuance"
]
"""
Builds a set of features and a label from the various vectorizations, text attributes,
and categorical & numeric attributes in the source data.
"""
class DataBuilder(object):
def __init__(self, unlabelled=False):
self.unlabelled = unlabelled
if self.unlabelled:
self.file_base = const.FILE_UNIQUE_UNLABELLED
self.file_doc2vec = const.FILE_DOC2VEC_INPUTS_UNLABELLED
self.file_transfer = const.FILE_TRANSFER_FEATURES_UNLABELLED
self.file_pos = const.FILE_POS_UNLABELLED
else:
self.file_base = const.FILE_UNIQUE_LABELLED
self.file_doc2vec = const.FILE_DOC2VEC_INPUTS
self.file_transfer = const.FILE_TRANSFER_FEATURES
self.file_pos = const.FILE_POS_LABELLED
self.dataframe = self.load_unique()
self.feature_funcs = {
'word2vec': self.load_word2vec,
'doc2vec': self.load_doc2vec,
'transfer_features': self.load_transfer_features,
'word_count': self.load_word_count,
'char_count': self.load_char_count,
'parts_of_speech': self.load_parts_of_speech
}
"""
Loads a set of features with the specified label set as numpy arrays x, y
:param features: List of all features to load. May include vectorizations like word2vec, doc2vec,
transfer_features, categorical or numeric attributes, and extracted attributes like doc_count and char_count
:param label: Which rating type to use as label: RatingMean, RatingMin, RatingMax, or Rating (first encountered)
"""
def load(self, features, label, as_dataframe=False):
# All data loads should use the "Index" column from files to make sure we're joining up the right records.
df = pd.DataFrame(index=self.dataframe.index)
for feature in features:
if feature in self.feature_funcs.keys():
feature_df = self.feature_funcs[feature]()
elif feature in CATEGORICALS:
feature_df = pd.get_dummies(self.dataframe[feature], feature)
elif feature in NUMERICS:
feature_df = self.dataframe[feature]
else:
raise(Exception("No source known for feature '{0}'".format(feature)))
if (df.shape[0] != 0 and df.shape[0] != feature_df.shape[0]):
raise Exception("Feature {0} has {1} rows but our current set has {2}".format(feature, feature_df.shape[0], df.shape[0]))
# Join will pair columns from the dataframes based on index
df = df.join(feature_df)
x = df if as_dataframe else np.asarray(df)
if self.unlabelled:
y = None
elif as_dataframe:
y = self.dataframe[label]
else:
y = self.dataframe[label].values
return x, y
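    # Illustrative usage; the feature and label names are examples drawn from the
    # constants and docstring above, not a canonical configuration:
    #   builder = DataBuilder()
    #   x, y = builder.load(['doc2vec', 'Award Value', 'IssuerDept'], 'RatingMean')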
def load_unique(self):
try:
df = pd.read_csv(self.file_base, encoding='utf-8')
except UnicodeDecodeError:
df = pd.read_csv(self.file_base, encoding='latin-1')
# Convert empty monetary values to 0
df[["Award Value"]] = df[["Award Value"]].fillna(value=0)
df.set_index("Index", inplace=True)
return df
def load_parts_of_speech(self):
df = | pd.read_csv(self.file_pos, encoding='utf-8') | pandas.read_csv |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
with pytest.raises(TypeError):
td.__rfloordiv__(np.float64(2.0))
with pytest.raises(TypeError):
td.__rfloordiv__(np.uint8(9))
with pytest.raises(TypeError, match="Invalid dtype"):
# deprecated GH#19761, enforced GH#29797
td.__rfloordiv__(np.int32(2.0))
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
with pytest.raises(TypeError, match="Invalid dtype"):
# Deprecated GH#19761, enforced GH#29797
# TODO: GH-19761. Change to TypeError.
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
with pytest.raises(TypeError):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
with pytest.raises(TypeError):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError):
15 % td
with pytest.raises(TypeError):
16.0 % td
with pytest.raises(TypeError):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
with pytest.raises(TypeError):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == | Timedelta(hours=6) | pandas.Timedelta |
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency
from .mismatch import (
get_reference_base,
calculate_mismatch_odds_ratio,
MISMATCH_COUNTS_COLUMNS,
)
RESULT_COLUMNS = [
'chrom', 'pos', 'strand', 'mismatch_odds_ratio',
'homog_G_kd', 'homog_G_kd_pval',
'homog_G_cntrl', 'homog_G_cntrl_pval',
'hetero_G', 'hetero_G_pval' # FDR is calculated later
]
def filter_empty_cols(c):
'''Remove columns for bases which are not represented in the data'''
c_t = c.T
c_t = c_t[c_t.any(1)]
return c_t.T
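# Illustrative behaviour: given a replicate-by-base count table, base columns that are
# zero in every replicate are dropped before the G-tests below, e.g.
#        A  C  G  T                A  C  T
#   kd1  5  1  0  9    ->    kd1   5  1  9
#   kd2  7  0  0  8          kd2   7  0  8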
def test_significant_der_sites(counts, fdr_threshold, fasta):
'''Conduct G-test on counts to identify sites with differential error profiles'''
res = []
mismatch_counts = []
kd_has_replicates = len(counts['kd'].columns.levels[0]) > 1
cntrl_has_replicates = len(counts['cntrl'].columns.levels[0]) > 1
for (chrom, pos, strand), c in counts.iterrows():
c = filter_empty_cols(c.unstack(-1))
cntrl = c.loc['cntrl']
kd = c.loc['kd']
# pool replicates and conduct G test of kd vs cntrl
cntrl_pooled = cntrl.sum(0)
kd_pooled = kd.sum(0)
kd_vs_cntrl_g, kd_vs_cntrl_p_val, *_ = chi2_contingency(
[cntrl_pooled, kd_pooled], lambda_='log-likelihood')
# if result is likely to be significant we need to test
# homogeneity of cntrl and kd replicates (if we have replicates).
# If it isn't we can save some time by skipping these tests!
# We haven't calculated FDR yet but it cannot be smaller than P
if kd_vs_cntrl_p_val < fdr_threshold:
# homogeneity test of cntrl
if cntrl_has_replicates:
cntrl_hom_g, cntrl_hom_p_val, *_ = chi2_contingency(
filter_empty_cols(cntrl),
lambda_='log-likelihood'
)
else:
cntrl_hom_g, cntrl_hom_p_val = np.nan, np.nan
# homogeneity test of kd
if kd_has_replicates:
kd_hom_g, kd_hom_p_val, *_ = chi2_contingency(
filter_empty_cols(kd),
lambda_='log-likelihood'
)
else:
kd_hom_g, kd_hom_p_val = np.nan, np.nan
else:
# Otherwise set NaNs
cntrl_hom_g, cntrl_hom_p_val = np.nan, np.nan
kd_hom_g, kd_hom_p_val = np.nan, np.nan
# use reference base to generate odds ratio for mismatch compared to the reference
# (kd_mm / kd_m) / (cntrl_mm / cntrl_m)
ref_base = get_reference_base(fasta, chrom, pos)
odds_ratio, mm_counts = calculate_mismatch_odds_ratio(
ref_base, kd_pooled, cntrl_pooled
)
res.append([chrom, pos, strand, odds_ratio,
kd_hom_g, kd_hom_p_val,
cntrl_hom_g, cntrl_hom_p_val,
kd_vs_cntrl_g, kd_vs_cntrl_p_val])
mismatch_counts.append(mm_counts)
res = | pd.DataFrame(res, columns=RESULT_COLUMNS) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 13:21:59 2017
@author: Astrid
"""
from shark import supp
import pandas as pd
import os
import numpy as np
import math as m
def extractClimateYears(path_climate, year_start, years, number, path_save=None, exclude=list()):
climateparam_list = supp.getFileList(path_climate, '.ccd')[0]
# remove climate parameters from exclude
for e in exclude:
climateparam_list.remove(e+'.ccd')
# extract years of eacht climate parameter
row_start = int(365*24*(year_start-1))
row_end = int(365*24*years + row_start)
if path_save == None:
clim= | pd.DataFrame() | pandas.DataFrame |
import requests
import deeptrade
import pandas as pd
class Sentiment():
def __init__(self):
self.head = {'Authorization': "Token %s" %deeptrade.api_key}
def by_date(self,date,dataframe=False):
"""
:parameters:
        - date: a day date in the format YYYY-MM-DD
        - dataframe: whether to return the result as json (False) or a pandas dataframe
        :returns:
        json or pandas dataframe with all the tickers for the given day and
        their corresponding sentiment
"""
endpoint = deeptrade.api_base+"sentiment_get/"+date
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = pd.DataFrame(g)
return df
else:
return g
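    # Illustrative call (token and date are placeholders):
    #   deeptrade.api_key = "YOUR_TOKEN"
    #   df = Sentiment().by_date("2020-01-31", dataframe=True)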
def by_ticker(self,ticker,dataframe=False):
"""
:parameters:
- ticker: a ticker such as 'AMZN'
- dataframe: whehter result in json (False) or pandas dataframe
:returns:
json or pandas dataframe with all the hist. sentiment information of the ticker
"""
endpoint = deeptrade.api_base+"sentiment_post/"+ticker
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = | pd.DataFrame(g) | pandas.DataFrame |
""" Helper methods constant across all workers """
import requests, datetime, time, traceback, json, os, sys, math
import sqlalchemy as s
import pandas as pd
import os
import sys, logging
from urllib.parse import urlparse
def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}):
""" map objects => { *our db col* : *gh json key*} """
need_insertion_count = 0
need_update_count = 0
for i, obj in enumerate(new_data):
if type(obj) != dict:
logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj))
continue
obj['flag'] = 'none' # default of no action needed
existing_tuple = None
for db_dupe_key in list(duplicate_col_map.keys()):
if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any():
if table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'):
existing_tuple = table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0]
continue
logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key))
obj['flag'] = 'need_insertion'
need_insertion_count += 1
break
if obj['flag'] == 'need_insertion':
logging.info('Already determined that current tuple needs insertion, skipping checking updates. '
'Moving to next tuple.\n')
continue
# If we need to check the values of the existing tuple to determine if an update is needed
for augur_col, value_check in value_update_col_map.items():
not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True
            # skip when the stored value is unchanged (or both sides are NaN)
            if existing_tuple[augur_col] == value_check or not not_nan_check:
                continue
logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col))
obj['flag'] = 'need_update'
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
if obj['flag'] == 'need_update':
logging.info('Already determined that current tuple needs update, skipping checking further updates. '
'Moving to next tuple.\n')
continue
# Now check the existing tuple's values against the response values to determine if an update is needed
for col in update_col_map.keys():
if update_col_map[col] not in obj:
continue
if obj[update_col_map[col]] == existing_tuple[col]:
continue
logging.info("Found a tuple that needs an update for column: {}\n".format(col))
obj['flag'] = 'need_update'
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) +
"was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count))
return new_data
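# Summary of the 'flag' values set by assign_tuple_action for each tuple: 'none' = no
# action needed, 'need_insertion' = no matching row found via the duplicate columns,
# 'need_update' = a matching row exists but a mapped column differs ('pkey' then holds
# the existing row's primary key).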
def check_duplicates(new_data, table_values, key):
need_insertion = []
for obj in new_data:
if type(obj) == dict:
if not table_values.isin([obj[key]]).any().any():
need_insertion.append(obj)
# else:
# logging.info("Tuple with github's {} key value already".format(key) +
# "exists in our db: {}\n".format(str(obj[key])))
logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
"was reduced to {} tuples.\n".format(str(len(need_insertion))))
return need_insertion
def connect_to_broker(self):
connected = False
for i in range(5):
try:
logging.info("attempt {}\n".format(i))
if i > 0:
time.sleep(10)
requests.post('http://{}:{}/api/unstable/workers'.format(
self.config['broker_host'],self.config['broker_port']), json=self.specs)
logging.info("Connection to the broker was successful\n")
connected = True
break
except requests.exceptions.ConnectionError:
logging.error('Cannot connect to the broker. Trying again...\n')
if not connected:
sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
def dump_queue(queue):
"""
Empties all pending items in a queue and returns them in a list.
"""
result = []
queue.put("STOP")
for i in iter(queue.get, 'STOP'):
result.append(i)
# time.sleep(.1)
return result
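# Illustrative usage (a sketch; note the "STOP" string above is used as a sentinel, so the
# queue should not already contain it):
#   q = Queue(); q.put(1); q.put(2)
#   dump_queue(q)   # -> [1, 2]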
def find_id_from_login(self, login):
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}'
""".format(login))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
except:
logging.info("contributor needs to be added...")
cntrb_url = ("https://api.github.com/users/" + login)
logging.info("Hitting endpoint: {} ...\n".format(cntrb_url))
r = requests.get(url=cntrb_url, headers=self.headers)
update_gh_rate_limit(self, r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
cntrb = {
"cntrb_login": contributor['login'] if 'login' in contributor else None,
"cntrb_email": email,
"cntrb_company": company,
"cntrb_location": location,
"cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None,
"cntrb_canonical": None,
"gh_user_id": contributor['id'],
"gh_login": contributor['login'],
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'],
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
logging.info("Inserted contributor: " + contributor['login'] + "\n")
return find_id_from_login(self, login)
def get_owner_repo(github_url):
split = github_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' in repo:
repo = repo[:-4]
return owner, repo
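# Illustrative behaviour (example URL only):
#   get_owner_repo('https://github.com/owner/repo.git')  ->  ('owner', 'repo')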
def get_max_id(self, table, column, default=25150, operations_table=False):
maxIdSQL = s.sql.text("""
SELECT max({0}.{1}) AS {1}
FROM {0}
""".format(table, column))
db = self.db if not operations_table else self.helper_db
rs = pd.read_sql(maxIdSQL, db, params={})
if rs.iloc[0][column] is not None:
max_id = int(rs.iloc[0][column]) + 1
logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id))
else:
max_id = default
logging.info("Could not find max id for {} column in the {} table... using default set to: \
{}\n".format(column, table, max_id))
return max_id
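# Hypothetical usage sketch: `worker` stands in for a worker instance whose db/helper_db
# connections are already set up; the table and column names below are examples.
def _example_get_max_id(worker):
    max_cntrb_id = get_max_id(worker, 'contributors', 'cntrb_id')
    max_history_id = get_max_id(worker, 'worker_history', 'history_id', operations_table=True)
    return max_cntrb_id, max_history_id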
def get_table_values(self, cols, tables, where_clause=""):
table_str = tables[0]
del tables[0]
col_str = cols[0]
del cols[0]
for table in tables:
table_str += ", " + table
for col in cols:
col_str += ", " + col
tableValuesSQL = s.sql.text("""
SELECT {} FROM {} {}
""".format(col_str, table_str, where_clause))
logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL))
values = pd.read_sql(tableValuesSQL, self.db, params={})
return values
def init_oauths(self):
self.oauths = []
self.headers = None
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://api.github.com/users/gabe-heim"
# Make a list of api key in the config combined w keys stored in the database
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}'
""".format(self.config['key']))
for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")):
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
logging.info("Getting rate limit info for oauth: {}\n".format(oauth))
response = requests.get(url=url, headers=self.headers)
self.oauths.append({
'oauth_id': oauth['oauth_id'],
'access_token': oauth['access_token'],
'rate_limit': int(response.headers['X-RateLimit-Remaining']),
'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds()
})
logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1]))
if len(self.oauths) == 0:
logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n")
# First key to be used will be the one specified in the config (first element in
# self.oauths array will always be the key in use)
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}):
# Paginate backwards through all the tuples but get first page in order
# to determine if there are multiple pages and if the 1st page covers all
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys()) if value_update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
table_values = get_table_values(self, cols_query, [table], where_clause)
i = 1
multiple_pages = False
tuples = []
while True:
num_attempts = 0
success = False
while num_attempts < 3:
logging.info("Hitting endpoint: " + url.format(i) + " ...\n")
r = requests.get(url=url.format(i), headers=self.headers)
update_gh_rate_limit(self, r)
logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*'))
try:
j = r.json()
except ValueError:
j = json.loads(json.dumps(r.text))
if type(j) != dict and type(j) != str:
success = True
break
elif type(j) == dict:
logging.info("Request returned a dict: {}\n".format(j))
if j.get('message') == 'Not Found':
logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if j.get('message') == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
update_gh_rate_limit(self, r, temporarily_disable=True)
if j.get('message') == 'Bad credentials':
update_gh_rate_limit(self, r, bad_credentials=True)
elif type(j) == str:
logging.info("J was string: {}\n".format(j))
if '<!DOCTYPE html>' in j:
logging.info("HTML was returned, trying again...\n")
elif len(j) == 0:
logging.info("Empty string, trying again...\n")
else:
try:
j = json.loads(j)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Find last page so we can decrement from there
if 'last' in r.links and not multiple_pages and not self.finishing_task:
param = r.links['last']['url'][-6:]
i = int(param.split('=')[1]) + 1
logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n")
multiple_pages = True
elif not multiple_pages and not self.finishing_task:
logging.info("Only 1 page of request\n")
elif self.finishing_task:
logging.info("Finishing a previous task, paginating forwards ..."
" excess rate limit requests will be made\n")
if len(j) == 0:
logging.info("Response was empty, breaking from pagination.\n")
break
# Checking contents of requests with what we already have in the db
j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map)
if not j:
logging.info("Assigning tuple action failed, moving to next page.\n")
i = i + 1 if self.finishing_task else i - 1
continue
try:
to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none']
except Exception as e:
logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e))
i = i + 1 if self.finishing_task else i - 1
continue
if len(to_add) == 0 and multiple_pages and 'last' in r.links:
logging.info("{}".format(r.links['last']))
if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]):
logging.info("No more pages with unknown tuples, breaking from pagination.\n")
break
tuples += to_add
i = i + 1 if self.finishing_task else i - 1
# Since we already wouldve checked the first page... break
if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0:
logging.info("No more pages to check, breaking from pagination.\n")
break
return tuples
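# Sketch of a typical paginate() call (hypothetical endpoint and repo_id). The column maps
# tie a GitHub JSON field (value) to a database column (key) so existing tuples can be
# skipped and changed tuples flagged for update.
def _example_paginate(worker, owner="chaoss", repo="augur", repo_id=12345):
    issues_url = ("https://api.github.com/repos/" + owner + "/" + repo +
        "/issues?per_page=100&page={}")
    duplicate_col_map = {'gh_issue_id': 'id'}   # skip tuples whose GitHub id is already stored
    update_col_map = {'issue_state': 'state'}   # update tuples whose state changed
    return paginate(worker, issues_url, duplicate_col_map, update_col_map,
        'issues', 'issue_id', where_clause="WHERE repo_id = {}".format(repo_id))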
def query_github_contributors(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
# Extract owner/repo from the url for the endpoint
path = urlparse(github_url)
split = path[2].split('/')
owner = split[1]
name = split[2]
# Handles git url case by removing the extension
if ".git" in name:
name = name[:-4]
# Set the base of the url and place to hold contributors to insert
contributors_url = ("https://api.github.com/repos/" + owner + "/" +
name + "/contributors?per_page=100&page={}")
# Get contributors that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check duplicates/needed column updates with
table = 'contributors'
table_pkey = 'cntrb_id'
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'login'}
#list to hold contributors needing insertion or update
contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey)
logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n")
for repo_contributor in contributors:
try:
# Need to hit this single contributor endpoint to get extra data including...
# `created at`
# i think that's it
cntrb_url = ("https://api.github.com/users/" + repo_contributor['login'])
logging.info("Hitting endpoint: " + cntrb_url + " ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
update_gh_rate_limit(self, r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
canonical_email = email  # same guarded value as above; avoids a KeyError when 'email' is absent
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": email,
"cntrb_company": company,
"cntrb_location": location,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": canonical_email,
"gh_user_id": contributor['id'],
"gh_login": contributor['login'],
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'],
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
result = self.db.execute(self.contributors_table.update().where(
self.contributors_table.c.cntrb_email==email).values(cntrb))
logging.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
logging.info("Inserted contributor: " + contributor['login'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
logging.info("Caught exception: {}".format(e))
logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0):
"""
Read a variable in specified section of the config file, unless provided an environment variable
:param section: location of given variable
:param name: name of variable
"""
config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path)
_config_file_name = 'augur.config.json'
_config_bad = False
_already_exported = {}
_runtime_location = 'runtime/'
_default_config = {}
_config_file = None
try:
_config_file = open(config_file_path, 'r+')
except OSError:
print('Couldn\'t open {}'.format(_config_file_name))
# Load the config file
try:
config_text = _config_file.read() if _config_file is not None else ''
_config = json.loads(config_text)
except json.decoder.JSONDecodeError as e:
if not _config_bad:
_using_config_file = False
print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. Error: {}'.format(config_file_path, str(e)))
_config = _default_config
value = None
if environment_variable is not None:
value = os.getenv(environment_variable)
if value is None:
try:
if name is not None:
value = _config[section][name]
else:
value = _config[section]
except Exception as e:
value = default
if not section in _config:
_config[section] = {}
return value
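# Illustrative call, assuming the usual augur.config.json layout where broker settings live
# under the "Server" section; the environment variable, when set, takes precedence.
def _example_read_config():
    broker_host = read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0')
    broker_port = read_config('Server', 'port', 'AUGUR_PORT', 5000)
    return broker_host, broker_port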
def record_model_process(self, repo_id, model):
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": model,
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Stopped",
"total_results": self.results_counter
}
if self.finishing_task:
result = self.helper_db.execute(self.history_table.update().where(
self.history_table.c.history_id==self.history_id).values(task_history))
self.history_id += 1
else:
result = self.helper_db.execute(self.history_table.insert().values(task_history))
logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key))
self.history_id = int(result.inserted_primary_key[0])
def register_task_completion(self, task, repo_id, model):
# Task to send back to broker
task_completed = {
'worker_id': self.config['id'],
'job_type': "MAINTAIN",
'repo_id': repo_id,
'job_model': model
}
key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN"
task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN"
if key == 'INVALID_GIVEN':
register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url")
return
# Add to history table
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": model,
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Success",
"total_results": self.results_counter
}
self.helper_db.execute(self.history_table.update().where(
self.history_table.c.history_id==self.history_id).values(task_history))
logging.info("Recorded job completion for: " + str(task_completed) + "\n")
# Update job process table
updated_job = {
"since_id_str": repo_id,
"last_count": self.results_counter,
"last_run": datetime.datetime.now(),
"analysis_state": 0
}
self.helper_db.execute(self.job_table.update().where(
self.job_table.c.job_model==model).values(updated_job))
logging.info("Updated job process for model: " + model + "\n")
# Notify broker of completion
logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" +
"This task inserted: " + str(self.results_counter) + " tuples.\n\n")
requests.post('http://{}:{}/api/unstable/completed_task'.format(
self.config['broker_host'],self.config['broker_port']), json=task_completed)
# Reset results counter for next task
self.results_counter = 0
def register_task_failure(self, task, repo_id, e):
logging.info("Worker ran into an error for task: {}\n".format(task))
logging.info("Printing traceback...\n")
tb = traceback.format_exc()
logging.info(tb)
logging.info(f'This task inserted {self.results_counter} tuples before failure.\n')
logging.info("Notifying broker and logging task failure in database...\n")
key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN"
url = task['given'][key]
""" Query all repos with repo url of given task """
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(url))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
task['worker_id'] = self.config['id']
try:
requests.post("http://{}:{}/api/unstable/task_error".format(
self.config['broker_host'],self.config['broker_port']), json=task)
except requests.exceptions.ConnectionError:
logging.error('Could not send task failure message to the broker\n')
except Exception:
logging.exception('An error occured while informing broker about task failure\n')
# Add to history table
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": task['models'][0],
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Error",
"total_results": self.results_counter
}
self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history))
logging.info("Recorded job error in the history table for: " + str(task) + "\n")
# Update job process table
updated_job = {
"since_id_str": repo_id,
"last_count": self.results_counter,
"last_run": datetime.datetime.now(),
"analysis_state": 0
}
self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job))
logging.info("Updated job process for model: " + task['models'][0] + "\n")
# Reset results counter for next task
self.results_counter = 0
def retrieve_tuple(self, key_values, tables):
table_str = tables[0]
del tables[0]
key_values_items = list(key_values.items())
for col, value in [key_values_items[0]]:
where_str = col + " = '" + value + "'"
del key_values_items[0]
for col, value in key_values_items:
where_str += ' AND ' + col + " = '" + value + "'"
for table in tables:
table_str += ", " + table
retrieveTupleSQL = s.sql.text("""
SELECT * FROM {} WHERE {}
""".format(table_str, where_str))
values = json.loads( | pd.read_sql(retrieveTupleSQL, self.db, params={}) | pandas.read_sql |
import yfinance as yahoo
import pandas as pd, numpy as np
import ssl
import urllib.request
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
#from pytickersymbols import PyTickerSymbols #https://pypi.org/project/pytickersymbols/
# functions to get info about each market and their current stock tickets
# markets to operate: USA (Nasdaq & SP500), England, China, Japan, Canada, Brazil, Australia
# the handlers will result with a list of metrics that will be use by main script
# to build respective portfolio
def GSPC():
USA = | pd.read_html("https://topforeignstocks.com/indices/components-of-the-sp-500-index/") | pandas.read_html |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 19 13:31:38 2019
@author: Muhammad
"""
import h5py
import mcmc.util_cupy as util
import matplotlib.pyplot as plt
import cupy as cp, numpy as np
import mcmc.image_cupy as imc
# import mcmc.measurement as mcmcMeas
from skimage.transform import iradon
import pathlib
import seaborn as sns
import os
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
import argparse
image_extension = '.png'
def _process_data(samples_history,u_samples_history,n,n_ext,t_start,t_end,target_image,corrupted_image,burn_percentage,isSinogram,sinogram,theta,fbp,SimulationResult_dir,result_file,cmap = plt.cm.seismic_r):
burn_start_index = int(0.01*burn_percentage*u_samples_history.shape[0])
#initial conditions
samples_init = samples_history[0,:]
#change
u_samples_history = u_samples_history[burn_start_index:,:]
samples_history = samples_history[burn_start_index:,:]
N = u_samples_history.shape[0]
#initial condition
vF_init = util.symmetrize(cp.asarray(samples_init)).reshape(2*n-1,2*n-1,order=imc.ORDER)
# vF_init = vF_init.conj()
vF_mean = util.symmetrize(cp.asarray(np.mean(samples_history,axis=0)))
vF_stdev = util.symmetrize(cp.asarray(np.std(samples_history,axis=0)))
vF_abs_stdev = util.symmetrize(cp.asarray(np.std(np.abs(samples_history),axis=0)))
fourier = imc.FourierAnalysis_2D(n,n_ext,t_start,t_end)
sL2 = util.sigmasLancosTwo(int(n))
# if isSinogram:
# vF_init = util.symmetrize_2D(fourier.rfft2(cp.asarray(fbp,dtype=cp.float32)))
# if not isSinogram:
vForiginal = util.symmetrize_2D(fourier.rfft2(cp.array(target_image)))
reconstructed_image_original = fourier.irfft2(vForiginal[:,n-1:])
reconstructed_image_init = fourier.irfft2(vF_init[:,n-1:])
samples_history_cp = cp.asarray(samples_history)
v_image_count=0
v_image_M = cp.zeros_like(reconstructed_image_original)
v_image_M2 = cp.zeros_like(reconstructed_image_original)
v_image_aggregate = (v_image_count,v_image_M,v_image_M2)
for i in range(N):
vF = util.symmetrize(samples_history_cp[i,:]).reshape(2*n-1,2*n-1,order=imc.ORDER)
v_temp = fourier.irfft2(vF[:,n-1:])
v_image_aggregate = util.updateWelford(v_image_aggregate,v_temp)
v_image_mean,v_image_var,v_image_s_var = util.finalizeWelford(v_image_aggregate)
#TODO: This is sign of wrong processing, Remove this
# if isSinogram:
# reconstructed_image_init = cp.fliplr(reconstructed_image_init)
# v_image_mean = cp.fliplr(v_image_mean)
# v_image_s_var = cp.fliplr(v_image_s_var)
mask = cp.zeros_like(reconstructed_image_original)
r = (mask.shape[0]+1)//2
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
x = 2*(i - r)/mask.shape[0]
y = 2*(j - r)/mask.shape[1]
if (x**2+y**2 < 1):
mask[i,j]=1.
u_samples_history_cp = cp.asarray(u_samples_history)
u_image = cp.zeros_like(v_image_mean)
# ell_image = cp.zeros_like(v_image_mean)
u_image_count=0
u_image_M = cp.zeros_like(u_image)
u_image_M2 = cp.zeros_like(u_image)
u_image_aggregate = (u_image_count,u_image_M,u_image_M2)
ell_image_count=0
ell_image_M = cp.zeros_like(u_image)
ell_image_M2 = cp.zeros_like(u_image)
ell_image_aggregate = (ell_image_count,ell_image_M,ell_image_M2)
for i in range(N):
uF = util.symmetrize(u_samples_history_cp[i,:]).reshape(2*n-1,2*n-1,order=imc.ORDER)
u_temp = fourier.irfft2(uF[:,n-1:])
u_image_aggregate = util.updateWelford(u_image_aggregate,u_temp)
ell_temp = cp.exp(u_temp)
ell_image_aggregate = util.updateWelford(ell_image_aggregate, ell_temp)
u_image_mean,u_image_var,u_image_s_var = util.finalizeWelford(u_image_aggregate)
ell_image_mean,ell_image_var,ell_image_s_var = util.finalizeWelford(ell_image_aggregate)
# if isSinogram:
# u_image_mean = cp.flipud(u_image_mean) #cp.rot90(cp.fft.fftshift(u_image),1)
# u_image_var = cp.flipud(u_image_var) #cp.rot90(cp.fft.fftshift(u_image),1)
# ell_image_mean = cp.flipud(ell_image_mean)# cp.rot90(cp.fft.fftshift(ell_image),1)
# ell_image_var = cp.flipud(ell_image_var)# cp.rot90(cp.fft.fftshift(ell_image),1)
ri_fourier = cp.asnumpy(reconstructed_image_original)
if isSinogram:
ri_compare = fbp
else:
ri_compare = ri_fourier
is_masked=True
if is_masked:
reconstructed_image_var = mask*v_image_s_var
reconstructed_image_mean = mask*v_image_mean
reconstructed_image_init = mask*reconstructed_image_init
u_image_mean = mask*u_image_mean #cp.rot90(cp.fft.fftshift(u_image),1)
u_image_s_var = mask*u_image_s_var #cp.rot90(cp.fft.fftshift(u_image),1)
ell_image_mean = mask*ell_image_mean# cp.rot90(cp.fft.fftshift(ell_image),1)
ell_image_s_var = mask*ell_image_s_var# cp.rot90(cp.fft.fftshift(ell_image),1)
else:
reconstructed_image_mean = v_image_mean
reconstructed_image_var = v_image_s_var
ri_init = cp.asnumpy(reconstructed_image_init)
# ri_fourier = fourier.irfft2((sL2.astype(cp.float32)*vForiginal)[:,n-1:])
vForiginal_n = cp.asnumpy(vForiginal)
vF_init_n = cp.asnumpy(vF_init)
ri_fourier_n = cp.asnumpy(ri_fourier)
vF_mean_n = cp.asnumpy(vF_mean.reshape(2*n-1,2*n-1,order=imc.ORDER))
vF_stdev_n = cp.asnumpy(vF_stdev.reshape(2*n-1,2*n-1,order=imc.ORDER))
vF_abs_stdev_n = cp.asnumpy(vF_abs_stdev.reshape(2*n-1,2*n-1,order=imc.ORDER))
ri_mean_n = cp.asnumpy(reconstructed_image_mean)
ri_var_n = cp.asnumpy(reconstructed_image_var)
ri_std_n = np.sqrt(ri_var_n)
# ri_n_scalled = ri_n*cp.asnumpy(scalling_factor)
u_mean_n = cp.asnumpy(u_image_mean)
u_var_n = cp.asnumpy(u_image_s_var)
ell_mean_n = cp.asnumpy(ell_image_mean)
ell_var_n = cp.asnumpy(ell_image_s_var)
#Plotting one by one
#initial condition
fig = plt.figure()
plt.subplot(1,2,1)
im = plt.imshow(np.absolute(vF_init_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Fourier - real part')
plt.subplot(1,2,2)
im = plt.imshow(np.angle(vF_init_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
fig.colorbar(im)
plt.title('Fourier - imaginary part')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'vF_init')+image_extension, bbox_inches='tight')
plt.close()
#vF Original
fig = plt.figure()
plt.subplot(1,2,1)
im = plt.imshow(np.absolute(vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Fourier - absolute')
plt.subplot(1,2,2)
im = plt.imshow(np.angle(vForiginal_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
fig.colorbar(im)
plt.title('Fourier - angle')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'vForiginal')+image_extension, bbox_inches='tight')
plt.close()
#vF Original
fig = plt.figure()
plt.subplot(1,2,1)
im = plt.imshow(np.absolute(vF_mean_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Fourier - absolute')
plt.subplot(1,2,2)
im = plt.imshow(np.angle(vF_mean_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
fig.colorbar(im)
plt.title('Fourier - phase')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'vF_mean')+image_extension, bbox_inches='tight')
plt.close()
#Absolute error of vF - vForiginal
fig = plt.figure()
im = plt.imshow(np.abs(vF_mean_n-vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Fourier abs Error (mean vs original)')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'abs_err_vF_mean')+image_extension, bbox_inches='tight')
plt.close()
#Absolute error of vF_init - vForiginal
fig = plt.figure()
im = plt.imshow(np.abs(vF_init_n-vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Fourier abs Error (init vs original)')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'abs_err_vF_init')+image_extension, bbox_inches='tight')
plt.close()
#Absolute error of vF_init - vForiginal
fig = plt.figure()
im = plt.imshow(np.abs(vF_init_n-vF_mean_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Fourier abs Error (init vs mean)')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'abs_err_vF_init_vF_mean')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ri_mean_n,cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Reconstructed Image mean')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ri_mean_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ri_fourier,cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Reconstructed Image through Fourier')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ri_or_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ri_init,cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Reconstructed Image from initial condition')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ri_init')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ri_var_n,cmap=cmap)
fig.colorbar(im)
plt.title('Reconstructed Image variance')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ri_var_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(target_image,cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Target Image')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'target_image')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ri_compare,cmap=cmap,vmin=-1,vmax=1)
if isSinogram:
plt.title('Filtered Back Projection -FBP')
else:
plt.title('Reconstructed Image From vFOriginal')
fig.colorbar(im)
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ri_compare')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow((target_image-ri_mean_n),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Error SPDE')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'err_RI_TI')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow((target_image-ri_compare),cmap=cmap)#,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Error of comparison reconstruction')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'err_RIO_TI')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow((ri_compare-target_image),cmap=cmap,vmin=-1,vmax=1)
fig.colorbar(im)
plt.title('Error FBP')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'err_RI_CMP')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(u_mean_n,cmap=cmap)
fig.colorbar(im)
plt.title('Mean $u$')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'u_mean_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(u_var_n,cmap=cmap)
plt.title('Var $u$')
fig.colorbar(im)
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'u_var_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ell_mean_n,cmap=cmap)
fig.colorbar(im)
plt.title(r'Mean $\ell$')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ell_mean_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
im = plt.imshow(ell_var_n,cmap=cmap)
fig.colorbar(im)
plt.title(r'Var $\ell$')
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'ell_var_n')+image_extension, bbox_inches='tight')
plt.close()
fig = plt.figure()
if isSinogram:
im = plt.imshow(sinogram,cmap=cmap)
plt.title('Sinogram')
else:
im = plt.imshow(corrupted_image,cmap=cmap)
plt.title('corrupted_image --- CI')
fig.colorbar(im)
plt.tight_layout()
plt.savefig(str(SimulationResult_dir/'measurement')+image_extension, bbox_inches='tight')
plt.close()
#plot several slices
N_slices = 16
t_index = np.arange(target_image.shape[1])
for i in range(N_slices):
fig = plt.figure()
slice_index = target_image.shape[0]*i//N_slices
plt.plot(t_index,target_image[slice_index,:],'-k',linewidth=0.5,markersize=1)
plt.plot(t_index,ri_fourier_n[slice_index,:],'-r',linewidth=0.5,markersize=1)
plt.plot(t_index,ri_mean_n[slice_index,:],'-b',linewidth=0.5,markersize=1)
plt.fill_between(t_index,ri_mean_n[slice_index,:]-2*ri_std_n[slice_index,:],
ri_mean_n[slice_index,:]+2*ri_std_n[slice_index,:],
color='b', alpha=0.1)
plt.plot(t_index,ri_compare[slice_index,:],':k',linewidth=0.5,markersize=1)
plt.savefig(str(SimulationResult_dir/'1D_Slice_{}'.format(slice_index-(target_image.shape[0]//2)))+image_extension, bbox_inches='tight')
plt.close()
f_index = np.arange(n)
for i in range(N_slices):
fig = plt.figure()
slice_index = vForiginal_n.shape[0]*i//N_slices
plt.plot(f_index,np.abs(vForiginal_n[slice_index,n-1:]),'-r',linewidth=0.5,markersize=1)
plt.plot(f_index,np.abs(vF_init_n[slice_index,n-1:]),':k',linewidth=0.5,markersize=1)
plt.plot(f_index,np.abs(vF_mean_n[slice_index,n-1:]),'-b',linewidth=0.5,markersize=1)
plt.fill_between(f_index,np.abs(vF_mean_n[slice_index,n-1:])-2*vF_abs_stdev_n[slice_index,n-1:],
np.abs(vF_mean_n[slice_index,n-1:])+2*vF_abs_stdev_n[slice_index,n-1:],
color='b', alpha=0.1)
plt.savefig(str(SimulationResult_dir/'1D_F_Slice_{}'.format(slice_index-n))+image_extension, bbox_inches='tight')
plt.close()
# fig.colorbar(im, ax=ax[:,:], shrink=0.8)
# fig.savefig(str(SimulationResult_dir/'Result')+image_extension, bbox_inches='tight')
# for ax_i in ax.flatten():
# extent = ax_i.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# # print(ax_i.title.get_text())
# fig.savefig(str(SimulationResult_dir/ax_i.title.get_text())+''+image_extension, bbox_inches=extent.expanded(1.2, 1.2))
#
# fig = plt.figure()
# plt.hist(u_samples_history[:,0],bins=50,density=1)
error = (target_image-ri_mean_n)
error_CMP = (target_image-ri_compare)
L2_error = np.linalg.norm(error)
MSE = np.sum(error*error)/error.size
PSNR = 10*np.log10(np.max(ri_mean_n)**2/MSE)
SNR = np.mean(ri_mean_n)/np.sqrt(MSE*(error.size/(error.size-1)))
L2_error_CMP = np.linalg.norm(error_CMP)
MSE_CMP = np.sum(error_CMP*error_CMP)/error_CMP.size
PSNR_CMP = 10*np.log10(np.max(ri_compare)**2/MSE_CMP)
SNR_CMP = np.mean(ri_compare)/np.sqrt(MSE_CMP*(error_CMP.size/(error_CMP.size-1)))
metric = {'L2_error':L2_error,
'MSE':MSE,
'PSNR':PSNR,
'SNR':SNR,
'L2_error_CMP':L2_error_CMP,
'MSE_CMP':MSE_CMP,
'PSNR_CMP':PSNR_CMP,
'SNR_CMP':SNR_CMP}
with h5py.File(result_file,mode='a') as file:
for key,value in metric.items():
if key in file.keys():
del file[key]
# else:
file.create_dataset(key,data=value)
print('Shallow-SPDE : L2-error {}, MSE {}, SNR {}, PSNR {},'.format(L2_error,MSE,SNR,PSNR))
print('FBP : L2-error {}, MSE {}, SNR {}, PSNR {}'.format(L2_error_CMP,MSE_CMP,SNR_CMP,PSNR_CMP))
def post_analysis(input_dir,relative_path_str="/scratch/work/emzirm1/SimulationResult",folder_plot=True,filename='result.hdf5',cmap = plt.cm.seismic_r):
sns.set_style("ticks")
sns.set_context('paper')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
relative_path = pathlib.Path(relative_path_str)
SimulationResult_dir = relative_path /input_dir
result_file = str(SimulationResult_dir/filename)
if not folder_plot:
with h5py.File(result_file,mode='r') as file:
n_layers = file['n_layers'][()]
try:
samples_history = file['Layers {}/samples_history'.format(n_layers-1)][()]
except KeyError:
pass
finally:
samples_history = file['Layers {}/samples_history'.format(n_layers-1)][()]
u_samples_history = file['Layers {}/samples_history'.format(n_layers-2)][()]
n = file['fourier/basis_number'][()]
n_ext = file['fourier/extended_basis_number'][()]
t_start = file['t_start'][()]
t_end = file['t_end'][()]
target_image = file['measurement/target_image'][()]
corrupted_image = file['measurement/corrupted_image'][()]
burn_percentage = file['burn_percentage'][()]
# meas_std = file['measurement/stdev'][()]
isSinogram = 'sinogram' in file['measurement'].keys()
if isSinogram:
sinogram = file['measurement/sinogram'][()]
theta = file['measurement/theta'][()]
fbp = iradon(sinogram,theta,circle=True)
else:
for result_file in SimulationResult_dir.iterdir():
loaded_first_file=False
if result_file.name.endswith('hdf5'):
with h5py.File(result_file,mode='r') as file:
if not loaded_first_file:
n_layers = file['n_layers'][()]
samples_history = file['Layers {}/samples_history'.format(n_layers-1)][()]
u_samples_history = file['Layers {}/samples_history'.format(n_layers-2)][()]
n = file['fourier/basis_number'][()]
n_ext = file['fourier/extended_basis_number'][()]
t_start = file['t_start'][()]
t_end = file['t_end'][()]
target_image = file['measurement/target_image'][()]
corrupted_image = file['measurement/corrupted_image'][()]
burn_percentage = file['burn_percentage'][()]
isSinogram = 'sinogram' in file['measurement'].keys()
if isSinogram:
sinogram = file['measurement/sinogram'][()]
theta = file['measurement/theta'][()]
fbp = iradon(sinogram,theta,circle=True)
loaded_first_file = True
else:
samples_history = file['Layers {}/samples_history'.format(n_layers-1)][()]
u_samples_history = file['Layers {}/samples_history'.format(n_layers-2)][()]
_process_data(samples_history,u_samples_history,n,n_ext,t_start,t_end,target_image,corrupted_image,burn_percentage,
isSinogram,sinogram,theta,fbp,SimulationResult_dir,result_file,cmap = plt.cm.seismic_r)
def drawSlices(ri_mean_n,N_slices):
""" beta2 in ps / km
C is chirp
z is an array of z positions """
t = np.arange(ri_mean_n.shape[1])
X,Y = np.meshgrid(t,np.arange(0,ri_mean_n.shape[1],ri_mean_n.shape[1]//N_slices))
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
verts = []
for i in range(N_slices):
slice_index = ri_mean_n.shape[0]*i//N_slices
verts.append(list(zip(t,ri_mean_n[slice_index, :])))
poly = PolyCollection(verts, facecolors=(1,1,1,1), edgecolors=(0,0,0,1),linewidths=0.5)
poly.set_alpha(0.3)
ax.add_collection3d(poly, zs=Y[:, 0], zdir='y')
ax.set_xlim3d(np.min(X), np.max(X))
ax.set_ylim3d(np.min(Y), np.max(Y))
ax.set_zlim3d(np.min(ri_mean_n), np.max(ri_mean_n))
plt.savefig('Slices'+image_extension, bbox_inches='tight')
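# Small synthetic demo of drawSlices (illustrative): any 2D array with the reconstruction's
# shape works, here a smooth Gaussian bump on a 128x128 grid.
def _example_drawSlices():
    xs = np.linspace(-1.0, 1.0, 128)
    X, Y = np.meshgrid(xs, xs)
    image = np.exp(-4.0 * (X ** 2 + Y ** 2))
    drawSlices(image, N_slices=16)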
def make_summaries(parent_path_str="/scratch/work/emzirm1/SimulationResult",dim=2):
parent_path = pathlib.Path(parent_path_str)
Series = []
column_names = []
column_names_sino=[]
column_names_sino_with_error = []
index_column = ['file_name']
for f in parent_path.iterdir():
for fname in f.iterdir():
if str(fname).endswith('.hdf5'):
try:
with h5py.File(fname,mode='r+') as file:
if 'd' in file.keys():
d =file['d'][()]
if d ==dim:
#Check wheter Column_names still empty, if it is add column names
if not column_names:
print('creating column name')
column_names = list(file.keys())
#remove the groups
column_names.remove('Layers 0')
column_names.remove('Layers 1')
column_names.remove('fourier')
column_names.remove('measurement')
column_names.remove('pcn')
#add some necessaries
column_names.append('fourier/basis_number')
column_names.append('fourier/t_start')
column_names.append('fourier/t_end')
column_names.append('measurement/stdev')
column_names.append('pcn/n_layers')
column_names.append('pcn/beta')
column_names.append('pcn/beta_feedback_gain')
column_names.append('pcn/target_acceptance_rate')
column_names.append('pcn/max_record_history')
if 'file_name' not in column_names:
column_names.append('file_name')
if not column_names_sino:
column_names_sino = column_names.copy()
if 'measurement/n_r' in file.keys():
column_names_sino.append('measurement/n_r')
column_names_sino.append('measurement/n_theta')
if not column_names_sino_with_error and 'L2_error' in file.keys():
column_names_sino_with_error = column_names_sino.copy()
column_names_sino_with_error.append('L2_error')
column_names_sino_with_error.append('L2_error_CMP')
column_names_sino_with_error.append('MSE')
column_names_sino_with_error.append('MSE_CMP')
column_names_sino_with_error.append('PSNR')
column_names_sino_with_error.append('PSNR_CMP')
if 'file_name' in file.keys():
del file['file_name']
file.create_dataset('file_name',data=str(fname.absolute()))
print('Appending Series')
if 'sinogram' not in file['measurement'].keys():
pass
# content = [file[key][()] for key in column_names]
# Series.append(pd.Series(content,index=column_names_sino_with_error))
else:
if 'L2_error' in file.keys():
content = [file[key][()] for key in column_names_sino_with_error]
Series.append(pd.Series(content,index=column_names_sino_with_error))
# else:
# content = [file[key][()] for key in column_names_sino]
# Series.append(pd.Series(content,index=column_names_sino_with_error))
else:
#print('Dimension not match')
continue
except Exception as e:
print('Something bad happen when opening hdf5 file {}: {}'.format(str(fname),e.args))
continue
else:
continue
df = | pd.DataFrame(Series,columns=column_names_sino_with_error) | pandas.DataFrame |
from os import mkdir
import pandas as pd
import glob
import yaml
from sklearn.preprocessing import MaxAbsScaler
from joblib import dump
# NOTE must be run from repo root.
with open("params.yaml") as yml:
params = yaml.safe_load(yml)
files = glob.glob("data/initial_features/*")
dfs = []
for file in sorted(files):
df = | pd.read_csv(file) | pandas.read_csv |
import os
import pandas as pd
import numpy as np
import logging
import wget
import time
import pickle
from src.features import preset
from src.features import featurizer
from src.data.utils import LOG
from matminer.data_retrieval.retrieve_MP import MPDataRetrieval
from tqdm import tqdm
from pathlib import Path
from src.data.get_data_MP import data_MP
import dotenv
def featurize_by_material_id(material_ids: np.array,
featurizerObject: featurizer.extendedMODFeaturizer,
MAPI_KEY: str,
writeToFile: bool = True) -> pd.DataFrame:
""" Run all of the preset featurizers on the input dataframe.
Arguments:
df: the input dataframe with a `"structure"` column
containing `pymatgen.Structure` objects.
Returns:
The featurized DataFrame.
"""
def apply_featurizers(criterion, properties, mpdr, featurizerObject):
LOG.info("Downloading dos and bandstructure objects..")
timeDownloadStart = time.time()
df_portion = mpdr.get_dataframe(criteria=criterion, properties=properties)
timeDownloadEnd = time.time()
LOG.info(df_portion)
df_time, df_portion = featurizerObject.featurize(df_portion)
df_time["download_objects"] = [timeDownloadEnd-timeDownloadStart]
return df_time, df_portion
properties = ["material_id","full_formula", "bandstructure", "dos", "structure"]
mpdr = MPDataRetrieval(MAPI_KEY)
steps = 1
leftover = len(material_ids)%steps
df = pd.DataFrame({})
df_timers = | pd.DataFrame({}) | pandas.DataFrame |
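# Hypothetical usage sketch (the function body above is truncated in this listing). It
# assumes a Materials Project API key in the environment; the preset name and env-variable
# name are placeholders, and "mp-149"/"mp-2534" are example material ids.
def _example_featurize_by_material_id():
    dotenv.load_dotenv()
    MAPI_KEY = os.environ["MAPI_KEY"]                # assumed variable name
    featurizerObject = preset.PRESET_HEBNES_2021()   # assumed preset; use the project's own
    ids = np.array(["mp-149", "mp-2534"])
    return featurize_by_material_id(ids, featurizerObject, MAPI_KEY, writeToFile=False)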
import warnings
import numpy as np
import pandas as pd
def create_initial_infections(
empirical_infections,
synthetic_data,
start,
end,
seed,
virus_shares,
reporting_delay,
population_size,
):
"""Create a DataFrame with initial infections.
.. warning::
In case a person is drawn to be newly infected more than once we only
infect her on the first date. If the probability of being infected is
large, not correcting for this will lead to a lower infection probability
than in the empirical data.
Args:
empirical_infections (pandas.Series): Newly infected Series with the index
levels ["date", "county", "age_group_rki"]. Should already be corrected
upwards to include undetected cases.
synthetic_data (pandas.DataFrame): Dataset with one row per simulated
individual. Must contain the columns age_group_rki and county.
start (str or pd.Timestamp): Start date.
end (str or pd.Timestamp): End date.
seed (int)
virus_shares (dict or None): If None, it is assumed that there is only one
strain. If dict, keys are the names of the virus strains and the values
are pandas.Series with a DatetimeIndex and the share among newly infected
individuals on each day as value.
reporting_delay (int): Number of days by which the reporting of cases is
delayed. If given, later days are used to get the infections of the
demanded time frame.
population_size (int): Population size behind the empirical_infections.
Returns:
pandas.DataFrame: DataFrame with same index as synthetic_data and one column
for each day between start and end. Dtype is boolean or categorical.
Values identify which individual gets infected with which variant.
"""
np.random.seed(seed)
assert reporting_delay >= 0, "Reporting delay must be >= 0"
reporting_delay = pd.Timedelta(days=reporting_delay)
start = pd.Timestamp(start) + reporting_delay
end = | pd.Timestamp(end) | pandas.Timestamp |
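# Hypothetical usage sketch for the (truncated) function above; values mirror the docstring:
# empirical_infections indexed by ["date", "county", "age_group_rki"], synthetic_data with
# the same two columns, and virus_shares=None for a single strain.
def _example_create_initial_infections(empirical_infections, synthetic_data):
    return create_initial_infections(
        empirical_infections=empirical_infections,
        synthetic_data=synthetic_data,
        start="2020-03-01",
        end="2020-03-14",
        seed=484,
        virus_shares=None,
        reporting_delay=5,
        population_size=83_000_000,  # roughly Germany's population, as an example
    )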
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestDataFrame(unittest.TestCase):
def setUp(self):
data_null = np.array([
["007", 1, 1, 2.0, True],
[None, 2, 2, None, True],
["12", None, 4, 2.0, False],
["1312", 0, None, 1.2, None],
])
self.df_null = pd.DataFrame({
"f_string": data_null[:, 0],
"f_long": data_null[:, 1],
"f_int": data_null[:, 2],
"f_double": data_null[:, 3],
"f_boolean": data_null[:, 4]
})
data = np.array([
["a", 1, 1, 2.0, True],
["abc", 2, 2, 2.4, True],
["c", 4, 4, 2.0, False],
["a", 0, 1, 1.2, False],
])
self.df = pd.DataFrame({
"f_string": data[:, 0],
"f_long": data[:, 1],
"f_int": data[:, 2],
"f_double": data[:, 3],
"f_boolean": data[:, 4]
})
def test_memory_null(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df_null, schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, pd.Int64Dtype())
self.assertEqual(df2['f_int'].dtype, pd.Int32Dtype())
self.assertEqual(df2['f_double'].dtype, np.float64)
self.assertEqual(df2['f_boolean'].dtype, pd.BooleanDtype())
def test_memory(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df, schemaStr=schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, pd.Int64Dtype())
self.assertEqual(df2['f_int'].dtype, pd.Int32Dtype())
self.assertEqual(df2['f_double'].dtype, np.float64)
self.assertEqual(df2['f_boolean'].dtype, pd.BooleanDtype())
def test_string_not_converted_to_double(self):
data = np.array([
["007"],
["012"],
])
source = dataframeToOperator(pd.DataFrame.from_records(data), schemaStr="str string", op_type="batch")
df = source.collectToDataframe()
print(df)
self.assertEqual(df['str'].iloc[0], "007")
self.assertEqual(df['str'].iloc[1], "012")
def test_df_to_op_speed(self):
import time
start_time = time.time()
m = {0: True, 1: False, 2: None}
users = []
for col in range(10000):
r = col % 3
users.append([col, "1\"" + str(col) + "\"1", m.get(r)])
df = pd.DataFrame(users)
source = BatchOperator.fromDataframe(df, schemaStr='id int, label string, b boolean')
source.firstN(10).print()
end_time = time.time()
elapsed_time = end_time - start_time
print(elapsed_time)
self.assertTrue(elapsed_time < 10)
def test_op_to_df_speed(self):
import time
start_time = time.time()
m = {0: True, 1: False, 2: None}
users = []
for col in range(50000):
r = col % 3
users.append([col, "1\"" + str(col) + "\"1", m.get(r)])
df = | pd.DataFrame(users) | pandas.DataFrame |
'''
DESCRIPTION
This module imports, processes and cleans raw data.
The output is a transformed data to used for a ML Pipeline
INPUTS
messages_filepath - path containing the messages csv data file
categories_filepath - path containing the categories csv data file
OUTPUTS
Saves the combined processed data as a SQLite database
SCRIPT EXECUTION SAMPLE
python process_data.py disaster_messages.csv disaster_categories.csv Disaster_Response_Data.db
'''
#import relevant libraries
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
DESCRIPTION
Loads and combines raw data files at given file path
INPUTS
messages_filepath - path containing the messages csv data file
categories_filepath - path containing the categories csv data file
OUTPUTS
df - Dataframe with combined data
'''
# read in messages csv file as dataframe
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
import os
from loguru import logger
import googlemaps
import pandas as pd
geo = os.getenv("LTC_FACILITIES_GEOCODED_CSV")
if geo is None:
raise ValueError("you must set a value for the LTC_FACILITIES_GEOCODED_CSV env variable")
ltc_geo = | pd.read_csv(geo) | pandas.read_csv |
# This test focuses on the
# development and exploration of growth models and their properties.
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import seaborn as sns
from psbutils.filecheck import Plottable, figure_found
from scipy.optimize import curve_fit
def plot_figure(name: str, ax: Optional[Plottable] = None) -> None:
sns.despine()
assert figure_found(ax, f"test_growth_models/{name}")
plt.clf()
# Logistic
def logistic_model(ts: np.ndarray, mu: float, K: float, c0: float, lag: float) -> np.ndarray:
return np.array([K / (1.0 + (K - c0) / c0 * np.exp(mu * (lag - t))) if t > lag else c0 for t in ts])
# Gompertz
def model(ts: np.ndarray, mu: float, K: float, c0: float, lag: float) -> np.ndarray:
return np.array([K * np.exp(np.log(c0 / K) * np.exp(mu * (lag - t))) if t > lag else c0 for t in ts])
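# Minimal comparison of the two growth curves above (illustrative parameter values in the
# same range as the test below): both start near c0 and saturate near K, the Gompertz
# curve rising earlier and less symmetrically than the logistic one.
def _example_growth_models():
    ts = np.linspace(0, 1200, 5)
    logi = logistic_model(ts, 0.015, 2.0, 0.002, 200.0)
    gomp = model(ts, 0.015, 2.0, 0.002, 200.0)
    return logi, gomp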
@pytest.mark.timeout(30)
def test_growth_models():
r_true = 0.015
K_true = 2
c0_true = 0.002
lag_true = 200
sig = 0.05
n = 101
ts = np.linspace(0, 1200, n)
xs = model(ts, r_true, K_true, c0_true, lag_true)
np.random.seed(42)
ys = xs * (1 + sig * np.random.randn(n))
plt.figure(figsize=(6.4, 4.8))
plt.scatter(ts, xs)
plot_figure("plot1_scatter")
mle = curve_fit(model, ts, ys, p0=[0.02, 2, 0.01, 100])[0]
r, K, c0, lag = mle[:4]
df = | pd.DataFrame(mle, columns=["MLE"]) | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BooleanOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([True, True, False])
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def float_pser(self):
return | pd.Series([1, 2, 3], dtype=float) | pandas.Series |
import geopandas
import pandas
import ogr
import os
import numpy
import gdal
from tqdm import tqdm
from pygeos import from_wkb
def query_b(geoType,keyCol,**valConstraint):
"""
This function builds an SQL query from the values passed to the retrieve() function.
Arguments:
*geoType* : Type of geometry (osm layer) to search for.
*keyCol* : A list of keys/columns that should be selected from the layer.
***valConstraint* : A dictionary of constraints for the values. e.g. WHERE 'value'>20 or 'value'='constraint'
Returns:
*string: : a SQL query string.
"""
query = "SELECT " + "osm_id"
for a in keyCol: query+= ","+ a
query += " FROM " + geoType + " WHERE "
# If there are values in the dictionary, add constraint clauses
if valConstraint:
for a in [*valConstraint]:
# For each value of the key, add the constraint
for b in valConstraint[a]: query += a + b
query+= " AND "
# Always ensures the first key/col provided is not Null.
query+= ""+str(keyCol[0]) +" IS NOT NULL"
return query
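# Worked example of the string query_b builds (illustrative layer/key/constraint):
# query_b("lines", ["highway"], highway=["='primary'"]) returns
# "SELECT osm_id,highway FROM lines WHERE highway='primary' AND highway IS NOT NULL"
def _example_query_b():
    return query_b("lines", ["highway"], highway=["='primary'"])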
def retrieve(osm_path,geoType,keyCol,**valConstraint):
"""
Function to extract specified geometry and keys/values from OpenStreetMap
Arguments:
*osm_path* : file path to the .osm.pbf file of the region
for which we want to do the analysis.
*geoType* : Type of Geometry to retrieve. e.g. lines, multipolygons, etc.
*keyCol* : These keys will be returned as columns in the dataframe.
***valConstraint: A dictionary specifiying the value constraints.
A key can have multiple values (as a list) for more than one constraint for key/value.
Returns:
*GeoDataFrame* : a geopandas GeoDataFrame with all columns, geometries, and constraints specified.
"""
driver=ogr.GetDriverByName('OSM')
data = driver.Open(osm_path)
if data is None:
print("ERROR: Nonetype error when opening the OSM file. Check required.")
return pandas.DataFrame()
query = query_b(geoType,keyCol,**valConstraint)
sql_lyr = data.ExecuteSQL(query)
features =[]
# cl = columns
cl = ['osm_id']
for a in keyCol: cl.append(a)
if data is not None:
print('query is finished, lets start the loop')
for feature in tqdm(sql_lyr):
try:
if feature.GetField(keyCol[0]) is not None:
geom = from_wkb(feature.geometry().ExportToWkb())
if geom is None:
continue
# field will become a row in the dataframe.
field = []
for i in cl: field.append(feature.GetField(i))
field.append(geom)
features.append(field)
except:
print("WARNING: skipped OSM feature")
else:
print("ERROR: Nonetype error when requesting SQL. Check required.")
cl.append('geometry')
if len(features) > 0:
return | pandas.DataFrame(features,columns=cl) | pandas.DataFrame |
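# Hypothetical usage, assuming a local .osm.pbf extract at the given path: pull primary
# roads with their highway tag; the result is a DataFrame with osm_id, highway and a
# pygeos geometry column.
def _example_retrieve(osm_path="region-latest.osm.pbf"):
    return retrieve(osm_path, "lines", ["highway"], highway=["='primary'"])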
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
from bdshare.util import vars as vs
def get_current_trade_data(symbol=None, retry_count=1, pause=0.001):
"""
get last stock price.
:param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'
:return: dataframecd
"""
for _ in range(retry_count):
time.sleep(pause)
try:
r = requests.get(vs.DSE_URL+vs.DSE_LSP_URL)
if r.status_code != 200:
r = requests.get(vs.DSE_ALT_URL+vs.DSE_LSP_URL)
except Exception as e:
print(e)
else:
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
# print(table)
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'symbol': cols[1].text.strip().replace(",", ""),
'ltp': cols[2].text.strip().replace(",", ""),
'high': cols[3].text.strip().replace(",", ""),
'low': cols[4].text.strip().replace(",", ""),
'close': cols[5].text.strip().replace(",", ""),
'ycp': cols[6].text.strip().replace(",", ""),
'change': cols[7].text.strip().replace("--", "0"),
'trade': cols[8].text.strip().replace(",", ""),
'value': cols[9].text.strip().replace(",", ""),
'volume': cols[10].text.strip().replace(",", "")
})
df = pd.DataFrame(quotes)
if symbol:
df = df.loc[df.symbol == symbol.upper()]
return df
else:
return df
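# Illustrative calls (network access to the DSE site is required); the symbol filter is
# case-insensitive because the function upper-cases it before matching.
def _example_current_trades():
    all_quotes = get_current_trade_data()      # full market snapshot
    aci_only = get_current_trade_data('aci')   # a single instrument
    return all_quotes, aci_only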
def get_dsex_data(symbol=None, retry_count=1, pause=0.001):
"""
get dseX share price.
:param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'
:return: dataframe
"""
for _ in range(retry_count):
time.sleep(pause)
try:
r = requests.get(vs.DSE_URL+vs.DSEX_INDEX_VALUE)
if r.status_code != 200:
r = requests.get(vs.DSE_ALT_URL+vs.DSEX_INDEX_VALUE)
except Exception as e:
print(e)
else:
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table'})
# print(table)
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'symbol': cols[1].text.strip().replace(",", ""),
'ltp': cols[2].text.strip().replace(",", ""),
'high': cols[3].text.strip().replace(",", ""),
'low': cols[4].text.strip().replace(",", ""),
'close': cols[5].text.strip().replace(",", ""),
'ycp': cols[6].text.strip().replace(",", ""),
'change': cols[7].text.strip().replace("--", "0"),
'trade': cols[8].text.strip().replace(",", ""),
'value': cols[9].text.strip().replace(",", ""),
'volume': cols[10].text.strip().replace(",", "")
})
df = pd.DataFrame(quotes)
if symbol:
df = df.loc[df.symbol == symbol.upper()]
return df
else:
return df
def get_current_trading_code():
"""
get last stock codes.
:return: dataframe
"""
try:
r = requests.get(vs.DSE_URL+vs.DSE_LSP_URL)
if r.status_code != 200:
r = requests.get(vs.DSE_ALT_URL+vs.DSE_LSP_URL)
except Exception as e:
print(e)
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'symbol': cols[1].text.strip().replace(",", "")})
df = pd.DataFrame(quotes)
return df
def get_hist_data(start=None, end=None, code='All Instrument'):
"""
get historical stock price.
:param start: str, Start date e.g.: '2020-03-01'
:param end: str, End date e.g.: '2020-03-02'
:param code: str, Instrument symbol e.g.: 'ACI'
:return: dataframe
"""
# data to be sent to post request
data = {'startDate': start,
'endDate': end,
'inst': code,
'archive': 'data'}
try:
r = requests.get(url=vs.DSE_URL+vs.DSE_DEA_URL, params=data)
if r.status_code != 200:
r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_DEA_URL, params=data)
except Exception as e:
print(e)
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'date': cols[1].text.strip().replace(",", ""),
'symbol': cols[2].text.strip().replace(",", ""),
'ltp': cols[3].text.strip().replace(",", ""),
'high': cols[4].text.strip().replace(",", ""),
'low': cols[5].text.strip().replace(",", ""),
'open': cols[6].text.strip().replace(",", ""),
'close': cols[7].text.strip().replace(",", ""),
'ycp': cols[8].text.strip().replace(",", ""),
'trade': cols[9].text.strip().replace(",", ""),
'value': cols[10].text.strip().replace(",", ""),
'volume': cols[11].text.strip().replace(",", "")
})
df = pd.DataFrame(quotes)
if 'date' in df.columns:
df = df.set_index('date')
df = df.sort_index(ascending=False)
else:
print('No data found')
return df
def get_basic_hist_data(start=None, end=None, code='All Instrument', index=None, retry_count=1, pause=0.001):
"""
get historical stock price.
:param start: str, Start date e.g.: '2020-03-01'
:param end: str, End date e.g.: '2020-03-02'
:param code: str, Instrument symbol e.g.: 'ACI'
:param retry_count : int, e.g.: 3
:param pause : int, e.g.: 0
:return: dataframe
"""
# data to be sent to post request
data = {'startDate': start,
'endDate': end,
'inst': code,
'archive': 'data'}
for _ in range(retry_count):
time.sleep(pause)
try:
r = requests.get(url=vs.DSE_URL+vs.DSE_DEA_URL, params=data)
if r.status_code != 200:
r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_DEA_URL, params=data)
except Exception as e:
print(e)
else:
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.content, 'html5lib')
# columns: date, open, high, close, low, volume
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'date': cols[1].text.strip().replace(",", ""),
'open': float(cols[6].text.strip().replace(",", "")),
'high': float(cols[4].text.strip().replace(",", "")),
'low': float(cols[5].text.strip().replace(",", "")),
'close': float(cols[7].text.strip().replace(",", "")),
'volume': int(cols[11].text.strip().replace(",", ""))
})
df = pd.DataFrame(quotes)
if 'date' in df.columns:
                if index == 'date':
                    df = df.set_index('date')
                df = df.sort_index(ascending=True)
else:
print('No data found')
return df
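# A minimal usage sketch showing the retry/pause knobs (values are
# illustrative); index='date' yields a date-indexed numeric OHLCV frame:
def _demo_basic_hist_data():
    ohlcv = get_basic_hist_data(start='2020-03-01', end='2020-03-31',
                                code='ACI', index='date',
                                retry_count=3, pause=0.5)
    print(ohlcv.tail())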
def get_close_price_data(start=None, end=None, code='All Instrument'):
"""
get stock close price.
:param start: str, Start date e.g.: '2020-03-01'
:param end: str, End date e.g.: '2020-03-02'
:param code: str, Instrument symbol e.g.: 'ACI'
:return: dataframe
"""
# data to be sent to post request
data = {'startDate': start,
'endDate': end,
'inst': code,
'archive': 'data'}
try:
r = requests.get(url=vs.DSE_URL+vs.DSE_CLOSE_PRICE_URL, params=data)
if r.status_code != 200:
r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_CLOSE_PRICE_URL, params=data)
except Exception as e:
print(e)
else:
soup = BeautifulSoup(r.content, 'html5lib')
# columns: date, open, high, close, low, volume
quotes = [] # a list to store quotes
table = soup.find(
'table', attrs={'class': 'table table-bordered background-white'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'date': cols[1].text.strip().replace(",", ""),
'symbol': cols[2].text.strip().replace(",", ""),
'close': cols[3].text.strip().replace(",", ""),
'ycp': cols[4].text.strip().replace(",", "")
})
df = | pd.DataFrame(quotes) | pandas.DataFrame |
import pandas as pd
from flask_testing import TestCase
from cellphonedb.src.app.cellphonedb_app import cellphonedb_app
from cellphonedb.src.app.app_logger import app_logger
from cellphonedb.src.app.flask.flask_app import create_app
from cellphonedb.src.core.database.sqlalchemy_models.db_model_gene import Gene
from cellphonedb.src.core.database.sqlalchemy_models.db_model_multidata import Multidata
from cellphonedb.src.core.database.sqlalchemy_models.db_model_protein import Protein
class TestDatabaseRelationsChecks(TestCase):
def test_all_protein_have_gen(self):
expected_protein_without_gene = 235
protein_query = cellphonedb_app.cellphonedb.database_manager.database.session.query(Protein,
Multidata.name).join(
Multidata)
protein_df = pd.read_sql(protein_query.statement,
cellphonedb_app.cellphonedb.database_manager.database.engine)
protein_ids = protein_df['id_protein'].tolist()
gene_query = cellphonedb_app.cellphonedb.database_manager.database.session.query(
Gene.protein_id)
gene_protein_ids = \
pd.read_sql(gene_query.statement,
cellphonedb_app.cellphonedb.database_manager.database.engine)[
'protein_id'].tolist()
protein_without_gene = []
for protein_id in protein_ids:
            if protein_id not in gene_protein_ids:
                protein_without_gene.append(protein_df[protein_df['id_protein'] == protein_id]['name'].iloc[0])
        if len(protein_without_gene) != expected_protein_without_gene:
            app_logger.warning('There are {} proteins without a gene'.format(len(protein_without_gene)))
            app_logger.warning(protein_without_gene)
        unknowed_proteins_without_gene = []
        for protein in protein_without_gene:
            if protein not in KNOWED_PROTEINS_WITHOUT_GENE:
                unknowed_proteins_without_gene.append(protein)
        if unknowed_proteins_without_gene:
            app_logger.warning(
                'There are {} unknown proteins without a gene'.format(len(unknowed_proteins_without_gene)))
app_logger.warning( | pd.Series(unknowed_proteins_without_gene) | pandas.Series |
# Import the required libraries: selenium, time, os, datetime, pandas
from selenium import webdriver
import time
import os
import datetime
import pandas as pd
def main():
    '''
    Main execution routine
    '''
    # Earliest year from which to fetch data
    year = 2015
    # Get the current year
    this_year = datetime.date.today().year
    # Stock (ticker) code to download
    stock_code = 6758
    # Default download directory used by Selenium
    download_directory = r"/Users/io/Desktop/prophet/stockdata"
    # Download the CSV files
    download(stock_code, year, this_year, download_directory)
    # Concatenate the downloaded dataframes
    df = concat_df(stock_code, year, this_year, download_directory)
    # Write the combined dataframe out as a CSV file
    df.to_csv(f"{download_directory}/{stock_code}_{year}_{this_year}.csv")
    # Print the finished dataframe
    print(df)
def download(stock_code, year, this_year, download_directory):
    '''
    Download the CSV data for a given stock, one file per year
    (saved into the specified default download directory)
    '''
    options = webdriver.ChromeOptions()  # Configure Chrome options
    prefs = {"download.default_directory": download_directory}  # Set the default download directory
    options.add_experimental_option("prefs", prefs)
    driver = webdriver.Chrome(options=options)  # Start the Chrome driver with the options
    driver.implicitly_wait(30)  # Wait time when an element is not found
    # Loop over each year and download its CSV data
    for y in range(year, this_year + 1):
        driver.get(f"https://kabuoji3.com/stock/{stock_code}/")
        # If the file already exists, skip to the next year
        if os.path.isfile(f"{download_directory}/{stock_code}_{y}.csv"):
            # Wait 3 seconds
            time.sleep(3)
            continue
        # Click the link text for year y
        driver.find_element_by_link_text(str(y)).click()
        # Wait 3 seconds
        time.sleep(3)
        # Click the CSV button to trigger the download
        driver.find_element_by_name("csv").click()
        driver.find_element_by_name("csv").click()
        # Wait 3 seconds
        time.sleep(3)
    # Close the browser and finish
    driver.quit()
def concat_df(stock_code, year, this_year, download_directory):
    '''
    Concatenate the downloaded CSV files into a single dataframe
    '''
    # Empty list to collect the dataframes to concatenate
    concat_list = []
    # Concatenate the dataframes
    for y in range(year, this_year + 1):
        # Read the dataframe
        df = pd.read_csv(f"{download_directory}/{stock_code}_{y}.csv", encoding="cp932")
        # Drop the duplicated '日付' (date) header row
        df.drop(index='日付', inplace=True)
        # Collect the dataframes in a list so they can be combined with pd.concat
        concat_list.append(df)
    # Combine the dataframes with pd.concat
df = | pd.concat(concat_list) | pandas.concat |
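# A minimal entry-point sketch for the script above (an assumption about how
# it is meant to be run; the original source is truncated at this point):
#
#     if __name__ == "__main__":
#         main()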
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import pandas
from modin.pandas.utils import to_pandas
import modin.pandas as pd
import os
import sqlite3
TEST_PARQUET_FILENAME = "test.parquet"
TEST_CSV_FILENAME = "test.csv"
TEST_JSON_FILENAME = "test.json"
TEST_HTML_FILENAME = "test.html"
TEST_EXCEL_FILENAME = "test.xlsx"
TEST_FEATHER_FILENAME = "test.feather"
TEST_HDF_FILENAME = "test.hdf"
TEST_MSGPACK_FILENAME = "test.msg"
TEST_STATA_FILENAME = "test.dta"
TEST_PICKLE_FILENAME = "test.pkl"
TEST_SAS_FILENAME = os.getcwd() + "/data/test1.sas7bdat"
TEST_SQL_FILENAME = "test.db"
SMALL_ROW_SIZE = 2000
@pytest.fixture
def ray_df_equals_pandas(ray_df, pandas_df):
return to_pandas(ray_df).sort_index().equals(pandas_df.sort_index())
@pytest.fixture
def setup_parquet_file(row_size, force=False):
if os.path.exists(TEST_PARQUET_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_parquet(TEST_PARQUET_FILENAME)
@pytest.fixture
def create_test_ray_dataframe():
df = pd.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
@pytest.fixture
def create_test_pandas_dataframe():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
@pytest.fixture
def test_files_eq(path1, path2):
with open(path1, "rb") as file1, open(path2, "rb") as file2:
file1_content = file1.read()
file2_content = file2.read()
if file1_content == file2_content:
return True
else:
return False
@pytest.fixture
def teardown_test_file(test_path):
if os.path.exists(test_path):
os.remove(test_path)
@pytest.fixture
def teardown_parquet_file():
if os.path.exists(TEST_PARQUET_FILENAME):
os.remove(TEST_PARQUET_FILENAME)
@pytest.fixture
def setup_csv_file(row_size, force=False, delimiter=","):
if os.path.exists(TEST_CSV_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_csv(TEST_CSV_FILENAME, sep=delimiter)
@pytest.fixture
def teardown_csv_file():
if os.path.exists(TEST_CSV_FILENAME):
os.remove(TEST_CSV_FILENAME)
@pytest.fixture
def setup_json_file(row_size, force=False):
if os.path.exists(TEST_JSON_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_json(TEST_JSON_FILENAME)
@pytest.fixture
def teardown_json_file():
if os.path.exists(TEST_JSON_FILENAME):
os.remove(TEST_JSON_FILENAME)
@pytest.fixture
def setup_html_file(row_size, force=False):
if os.path.exists(TEST_HTML_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_html(TEST_HTML_FILENAME)
@pytest.fixture
def teardown_html_file():
if os.path.exists(TEST_HTML_FILENAME):
os.remove(TEST_HTML_FILENAME)
@pytest.fixture
def setup_clipboard(row_size, force=False):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
@pytest.fixture
def setup_excel_file(row_size, force=False):
if os.path.exists(TEST_EXCEL_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_excel(TEST_EXCEL_FILENAME)
@pytest.fixture
def teardown_excel_file():
if os.path.exists(TEST_EXCEL_FILENAME):
os.remove(TEST_EXCEL_FILENAME)
@pytest.fixture
def setup_feather_file(row_size, force=False):
if os.path.exists(TEST_FEATHER_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_feather(TEST_FEATHER_FILENAME)
@pytest.fixture
def teardown_feather_file():
if os.path.exists(TEST_FEATHER_FILENAME):
os.remove(TEST_FEATHER_FILENAME)
@pytest.fixture
def setup_hdf_file(row_size, force=False):
if os.path.exists(TEST_HDF_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_hdf(TEST_HDF_FILENAME, "test")
@pytest.fixture
def teardown_hdf_file():
if os.path.exists(TEST_HDF_FILENAME):
os.remove(TEST_HDF_FILENAME)
@pytest.fixture
def setup_msgpack_file(row_size, force=False):
if os.path.exists(TEST_MSGPACK_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_msgpack(TEST_MSGPACK_FILENAME)
@pytest.fixture
def teardown_msgpack_file():
if os.path.exists(TEST_MSGPACK_FILENAME):
os.remove(TEST_MSGPACK_FILENAME)
@pytest.fixture
def setup_stata_file(row_size, force=False):
if os.path.exists(TEST_STATA_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_stata(TEST_STATA_FILENAME)
@pytest.fixture
def teardown_stata_file():
if os.path.exists(TEST_STATA_FILENAME):
os.remove(TEST_STATA_FILENAME)
@pytest.fixture
def setup_pickle_file(row_size, force=False):
if os.path.exists(TEST_PICKLE_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_pickle(TEST_PICKLE_FILENAME)
@pytest.fixture
def teardown_pickle_file():
if os.path.exists(TEST_PICKLE_FILENAME):
os.remove(TEST_PICKLE_FILENAME)
@pytest.fixture
def setup_sql_file(conn, force=False):
if os.path.exists(TEST_SQL_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
df.to_sql(TEST_SQL_FILENAME.split(".")[0], conn)
@pytest.fixture
def teardown_sql_file():
if os.path.exists(TEST_SQL_FILENAME):
os.remove(TEST_SQL_FILENAME)
def test_from_parquet():
setup_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME)
ray_df = pd.read_parquet(TEST_PARQUET_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_parquet_file()
def test_from_parquet_with_columns():
setup_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
ray_df = pd.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_parquet_file()
def test_from_csv():
setup_csv_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_csv(TEST_CSV_FILENAME)
ray_df = pd.read_csv(TEST_CSV_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_csv_file()
def test_from_csv_chunksize():
setup_csv_file(SMALL_ROW_SIZE)
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=500)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=500)
for ray_df, pd_df in zip(rdf_reader, pd_reader):
assert ray_df_equals_pandas(ray_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
ray_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
assert ray_df_equals_pandas(ray_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
ray_df = rdf_reader.read()
pd_df = pd_reader.read()
assert ray_df_equals_pandas(ray_df, pd_df)
def test_from_json():
setup_json_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_json(TEST_JSON_FILENAME)
ray_df = pd.read_json(TEST_JSON_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_json_file()
def test_from_html():
setup_html_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_html(TEST_HTML_FILENAME)[0]
ray_df = pd.read_html(TEST_HTML_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_html_file()
@pytest.mark.skip(reason="No clipboard on Travis")
def test_from_clipboard():
setup_clipboard(SMALL_ROW_SIZE)
pandas_df = pandas.read_clipboard()
ray_df = pd.read_clipboard()
assert ray_df_equals_pandas(ray_df, pandas_df)
def test_from_excel():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME)
ray_df = pd.read_excel(TEST_EXCEL_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_excel_file()
@pytest.mark.skip(reason="Arrow version mismatch between Pandas and Feather")
def test_from_feather():
setup_feather_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_feather(TEST_FEATHER_FILENAME)
ray_df = pd.read_feather(TEST_FEATHER_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_feather_file()
@pytest.mark.skip(reason="Memory overflow on Travis")
def test_from_hdf():
setup_hdf_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_hdf(TEST_HDF_FILENAME, key="test")
ray_df = pd.read_hdf(TEST_HDF_FILENAME, key="test")
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_hdf_file()
def test_from_msgpack():
setup_msgpack_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_msgpack(TEST_MSGPACK_FILENAME)
ray_df = pd.read_msgpack(TEST_MSGPACK_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_msgpack_file()
def test_from_stata():
setup_stata_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_stata(TEST_STATA_FILENAME)
ray_df = pd.read_stata(TEST_STATA_FILENAME)
assert ray_df_equals_pandas(ray_df, pandas_df)
teardown_stata_file()
def test_from_pickle():
setup_pickle_file(SMALL_ROW_SIZE)
pandas_df = | pandas.read_pickle(TEST_PICKLE_FILENAME) | pandas.read_pickle |
import pandas as pd
def calculate_demographic_data(print_data=True):
# Read data from file
df = pd.read_csv("adult.data.csv")
# How many of each race are represented in this dataset? This should be a Pandas series with race names as the index labels.
race_count = pd.Series(df['race'].value_counts())
# What is the average age of men?
average_age_men = round(df.loc[df['sex'] == 'Male', 'age'].mean(),1)
# What is the percentage of people who have a Bachelor's degree?
percentage_bachelors = round((df.education.value_counts().Bachelors/df.shape[0])*100 , 1)
# What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?
# What percentage of people without advanced education make more than 50K?
# with and without `Bachelors`, `Masters`, or `Doctorate`
temp = | pd.DataFrame(df, columns= ['education','salary']) | pandas.DataFrame |
import os.path
import pandas as pd
import networkx as nx
from tqdm import tqdm
years = [2019, 2020, 2021]
# Read dataframe
df = | pd.read_csv('https://github.com/alvarofpp/dataset-flights-brazil/raw/main/data/anac.zip') | pandas.read_csv |
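# A sketch of how this dataframe could be turned into a flight graph with
# networkx. The 'origin_airport' and 'destination_airport' column names are
# assumptions for illustration; the real ANAC columns may be named differently.
def _build_flight_graph(flights_df):
    # Build a directed graph with one edge per origin/destination pair
    graph = nx.from_pandas_edgelist(flights_df,
                                    source='origin_airport',
                                    target='destination_airport',
                                    create_using=nx.DiGraph())
    return graph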
from unittest import TestCase
import pandas as pd
from moonstone.normalization.counts.random_selection import (
RandomSelection, TaxonomyRandomSelection
)
class TestRandomSelection(TestCase):
def setUp(self):
self.raw_data = [
[199, 1, 48, 75],
[0, 24, 1, 0],
[1, 25, 1, 25],
]
self.column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
self.index = ['Gen_1', 'Gen_2', 'Gen_3']
self.raw_df = pd.DataFrame(self.raw_data, columns=self.column_names, index=self.index)
def test_normalize_default_threshold(self):
expected_data = [
[50, 1, 48, 40],
[0, 24, 1, 0],
[0, 25, 1, 10],
]
expected_df = pd.DataFrame(expected_data, columns=self.column_names, index=self.index).astype(float)
tested_normalization = RandomSelection(self.raw_df, random_seed=2935)
| pd.testing.assert_frame_equal(tested_normalization.normalized_df, expected_df) | pandas.testing.assert_frame_equal |
"""
In this file I will look to do the following
1. Loop thought various threasholds that separate top lowest 90% of values
a. Optimise a RF for the threashold
b. Run the RF over the test data
c. record the ROC_AUC for the RF on the test data
2. Plot the ROC_AUC vs the threashold"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.tree import DecisionTreeClassifier
from evaluate_model import evaluate_model
def find_best_params(dtfm, labels_col, classifier=RandomForestClassifier,
test_size=0.3, random_state=np.random, logger=False):
"""Returns the best params in terms of roc_auc for an optimised Random Forest
Trained and tested over a passed in dataset
USGAE: find_best_params(dtfm, labels_col, test_size=0.3, random_state=np.random, logger=logger)
INPUTS:
dtfm: dataframe to train and test model on
labels_col: column title containing the classification flag
**Optional** (default)
test_size: (0.3) proportion of data set to use for testing
random_state: (np.random) seed used by the random number generator
logger: (False) wheather to log outputs or not
OUTPUT:
roc_auc: (scalar) Area under the receiver operating
charachteristic curve
"""
dtfm_labels = dtfm.pop(labels_col)
# separate the labels from the data
labels = np.array(dtfm_labels)
print(dtfm.head())
# print value counts so we can see how split affects data
if logger:
print("Output value count:\n {}".format(dtfm_labels.value_counts()))
# split data into train and test sets split% test
train, test, train_labels, test_labels = train_test_split(dtfm, labels, stratify = labels, test_size = test_size, random_state=random_state)
#imputation of missing values
train = train.fillna(train.mean())
test = test.fillna(test.mean())
c_name = classifier.__name__
if c_name == 'RandomForestClassifier':
# Hyperparameter grid
param_grid = {
'n_estimators': np.linspace(10, 200, 10).astype(int),
'max_depth': [None] + list(np.linspace(3, 20, 10).astype(int)),
'max_features': ['auto', 'sqrt', None] + list(np.arange(0.5, 1, 0.1)),
'max_leaf_nodes': [None] + list(np.linspace(10, 50, 10).astype(int)),
'min_samples_split': [2],
'bootstrap': [True],
}
elif c_name == 'DecisionTreeClassifier':
# Hyperparameter grid
param_grid = {
'max_features': ['auto', 'sqrt', None] + list(np.arange(0.5, 1, 0.1)),
'max_leaf_nodes': [None] + list(np.linspace(10, 50, 10).astype(int)),
'min_samples_split': [2],
}
else:
raise ValueError('That is not a supported Classifier')
# Estimator for use in random search
estimator = classifier(random_state = random_state)
# Create the random search model
rs = RandomizedSearchCV(estimator, param_grid, n_jobs = -1,
scoring = 'roc_auc', cv = 3,
n_iter = 10, verbose = 1,
random_state=random_state)
# Fit
rs.fit(train, train_labels)
print("Best params:\n{}".format(rs.best_params_))
# print result
if logger:
print("Best params:\n{}".format(rs.best_params_))
return rs.best_params_
def roc_auc(dtfm, labels_col, c_params, classifier=RandomForestClassifier,
test_size=0.3, random_state=np.random, logger=False, optimise=True):
"""Returns the roc_auc for an optimised Random Forest
Trained and tested over a passed in dataset
USGAE: roc_auc(dtfm, labels_col, test_size=0.3, random_state=np.random, logger=logger)
INPUTS:
dtfm: dataframe to train and test model on
labels_col: column title containing the classification flag
**Optional** (default)
test_size: (0.3) proportion of data set to use for testing
random_state: (np.random) seed used by the random number generator
logger: (False) wheather to log outputs or not
OUTPUT:
roc_auc: (scalar) Area under the receiver operating
charachteristic curve
"""
dtfm_labels = dtfm.pop(labels_col)
# separate the labels from the data
labels = np.array(dtfm_labels)
# print value counts so we can see how split affects data
if logger:
print("Output value count:\n {}".format(dtfm_labels.value_counts()))
# split data into train and test sets split% test
train, test, train_labels, test_labels = train_test_split(dtfm, labels, stratify = labels, test_size = test_size, random_state=random_state)
#imputation of missing values
train = train.fillna(train.mean())
test = test.fillna(test.mean())
# Features for feature importances
features = list(train.columns)
c_name = classifier.__name__
if c_name == 'RandomForestClassifier':
# tune model
model = classifier(n_estimators=c_params['n_estimators'], max_depth=c_params['max_depth'],
max_features=c_params['max_features'], max_leaf_nodes=c_params['max_leaf_nodes'],
min_samples_split=c_params['min_samples_split'], bootstrap=c_params['bootstrap'],
random_state = random_state)
elif c_name == 'DecisionTreeClassifier':
# tune model
model = classifier(max_features=c_params['max_features'], max_leaf_nodes=c_params['max_leaf_nodes'],
min_samples_split=c_params['min_samples_split'], random_state=random_state)
else:
raise ValueError('That is not a supported Classifier')
# Fit
model.fit(train, train_labels)
train_predictions = model.predict(train)
train_probs = model.predict_proba(train)[:, 1]
predictions = model.predict(test)
probs = model.predict_proba(test)[:, 1]
[baseline, results, train_results ] = evaluate_model(predictions, probs,
train_predictions, train_probs,
test_labels, train_labels, logger=logger)
# calculate variables of most importance in model
fi_model = pd.DataFrame({'feature': features,
'importance': model.feature_importances_})
if logger:
print("Features importance in RF:\n{}".format(fi_model.sort_values('importance', 0, ascending=True)))
return [
baseline, results,
train_results, fi_model,
]
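# A minimal usage sketch tying the two helpers above together. The label
# column name is the default used elsewhere in this file; copies are passed
# because both helpers pop the label column from the dataframe they receive.
def _demo_rf_evaluation(dtfm, label_col='BLAST_D8'):
    best_params = find_best_params(dtfm.copy(), label_col, random_state=50, logger=True)
    baseline, results, train_results, fi_model = roc_auc(
        dtfm.copy(), label_col, best_params, random_state=50, logger=True)
    return results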
def test_threasholds(threasholds, dtfm, classifier=RandomForestClassifier,
dep_key='BLAST_D8', random_state=50, logger=False, optimise=True):
    if logger:
        print("Thresholds at which we'll calculate ROC_AUC: \n {}".format(threasholds))
# ROC_AUC array
roc_auc_arr = []
roc_auc_train_arr = []
# Precision
precision_arr = []
precision_train_arr = []
# Recall
recall_arr = []
recall_train_arr = []
#Accuracy
accuracy_arr = []
accuracy_train_arr = []
# feature importance
fi_dtfm = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28, 2020
@author: jpdeleon
"""
# Import standard library
import itertools
from datetime import datetime
from pathlib import Path
# Import modules
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
import networkx as nx
# Import from package
from fastquant import get_pse_data_cache, DATA_PATH
pl.style.use("fivethirtyeight")
CALENDAR_FORMAT = "%m-%d-%Y"
TODAY = datetime.now().date().strftime(CALENDAR_FORMAT)
__all__ = ["Network"]
class Network:
"""
Parameters
----------
symbol : str
phisix company symbol (optional)
sector : str
specific sector
sigma : float
outlier rejection threshold (default=None)
metric : str
distance metrics:
bonnano=distinguishes between a + or a - correlated pair of stocks;
bonnano=does not distinguish
n_companies : str
limit to top n companies correlated to symbol (if symbol is given)
"""
def __init__(
self,
symbol=None,
sector=None,
start_date="1-1-2020",
end_date=None,
metric="bonnano",
n_companies=5,
sigma=5,
exclude_symbols=None,
interpolation_method="pad",
indicator="close",
verbose=True,
clobber=False,
update_cache=False,
):
self.symbol = None if symbol is None else symbol.upper()
self.sector = sector
self.start_date = start_date
self.end_date = TODAY if end_date is None else end_date
self.stock_data = None
self.verbose = verbose
self.clobber = clobber
self.sigma = sigma
self.exclude_symbols = exclude_symbols
self.indicator = indicator
self.interpolation_method = interpolation_method
self.n_companies = n_companies
self.update_cache = update_cache
self.data_cache = get_pse_data_cache(
update=self.update_cache, verbose=False
)
self.data = self.data_cache.xs(indicator, level=1, axis=1)
self.filtered_data = self.filter_data()
self.company_table = self.load_company_table()
self.all_sectors = self.company_table.Sector.unique().tolist()
self.all_subsectors = self.company_table.Subsector.unique().tolist()
self.price_corr = self.compute_corr()
self.metric = metric.lower()
self.dist, self.labs = self.build_distance_matrix()
self.MST = self.build_minimum_spanning_tree()
self.populate_graph_attribute()
def load_company_table(self):
fp = Path(DATA_PATH, "stock_table.csv")
table = | pd.read_csv(fp) | pandas.read_csv |
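# A minimal usage sketch for the Network class above (kept as comments because
# building it triggers network/cache access; the symbol and dates are
# illustrative, and MST is the minimum spanning tree built in __init__):
#
#     net = Network(symbol="JFC", start_date="1-1-2020", n_companies=5)
#     print(list(net.MST.nodes()))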
import numpy as np
import pandas as pd
import string
from sklearn.utils import resample
base_dir = "/opt/ml/processing"
df = pd.read_csv( f"{base_dir}/input/Womens Clothing E-Commerce Reviews.csv")
df = df[df['Review Text'].notna()] # drop rows where Review text is missing
def process_review(text):
punctuation = string.punctuation
review = text.lower()
review = review.replace("\r\n", " ").replace("\n\n", " ")
translator = str.maketrans("","", punctuation)
review = review.translate(translator)
return review
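# A quick illustration of what the cleaner above produces (the review text is
# made up):
#
#     process_review("Great fit!\r\nRuns true to size.")
#     -> 'great fit runs true to size'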
# create columns for concat reviews and new labels
df['Complete_Review'] = df['Title'] + ' ' + df['Review Text']
df = df[df['Complete_Review'].notna()] # drop rows where review text is missing
df['Label'] = df['Rating'].map({1:'negative',2:'negative',3:'none',4:'none',5:'positive'})
df = df.loc[df['Label'].isin(['negative','positive'])] # only use positive and negative reviews
df['Review'] = df['Complete_Review'].astype(str).apply(process_review)
df['Processed'] = '__label__' + df['Label'].astype(str) + ' ' + df['Review']
# create train/validation/test split (70/15/15)
train, validation, test = np.split(df, [int(0.7 * len(df)), int(0.85 * len(df))])
# deal with unbalanced classes
# only resample the training set so there is no data leakage into the validation/test sets
positive = train.loc[train['Label']=='positive']
negative = train.loc[train['Label']=='negative']
# oversample the minority classes
negative_oversample = resample(negative, replace=True, n_samples=len(positive))
# remake training set using balanced class samples
train = | pd.concat([positive,negative_oversample]) | pandas.concat |
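# A possible next step (an assumption; the original script is truncated here):
# shuffle the rebalanced training set and write the '__label__...' lines out
# for training, e.g.
#
#     train = train.sample(frac=1, random_state=42)
#     train['Processed'].to_csv(f"{base_dir}/train/train.csv", header=False, index=False)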
import pandas as pd
import pandas.testing as tm
print(pd.__version__)
tdi = pd.timedelta_range("1 Day", periods=3)
ser = | pd.Series(tdi) | pandas.Series |
import pandas as pd
from dplypy.dplyframe import DplyFrame
from dplypy.pipeline import drop
def test_drop():
pandas_df = pd.DataFrame(
data={
"col1": [0, 1, 2, 3],
"col2": [3, 4, 5, 6],
"col3": [6, 7, 8, 9],
"col4": [9, 10, 11, 12],
}
)
# Drop by columns
df1 = DplyFrame(pandas_df)
output1 = df1 + drop("col1", axis=1)
expected1 = pandas_df.drop("col1", axis=1)
pd.testing.assert_frame_equal(output1.pandas_df, expected1)
df2 = DplyFrame(pandas_df)
output2 = df2 + drop(["col3", "col4"], axis=1)
expected2 = pandas_df.drop(["col3", "col4"], axis=1)
pd.testing.assert_frame_equal(output2.pandas_df, expected2)
try:
df2 + drop(["col1", "col5"], axis=1)
except KeyError:
pass
else:
raise AssertionError("KeyError was not raised")
df3 = DplyFrame(pandas_df)
output3 = df3 + drop(columns="col1")
expected3 = pandas_df.drop(columns="col1")
pd.testing.assert_frame_equal(output3.pandas_df, expected3)
df4 = DplyFrame(pandas_df)
output4 = df4 + drop(columns=["col3", "col4"])
expected4 = pandas_df.drop(columns=["col3", "col4"])
pd.testing.assert_frame_equal(output4.pandas_df, expected4)
df5 = DplyFrame(pandas_df)
output5 = df5 + drop(columns=["col3", "col4"], axis=1)
expected5 = pandas_df.drop(columns=["col3", "col4"], axis=1)
pd.testing.assert_frame_equal(output5.pandas_df, expected5)
try:
df5 + drop(columns=["col1", "col5"])
except KeyError:
pass
else:
raise AssertionError("KeyError was not raised")
# Drop by rows
df6 = DplyFrame(pandas_df)
output6 = df6 + drop(0)
expected6 = pandas_df.drop(0)
| pd.testing.assert_frame_equal(output6.pandas_df, expected6) | pandas.testing.assert_frame_equal |
__author__ = 'etuka'
import re
import os
import copy
import threading
import pandas as pd
from bson import ObjectId
from dal import cursor_to_list
import web.apps.web_copo.lookup.lookup as lookup
import web.apps.web_copo.templatetags.html_tags as htags
from web.apps.web_copo.schemas.utils import data_utils as d_utils
from dal.copo_da import Submission, DataFile, DAComponent, Person, Sample, Description
class Investigation:
def __init__(self, copo_isa_records=dict(), study_schema=dict()):
self.copo_isa_records = copo_isa_records
self.study_schema = study_schema
self.profile_id = str(self.copo_isa_records.get("profile").get("_id"))
def get_schema(self):
component = "investigation"
properties = d_utils.get_db_json_schema(component)
if properties:
for k in properties:
if k == "@id":
record = dict(name=self.copo_isa_records.get("submission_token"))
properties[k] = ISAHelpers().get_id_field(component=component, record=record)
else:
try:
properties[k] = getattr(Investigation, "_" + k)(self, properties[k])
except Exception as e:
print(e)
properties[k] = ISAHelpers().get_schema_key_type(properties.get(k, dict()))
return properties
def get_datafilehashes(self):
return self.copo_isa_records["datafilehashes"]
def _title(self, spec=dict()):
return ISAHelpers().get_schema_key_type(spec)
def _description(self, spec=dict()):
# this property is set from the Profile record
return self.copo_isa_records.get("profile").get("description", str())
def _identifier(self, spec=dict()):
return self.copo_isa_records.get("submission_token")
def _studies(self, spec=dict()):
return self.study_schema
def _ontologySourceReferences(self, spec=dict()):
osr = list()
records = list()
records = records + list(self.copo_isa_records.get("publication"))
records = records + list(self.copo_isa_records.get("person"))
records = records + list(self.copo_isa_records.get("sample"))
records = records + list(self.copo_isa_records.get("source"))
records = records + list(self.copo_isa_records.get("datafile"))
records.append(self.copo_isa_records.get("technology_type"))
target_record_list = list()
target_object_keys = set(d_utils.get_db_json_schema("ontology_annotation").keys())
for record in records:
target_record_list = ISAHelpers().get_object_instances(record, target_record_list, target_object_keys)
termsources = [x["termSource"] for x in target_record_list if len(x["termSource"]) > 0]
termsources = list(set(termsources))
component = "ontology_source_reference"
# get ontology base uri
base_url = lookup.ONTOLOGY_LKUPS.get("ontology_file_uri", str())
for ts in termsources:
value_dict = dict(
name=ts,
file=base_url + ts
)
osr_schema = d_utils.get_db_json_schema(component)
for k in osr_schema:
if k == "@id":
osr_schema[k] = ISAHelpers().get_id_field(component, dict(name=ts))
else:
osr_schema[k] = value_dict.get(k, ISAHelpers().get_schema_key_type(osr_schema.get(k, dict())))
osr.append(osr_schema)
return osr
def _submissionDate(self, spec=dict()):
# todo: not decided on a value for this property, return a default type
return ISAHelpers().get_schema_key_type(spec)
def _filename(self, spec=dict()):
filename = 'i_' + self.copo_isa_records.get("submission_token") + '.txt'
return filename
def _publicReleaseDate(self, spec=dict()):
# todo: not decided on a value for this property, return a default type
return ISAHelpers().get_schema_key_type(spec)
def _publications(self, spec=dict()):
# set at the Study level
return ISAHelpers().get_schema_key_type(spec)
def _people(self, spec=dict()):
# set at the Study level
return ISAHelpers().get_schema_key_type(spec)
def _comments(self, spec=dict()):
# todo: not decided on a value for this property, return a default type
return ISAHelpers().get_schema_key_type(spec)
class Study:
def __init__(self, copo_isa_records=str(), assay_schema=dict()):
self.copo_isa_records = copo_isa_records
self.assay_schema = assay_schema
self.profile_id = str(self.copo_isa_records.get("profile").get("_id"))
def get_schema(self):
component = "study"
schemas = list()
properties = d_utils.get_db_json_schema(component)
if properties:
for k in properties:
if k == "@id":
record = dict(name=self.copo_isa_records.get("submission_token"))
properties[k] = ISAHelpers().get_id_field(component=component, record=record)
else:
try:
properties[k] = getattr(Study, "_" + k)(self, properties[k])
except Exception as e:
print(e)
properties[k] = ISAHelpers().get_schema_key_type(properties.get(k, dict()))
schemas.append(properties)
return schemas
def _assays(self, spec):
return self.assay_schema
def _publications(self, spec=dict()):
component = "publication"
return ISAHelpers().get_isa_records(component, list(self.copo_isa_records.get(component)))
def _people(self, spec=dict()):
component = "person"
return ISAHelpers().get_isa_records(component, list(self.copo_isa_records.get(component)))
def _studyDesignDescriptors(self, spec=dict()):
# this property is contingent on the 'study type' associated with a datafile
sdd = list()
# this needs to be represented as an ontology annotation
value_dict = dict(annotationValue=d_utils.lookup_study_type_label(self.copo_isa_records.get("study_type"))
)
component = "ontology_annotation"
isa_schema = d_utils.get_db_json_schema(component)
for k in isa_schema:
isa_schema = ISAHelpers().resolve_schema_key(isa_schema, k, component, value_dict)
sdd.append(isa_schema)
return sdd
def _protocols(self, spec=dict()):
# this property is contingent on the 'study type' associated with a datafile
protocols = list()
# get protocols
protocol_list = list(self.copo_isa_records["protocol_list"])
for pr in protocol_list:
# parameters
parameters = list()
for pv in pr.get("parameterValues", list()):
pv = htags.trim_parameter_value_label(pv).lower()
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k, "ontology_annotation",
dict(annotationValue=pv))
pv_dict = dict(parameterName=ontology_schema)
pp_schema = d_utils.get_db_json_schema("protocol_parameter")
for k in pp_schema:
if k == "@id":
pp_schema[k] = ISAHelpers().get_id_field("parameter", dict(
name=pv.replace(" ", "_")))
else:
pp_schema[k] = pv_dict.get(k,
ISAHelpers().get_schema_key_type(pp_schema.get(k, dict())))
parameters.append(pp_schema)
# components
components = list()
if pr.get("name", str()) == "nucleic acid sequencing":
# get sequencing instrument attached datafiles
seq_instruments = list(self.copo_isa_records["seq_instruments"])
for si in seq_instruments:
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k,
"ontology_annotation",
dict(annotationValue="DNA sequencer"))
# get components properties
component_schema = d_utils.get_db_json_schema("protocol").get("components", dict()).get("items",
dict()).get(
"properties", dict())
components_value_dict = dict(componentName=si,
componentType=ontology_schema)
for k in component_schema:
component_schema[k] = components_value_dict.get(k, ISAHelpers().get_schema_key_type(
component_schema.get(k, dict())))
components.append(component_schema)
# protocolType
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k,
"ontology_annotation",
dict(annotationValue=pr.get("name", str())))
protocol_type = ontology_schema
value_dict = dict(
name=pr.get("name", str()),
parameters=parameters,
components=components,
protocolType=protocol_type
)
protocol_schema = d_utils.get_db_json_schema("protocol")
for k in protocol_schema:
if k == "@id":
protocol_schema[k] = ISAHelpers().get_id_field("protocol", dict(
name=pr.get("name", str()).replace(" ", "_")))
else:
protocol_schema[k] = value_dict.get(k, ISAHelpers().get_schema_key_type(
protocol_schema.get(k, dict())))
protocols.append(protocol_schema)
return protocols
def _materials(self, spec=dict()):
sources = list(self.copo_isa_records["treated_source"])
samples = list(self.copo_isa_records["treated_sample"])
materials_value_dict = dict(sources=sources,
samples=samples
)
materials_properties = spec.get("properties", dict())
for k in materials_properties:
materials_properties[k] = materials_value_dict.get(k, ISAHelpers().get_schema_key_type(
materials_properties.get(k, dict())))
return materials_properties
def _processSequence(self, spec=dict()):
process_sequence = list()
samples = list(self.copo_isa_records["treated_sample"])
# get executed protocol
executes_protocol = [p for p in self._protocols(dict()) if "sample collection" in p.get("name")]
id_part = str()
if executes_protocol:
id_part = (executes_protocol[0]["name"]).replace(" ", "_")
executes_protocol = {"@id": executes_protocol[0]["@id"]}
else:
executes_protocol = dict()
for indx, sample in enumerate(samples):
value_dict = dict(
executesProtocol=executes_protocol,
inputs=sample.get("derivesFrom", list()),
outputs=[{"@id": sample["@id"]}]
)
process_schema = d_utils.get_db_json_schema("process")
for k in process_schema:
if k == "@id":
process_schema[k] = ISAHelpers().get_id_field("process", dict(
name=id_part + str(indx + 1)))
else:
process_schema[k] = value_dict.get(k,
ISAHelpers().get_schema_key_type(process_schema.get(k, dict())))
process_sequence.append(process_schema)
return process_sequence
def _factors(self, spec=dict()):
factors = list()
seen_list = list()
components = ["sample"]
for component in components:
component_list = list(self.copo_isa_records[component])
for rec in component_list:
for fv in rec.get("factorValues", list()):
cat_dict = fv.get("category", dict())
annotation_value = cat_dict.get("annotationValue", str())
if annotation_value and annotation_value.lower() not in seen_list:
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k,
"ontology_annotation",
cat_dict)
value_dict = dict(
factorName=annotation_value,
factorType=ontology_schema)
factor_schema = d_utils.get_db_json_schema("factor")
for k in factor_schema:
if k == "@id":
factor_schema[k] = ISAHelpers().get_id_field("factor",
dict(
name=annotation_value.replace(
" ", "_")))
else:
factor_schema[k] = value_dict.get(k, ISAHelpers().get_schema_key_type(
factor_schema.get(k, dict())))
factors.append(factor_schema)
seen_list.append(annotation_value.lower())
return factors
def _characteristicCategories(self, spec=dict()):
characteristic_categories = list()
seen_list = list()
components = ["sample", "source"]
for component in components:
component_list = list(self.copo_isa_records[component])
for rec in component_list:
# get organism
if "organism" in rec and "organism" not in seen_list:
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k,
"ontology_annotation",
dict(annotationValue="organism"))
value_dict = dict(characteristicType=ontology_schema)
material_attribute_schema = d_utils.get_db_json_schema("material_attribute")
for k in material_attribute_schema:
if k == "@id":
material_attribute_schema[k] = ISAHelpers().get_id_field("characteristic_category", dict(
name="organism"))
else:
material_attribute_schema[k] = value_dict.get(k, ISAHelpers().get_schema_key_type(
material_attribute_schema.get(k, dict())))
characteristic_categories.append(material_attribute_schema)
seen_list.append("organism")
for ch in rec.get("characteristics", list()):
cat_dict = ch.get("category", dict())
annotation_value = cat_dict.get("annotationValue", str())
if annotation_value and annotation_value.lower() not in seen_list:
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k,
"ontology_annotation",
cat_dict)
value_dict = dict(characteristicType=ontology_schema)
material_attribute_schema = d_utils.get_db_json_schema("material_attribute")
for k in material_attribute_schema:
if k == "@id":
material_attribute_schema[k] = ISAHelpers().get_id_field("characteristic_category",
dict(
name=annotation_value.replace(
" ", "_")))
else:
material_attribute_schema[k] = value_dict.get(k, ISAHelpers().get_schema_key_type(
material_attribute_schema.get(k, dict())))
characteristic_categories.append(material_attribute_schema)
seen_list.append(annotation_value.lower())
return characteristic_categories
def _unitCategories(self, spec=dict()):
unit_categories = list()
seen_list = list()
components = ["sample", "source"]
for component in components:
component_list = list(self.copo_isa_records[component])
for rec in component_list:
# get units from both characteristics and factors
combined_list = rec.get("characteristics", list()) + rec.get("factorValues", list())
for ch in combined_list:
# value...
# called up here mainly to [in]validate the 'unit' property
value_dict = ch.get("value", dict())
annotation_value = value_dict.get("annotationValue", str())
is_numeric = False
if annotation_value != "":
try:
annotation_value = float(annotation_value)
is_numeric = True
except ValueError:
pass
if is_numeric:
unit_cat = ch.get("unit", dict())
annotation_value = unit_cat.get("annotationValue", str())
if annotation_value != "" and annotation_value.lower() not in seen_list:
ontology_schema = d_utils.get_db_json_schema("ontology_annotation")
for k in ontology_schema:
if k == "@id":
ontology_schema[k] = ISAHelpers().get_id_field("unit",
dict(
name=annotation_value.replace(
" ", "_")))
else:
ontology_schema = ISAHelpers().resolve_schema_key(ontology_schema, k,
"ontology_annotation",
unit_cat)
unit_categories.append(ontology_schema)
seen_list.append(annotation_value.lower())
return unit_categories
def _comments(self, spec=dict()):
comments = d_utils.json_to_pytype(lookup.SRA_COMMENTS).get("properties", list())
return comments
def _publicReleaseDate(self, spec=dict()):
return ISAHelpers().get_schema_key_type(spec)
def _submissionDate(self, spec=dict()):
return ISAHelpers().get_schema_key_type(spec)
def _description(self, spec=dict()):
return ISAHelpers().get_schema_key_type(spec)
def _title(self, spec=dict()):
# this property is set from the Profile record
return self.copo_isa_records.get("profile").get("title", str())
def _identifier(self, spec=dict()):
return self.copo_isa_records.get("submission_token")
def _filename(self, spec=dict()):
filename = 's_' + self.copo_isa_records.get("submission_token") + '.txt'
return filename
class Assay:
def __init__(self, copo_isa_records=str()):
self.copo_isa_records = copo_isa_records
self.profile_id = str(self.copo_isa_records.get("profile").get("_id"))
self.process_sequence = list()
def get_schema(self):
component = "assay"
schemas = list()
properties = d_utils.get_db_json_schema(component)
if properties:
for k in properties:
if k == "@id":
record = dict(name=self.copo_isa_records.get("submission_token"))
properties[k] = ISAHelpers().get_id_field(component=component, record=record)
else:
try:
properties[k] = getattr(Assay, "_" + k)(self, properties[k])
except Exception as e:
print(e)
properties[k] = ISAHelpers().get_schema_key_type(properties.get(k, dict()))
schemas.append(properties)
return schemas
def _comments(self, spec=dict()):
return ISAHelpers().get_schema_key_type(spec)
def _filename(self, spec=dict()):
filename = 'a_' + self.copo_isa_records.get("submission_token") + '.txt'
return filename
def _measurementType(self, spec=dict()):
config_source = ISAHelpers().get_config_source(self.copo_isa_records.get("study_type"))
measurement_type = ISAHelpers().get_assay_file_measurement(config_source)
return measurement_type
def _technologyType(self, spec=dict()):
return self.copo_isa_records.get("technology_type")
def _technologyPlatform(self, spec=dict()):
# todo: need to find out how to set value for this property
return ISAHelpers().get_schema_key_type(spec)
def _dataFiles(self, spec=dict()):
component = "datafile"
datafiles = list(self.copo_isa_records["datafile"])
# get datafiles from the submission record
datafiles = [ISAHelpers().refactor_datafiles(element) for element in datafiles]
datafiles = ISAHelpers().get_isa_records(component, datafiles)
df = pd.DataFrame(datafiles)
remote_path = d_utils.get_ena_remote_path(self.copo_isa_records.get("submission_token"))
df["name"] = df["name"].apply(ISAHelpers().refactor_datafile_reference, args=(remote_path,))
datafiles = df.to_dict('records')
return datafiles
def _materials(self, spec=dict()):
samples = list(self.copo_isa_records["sample"])
samps = list()
other_materials = list()
if samples:
df = pd.DataFrame(samples)
samps = list(df['name'].apply(ISAHelpers().refactor_sample_reference))
other_materials = list(df['name'].apply(ISAHelpers().refactor_material))
value_dict = dict(otherMaterials=other_materials,
samples=samps
)
materials_properties = spec.get("properties", dict())
for k in materials_properties:
materials_properties[k] = value_dict.get(k, ISAHelpers().get_schema_key_type(
materials_properties.get(k, dict())))
return materials_properties
def _characteristicCategories(self, spec=dict()):
characteristicCategories = list()
return characteristicCategories
def _unitCategories(self, spec=dict()):
unitCategories = list()
return unitCategories
def _processSequence(self, spec=dict()):
# get relevant protocols
protocol_list_temp = list(self.copo_isa_records["protocol_list"])
protocol_list_temp[:] = [d for d in protocol_list_temp if d.get('name') not in ["sample collection"]]
# get pairing map, if it exists
description_token = self.copo_isa_records["submission_record"].get("description_token", str())
pairing_info = list()
if description_token:
pairing_info = Description().GET(description_token).get("attributes", dict()).get("datafiles_pairing",
list())
pairing_info = pd.DataFrame(pairing_info)
if len(pairing_info):
pairing_info.columns = ["file1","file2"]
pairing_info['combined'] = pairing_info.file1 + "," + pairing_info.file2
datafiles_df = | pd.DataFrame(self.copo_isa_records["datafile"]) | pandas.DataFrame |
import logging
import pandas as pd
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
def read_alignment(filename):
"""
Take in a multiple sequence alignment(msa) output file and output a list
Parameters
----------
filename: str
filepath to msa file
Returns
-------
seq_list: list
        list of (header, sequence) tuples parsed from the MSA file
"""
seq_list = []
seq = ''
with open(filename) as f:
for line in f:
if line[0] == '>':
if len(seq) > 0:
seq_list.append((header, seq.upper()))
seq = ''
header = line[1:].strip('\n')
else:
seq = ''
header = line[1:].strip('\n')
else:
seq += line.strip('\n')
if len(seq) > 0:
seq_list.append((header, seq.upper()))
return seq_list
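# A minimal usage sketch (the FASTA path is a placeholder):
#
#     records = read_alignment("aligned_sequences.fasta")
#     first_header, first_seq = records[0]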
def load_msa_data(filename):
"""
Take in a multiple sequence alignment(msa) output file and output a dataframe
Parameters
----------
filename: str
filepath to msa file
Returns
-------
seq_df: pandas.DataFrame
Pandas dataframe containing msa
"""
seq_list = read_alignment(filename)
seq_dict = {'gisaid_epi_isl': [], 'sequence': []}
for header, seq in seq_list:
header = header.split('|')[1]
seq_dict['gisaid_epi_isl'].append(header)
seq_dict['sequence'].append(seq)
seq_df = | pd.DataFrame.from_dict(seq_dict) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler, MinMaxScaler
from sklearn.linear_model import LogisticRegressionCV, LinearRegression, RidgeCV, LassoCV
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from interpret.utils import unify_data, autogen_schema
from interpret.glassbox.ebm.ebm import EBMPreprocessor
from .base import MyGAMPlotMixinBase, MyCommonBase
from .EncodingBase import LabelEncodingRegressorMixin, LabelEncodingClassifierMixin, OnehotEncodingRegressorMixin, OnehotEncodingClassifierMixin
from .utils import my_interpolate
class MyTransformMixin(object):
def transform(self, X):
return X
class MyTransformClassifierMixin(MyTransformMixin):
def predict_proba(self, X):
X = self.transform(X)
return super().predict_proba(X)
class MyTransformRegressionMixin(MyTransformMixin):
def predict(self, X):
X = self.transform(X)
return super().predict(X)
class MyStandardizedTransformMixin(object):
def __init__(self, *args, **kwargs):
assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
super().__init__(*args, **kwargs)
self.scaler = StandardScaler()
def fit(self, X, y):
X = self.scaler.fit_transform(X)
return super().fit(X, y)
def transform(self, X):
X = self.scaler.transform(X)
return super().transform(X)
class MyMaxMinTransformMixin(object):
def __init__(self, *args, **kwargs):
assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
super().__init__(*args, **kwargs)
self.scaler = MinMaxScaler(feature_range=(-1, 1))
def fit(self, X, y):
X = self.scaler.fit_transform(X)
return super().fit(X, y)
def transform(self, X):
X = self.scaler.transform(X)
return super().transform(X)
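# A sketch of how these mixins are meant to compose with a scikit-learn
# estimator through cooperative multiple inheritance. The concrete class below
# is an illustration, not part of this module: the scaler mixin has to come
# before the transform mixin in the bases so that its fit/transform run first
# in the MRO.
class _DemoStandardizedLinearRegression(MyStandardizedTransformMixin,
                                        MyTransformRegressionMixin,
                                        LinearRegression):
    """Scales inputs with StandardScaler before delegating to LinearRegression."""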
class MyEBMPreprocessorTransformMixin(object):
def __init__(self, binning='uniform', **kwargs):
assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
super().__init__(**kwargs)
self.prepro_feature_names = None
self.feature_types = None
self.schema = None
self.binning = binning
def fit(self, X, y):
X, y, self.prepro_feature_names, _ = unify_data(
X, y
)
# Build preprocessor
self.schema_ = self.schema
if self.schema_ is None:
self.schema_ = autogen_schema(
X, feature_names=self.prepro_feature_names, feature_types=self.feature_types
)
self.preprocessor_ = EBMPreprocessor(schema=self.schema_, binning=self.binning)
self.preprocessor_.fit(X)
X = self.preprocessor_.transform(X)
return super().fit(X, y)
def transform(self, X):
X, _, _, _ = unify_data(X, None, self.prepro_feature_names, self.feature_types)
X = self.preprocessor_.transform(X)
return super().transform(X)
class MyMarginalizedTransformMixin(object):
def __init__(self, *args, **kwargs):
assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
super().__init__(*args, **kwargs)
self.X_mapping = {}
def fit(self, X, y):
# My marginal transformation
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
original_columns = X.columns
X['label'] = y
for col_idx, col in enumerate(original_columns):
self.X_mapping[col_idx] = X.groupby(col).label.apply(lambda x: x.mean())
X = X.drop('label', axis=1)
X = self._transform(X)
return super().fit(X, y)
def transform(self, X):
X = self._transform(X)
return super().transform(X)
def _transform(self, X):
assert len(self.X_mapping) > 0
if isinstance(X, pd.DataFrame):
X = X.values
        new_X = np.empty(X.shape, dtype=float)  # np.float is deprecated in recent numpy; use the builtin float
for col_idx in range(X.shape[1]):
x_unique = np.sort(np.unique(X[:, col_idx]))
x_map = self.X_mapping[col_idx]
if len(x_map) != len(x_unique) or np.any(x_map.index != x_unique):
new_y = my_interpolate(x_map.index, x_map.values, x_unique)
x_map = | pd.Series(new_y, index=x_unique) | pandas.Series |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Jun 21, 2017
"""
from __future__ import division
import warnings
import networkx as nx
import numpy as np
import pandas as pd
import scipy.stats as stats
from ..utils.stat_utils import robust_lookup
# TODO: support categorical (non-numeric) data predecessors.
COVARIATE = "covariate"
HIDDEN = "hidden"
TREATMENT = "treatment"
OUTCOME = "outcome"
CENSOR = "censor"
EFFECT_MODIFIER = "effect_modifier"
VALID_VAR_TYPES = {COVARIATE, HIDDEN, TREATMENT, OUTCOME, CENSOR, EFFECT_MODIFIER}
CATEGORICAL = "categorical"
SURVIVAL = "survival"
CONTINUOUS = "continuous"
PROBABILITY = "probability"
DEFAULT_LINK_TYPE = "linear"
BASELINE_SURVIVAL_PARAM = 1.0
class CausalSimulator3(object):
TREATMENT_METHODS = {"random": lambda x, p, snr, params: CausalSimulator3._treatment_random(x, p),
"odds_ratio": lambda x, p, snr, params: CausalSimulator3._treatment_odds_ratio(x, p, snr),
"quantile_gauss_fit": lambda x, p, snr, params: CausalSimulator3._treatment_quantile_gauss_fit(
x, p, snr),
"logistic": lambda x, p, snr, params: CausalSimulator3._treatment_logistic_dichotomous(x, p,
params=params),
"gaussian": lambda x, p, snr, params: CausalSimulator3._treatment_gaussian_dichotomous(x, p,
snr)}
# G for general - applicable to all types of variables
G_LINKING_METHODS = {"linear": lambda x, beta=None: CausalSimulator3._linear_link(x, beta),
"affine": lambda x, beta=None: CausalSimulator3._affine_link(x, beta),
"exp": lambda x, beta=None: CausalSimulator3._exp_linking(x, beta),
"log": lambda x, beta=None: CausalSimulator3._log_linking(x, beta),
"poly": lambda x, beta=None: CausalSimulator3._poly_linking(x, beta)}
# O for outcome - outcome specific linking
O_LINKING_METHODS = {
"marginal_structural_model": lambda x, t, m, beta=None: CausalSimulator3._marginal_structural_model_link(
x, t, m, beta=beta),
None: lambda x, beta=None: x
}
def __init__(self, topology, var_types, prob_categories, link_types, snr, treatment_importances,
treatment_methods="gaussian", outcome_types=CATEGORICAL, effect_sizes=None,
survival_distribution="expon", survival_baseline=1, params=None):
"""
Constructor
Args:
topology (np.ndarray): A boolean adjacency matrix for variables (including covariates, treatment and outcome
variables of the model).
Every row is a binary vector for a variable, where v[i, j] = 1 iff j is a parent of i
var_types (Sequence[str]): Vector the size of variables stating every variable to be "covariate",
"hidden", "outcome", "treatment", "censor".
**Notes**: if type(pd.Series) variable names will be var_types.index, otherwise,
if no-key-vector - var names will be just range(num-of-variables).
prob_categories (Sequence[float|None]): vector the size of the number of variables.
if prob_categories[i] = None -> than variable i is considered continuous.
otherwise -> prob_categories[i] should be a list (or any iterable) which
size specifies number of categories variable i has, and it contains
multinomial probabilities for those categories (i.e. list non negative and
sums to 1).
link_types (str|Sequence[str]): set of string the size or string or specifying the relation between
covariate parents to the covariate itself
snr (float|Sequence[float]): Signal to noise ratio (use 1.0 to eliminate noise in the system).
May be a vector the size of number of variables for stating different snr
values for different variables.
treatment_importances (float|Sequence[float]): The effect of treatment on the outcome. A float between 0
and 1.0 stating how much weight the treatment variable have
vs. the other parents of an outcome variable.
*To support multi-treatment* - place a list the size of the
number of treatment variables (as stated in var_types).
The matching between treatment variable and its importance
will be according to the order of the treatment variables
and the order of the list. If all treatments variables has
the same importance - pass the float value.
treatment_methods (str|Sequence[str]): method for creating treatment assignment and propensities, can be
one of {"random", "gaussian", "logistic"}.
*To support multi-treatment* - place a list the size of the number of
treatment variables. The matching between treatment variable and its
creation method will be according to the order of the treatment
variables and the order of the list. If all treatment variables have the
same type - pass the str value.
outcome_types (str|Sequence[str]): outcome type can be 'categorical', 'continuous', 'probability' or 'survival'.
*To support multi-outcome* - place a list the size of the number of outcome
variables (as stated in var_types). The matching between outcome variable and
its type will be according to the order of the outcome variables and the order
of the list. If all outcome variables have the same type - pass the str value.
effect_sizes (float|Sequence[float|None]|None): The wanted mean effect size between two counterfactuals.
If None - The mean effect size will not be adjusted, but will be
whatever generated.
If float - The mean effect size will be adjusted to be approximately
the given number (considering the noise)
*To support multi-outcome* - a list the size the number of the outcome
variables (as stated in var_types). The matching between outcome
variable and its effect size will be according to the order of the
outcome variables and the order of the list.
survival_distribution (Sequence[str] or str): The distribution family from which to generate the outcome
values of outcome variables that their corresponding outcome_types is
"survival".
Default value is the exponential distribution ("expon").
The same survival distribution will be used for the corresponding
censoring variable as well.
*To support multi-outcome* - place a list the size of the number of
outcome variables of type "survival" (as stated in outcome_types). The
matching between survival outcome variable and its survival distribution
will be according to the order of the outcome variables and the order of
the list. If all outcome variables have the same survival distribution -
pass the str value (if present).
*Ignore if no outcome variable is of type survival*
survival_baseline (Sequence[float] or float): The survival baseline from the CoxPH model that will be the
basics for the parameters of the corresponding survival_distribution.
The same survival baseline will be used for the corresponding censoring
variable as well (if present).
Default value is 1 (no multiplicative meaning for baseline value).
*To support multi-outcome* - place a list the size of the number of
outcome variables of type "survival" (as stated in outcome_types). The
matching between survival outcome variable and its survival distribution
will be according to the order of the outcome variables and the order of
the list. If all outcome variables have the same survival distribution -
pass the str value.
*Ignore if no outcome variable is of type survival*
params (dict | None): Various parameters related to the generation process (e.g. the slope for
sigmoid-based functions etc.).
The form of: {var_name: {param_name: param_value, ...}, ...}
"""
# Find the indices of each type of variable:
var_types = pd.Series(var_types)
self.var_names = var_types.index.to_series().reset_index(drop=True)
self.var_types = var_types
self.treatment_indices = var_types[var_types == TREATMENT].index
self.outcome_indices = var_types[var_types == OUTCOME].index
self.covariate_indices = var_types[(var_types == COVARIATE) | (var_types == HIDDEN)].index
self.hidden_indices = var_types[var_types == HIDDEN].index
self.censor_indices = var_types[var_types == CENSOR].index
self.effmod_indices = var_types[var_types == EFFECT_MODIFIER].index
self.linking_coefs = {} # will accumulate the generated coefficients. {var: Series(coef, predecessors)}
# COMPLETE topology INTO A SQUARE ADJACENCY MATRIX:
# # let M be number of total variables, H number of variables to generate and L=M-H number of variables in a
# # given baseline dataset (that generated variables can be based on). Given Topology matrix can have either a
# # shape of MxM or HxM - in the latter case the matrix is completed into MxM by adding zero rows (since L
# # given variables would not be re-generated anyway, they will be considered independent variables).
# if topology.shape[0] != topology.shape[1]:
# rows, cols = topology.shape
# if cols > rows:
# null_submatrix = np.zeros((cols - rows, cols), dtype=bool)
# topology = np.row_stack((topology, null_submatrix))
# else:
# raise ValueError("Topology matrix has {rows} rows and {cols} columns. This is not supported since"
# "T[i,j] = 1 iff j is parent of i. ")
if topology.shape[0] != len(var_types):
raise ValueError("Number of variables in topology graph do not correspond to the number of variables states"
" in the variable types")
self.m = len(var_types) # number of variables
# Create a graph out of matrix topology:
self.topology = topology
self.graph_topology = nx.from_numpy_matrix(topology.transpose(), create_using=nx.DiGraph()) # type:nx.DiGraph
self.graph_topology = nx.relabel_nodes(self.graph_topology,
dict(list(zip(list(range(self.m)), self.var_names))))
# check that outcome variable is not dependant on more than 1 treatment variable
for i in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(i))
treatment_predecessors = self.treatment_indices.intersection(predecessors)
if len(treatment_predecessors) > 1: # outcome variable is dependent on more than one treatment
raise ValueError(
"Outcome {outcome} should have only one treatment affecting it. The current topology has outcome"
" variable dependant on {n_parent_treat} treatment parents which are: "
"{treatment_parents}".format(outcome=i, n_parent_treat=len(treatment_predecessors),
treatment_parents=treatment_predecessors))
elif len(treatment_predecessors) == 0:  # outcome variable has no treatment affecting it
warnings.warn("Outcome variable {} has no treatment affecting it".format(i), UserWarning)
# check that outcome variable is dependent on at most 1 censor variable
for i in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(i))
censor_predecessors = self.censor_indices.intersection(predecessors)
if len(censor_predecessors) > 1:  # outcome variable is dependent on more than one censor variable
raise ValueError(
"Outcome {outcome} should have at most one censor variable affecting it. The current topology has "
"outcome variable dependant on {n_parent_cens} treatment parents which are: "
"{cens_parents}".format(outcome=i, n_parent_cens=len(censor_predecessors),
cens_parents=censor_predecessors))
# check that effect modifier is independent on treatment and affects only the outcome:
for i in self.effmod_indices:
successors = list(self.graph_topology.successors(i))  # materialize the iterator so it can be reused below
if len(successors) == 0 or self.outcome_indices.intersection(successors).size < 1:
raise ValueError("Effect modifier variable {name} must affect an outcome variable".format(name=i))
ancestors = nx.ancestors(self.graph_topology, i)
if self.treatment_indices.intersection(ancestors).size > 0:
raise ValueError("Effect modifier variable {name} must not be affected by "
"treatment variable (which is one of {ances})".format(name=i, ances=ancestors))
# convert scalars to vectors if necessary.
self.prob_categories = self._convert_scalars_to_vectors(x=prob_categories, default_value=None,
x_type="prob_categories")
self.prob_categories = self.prob_categories.map(lambda x: pd.Series(x) if x is not None else x)
if self.prob_categories.isnull().all():
warnings.warn("Got all Nones in prob_categories. If simulation has Treatment variables in it, "
"this will throw an exception, as treatment variables must be categorical", UserWarning)
# Check that all treatment variables are categorical:
for i in self.treatment_indices:
if self.prob_categories[i] is None:
raise ValueError("Only categorical treatment is currently supported. However, treatment variable {t} "
"is not categorical. Please specify corresponding category_probabilities".format(t=i))
self.snr = self._convert_scalars_to_vectors(x=snr, default_value=1, x_type="snr")
self.link_types = self._convert_scalars_to_vectors(x=link_types, default_value=DEFAULT_LINK_TYPE,
x_type="link_type")
# if not all([x in self.VALID_LINK_TYPES for x in self.link_types]):
all_linking_types = list(self.G_LINKING_METHODS.keys()) + list(self.O_LINKING_METHODS.keys())
if not self.link_types.isin(all_linking_types).all():
raise ValueError("link type must be one of {}, "
"got {} instead.".format(list(all_linking_types),
list(set(self.link_types) - set(all_linking_types))))
self.treatment_methods = self._map_properties_to_variables(values=treatment_methods,
keys=self.treatment_indices, var_type="treatment",
value_type="methods")
# if not all([x in TREATMENT_METHODS.keys() for x in self.treatment_methods.values()]):
if not self.treatment_methods.isin(list(self.TREATMENT_METHODS.keys())).all():
raise ValueError("link type must be one of {}, "
"got {} instead.".format(list(self.TREATMENT_METHODS.keys()),
list(
set(self.treatment_methods) - set(self.TREATMENT_METHODS.keys()))))
self.treatment_importances = self._map_properties_to_variables(values=treatment_importances,
keys=self.treatment_indices,
var_type="treatment", value_type="importance")
self.outcome_types = self._map_properties_to_variables(values=outcome_types, keys=self.outcome_indices,
var_type="outcome", value_type="type")
for i in self.outcome_indices:
if self.outcome_types[i] == CONTINUOUS and self.prob_categories[i] is not None:
raise ValueError("Continuous outcome must be associated with None category probability. "
"This was not the case in variable {outcome_var}. "
"Might lead to undefined behaviour.".format(outcome_var=i))
if self.outcome_types[i] == CATEGORICAL and self.prob_categories[i] is None:
raise ValueError("Categorical outcome must be associated with category probability. However, None was "
"associated with variable {outcome_var}".format(outcome_var=i))
self.effect_sizes = self._map_properties_to_variables(values=effect_sizes, keys=self.outcome_indices,
var_type="outcome", value_type="effect size")
# map survival_related properties to survival outcome and their corresponding censor variables.
survival_outcome_variables = self.outcome_types[self.outcome_types.eq("survival")].index
self.survival_distribution = self._map_properties_to_variables(values=survival_distribution,
keys=survival_outcome_variables,
var_type="outcome",
value_type="survival_distribution")
self.survival_distribution[self.survival_distribution.isnull()] = "expon" # default is exponent distribution
self.survival_baseline = self._map_properties_to_variables(values=survival_baseline,
keys=survival_outcome_variables, var_type="outcome",
value_type="survival_baseline")
self.survival_baseline[self.survival_baseline.isnull()] = np.abs(np.random.normal(
loc=0.0, scale=1.0, size=self.survival_baseline.isnull().sum()))
for i in survival_outcome_variables:
topology_predecessors = list(self.graph_topology.predecessors(i))
censor_predecessors = self.censor_indices.intersection(topology_predecessors)
if len(censor_predecessors) > 0:
censor_predecessors = censor_predecessors[0]
# match between the outcome value and it's matching censor variable:
self.survival_distribution[censor_predecessors] = self.survival_distribution[i]
self.survival_baseline[censor_predecessors] = self.survival_baseline[i]
# self.params = params if params is not None else dict(zip(self.var_names, [None] * self.var_names.size))
self.params = params if params is not None else {}
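# Illustrative constructor sketch (assumed usage, not part of the original source). For a 3-variable
# graph x -> t -> y with x -> y, where topology[i, j] = 1 iff j is a parent of i:
#   topology = np.array([[0, 0, 0],    # x has no parents
#                        [1, 0, 0],    # t depends on x
#                        [1, 1, 0]],   # y depends on x and t
#                       dtype=bool)
#   sim = CausalSimulator3(topology=topology,
#                          var_types=["covariate", "treatment", "outcome"],
#                          prob_categories=[None, [0.5, 0.5], None],
#                          link_types="linear", snr=0.9,
#                          treatment_importances=0.8, treatment_methods="gaussian",
#                          outcome_types="continuous")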
# ### Initializing helper functions ### #
def _convert_scalars_to_vectors(self, x, default_value, x_type):
"""
Converts scalars (e.g. float, int, str, etc.) into vectors. Mapping between variable names to the desired value.
In context: If arguments given to the class init are scalar (i.e. float, int, str, etc.), converts them into
vector shape - mapping every variable to the given value
Args:
x (Any): the value wished to map to the variables.
if supplied with some sequence (e.g. list, array, Series, etc.) it will map the sequence to
variable names. if supplied with a scalar - it will duplicate the single value to all vars.
default_value (str|float|int|None): in case x=None (no value is supplied), map default_value to all vars
x_type (str): The type of value that currently being processed (e.g. the variable name in the python code),
so in case there is an error, it can display the python-variable that caused the error.
Returns:
x (pd.Series): A Series mapping between variable name and some wanted value.
Raises:
ValueError: If a sequence is given, but its length doesn't match the number of variables in topology.
"""
if np.isscalar(x) or x is None: # a scalar, not a sequence
if x is None: # put default value
x = pd.Series(data=default_value, index=self.var_names)
else: # a scalar is given, map it to all variables
x = pd.Series(data=x, index=self.var_names)
else:
# a sequence has been provided:
if len(x) != self.m:
raise ValueError("{x_type} should have same size as number of variables."
"Got {emp} instead of {sup}".format(x_type=x_type, emp=len(x), sup=self.m))
if isinstance(x, pd.Series) and x.index.difference(self.var_names).empty:
# if supplied with a Series which has its own indexing, and it matches the topology variables, then
# keep it as is.
x = x
else:
# either a simpler sequence or a Series with bad indexing, map to variable names.
x = pd.Series(data=x, index=self.var_names)
return x
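# Example of the scalar-to-vector conversion above (comment sketch; assumes var_names == [0, 1, 2]):
#   self._convert_scalars_to_vectors(x=0.9, default_value=1, x_type="snr")
#       -> pd.Series(0.9, index=[0, 1, 2])
#   self._convert_scalars_to_vectors(x=None, default_value=1, x_type="snr")
#       -> pd.Series(1, index=[0, 1, 2])
#   self._convert_scalars_to_vectors(x=[0.5, 0.9, 1.0], default_value=1, x_type="snr")
#       -> pd.Series([0.5, 0.9, 1.0], index=[0, 1, 2])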
@staticmethod
def _map_properties_to_variables(values, keys, var_type, value_type):
"""
Maps between covariate variables properties to these properties.
Args:
values (Any): some property of some variable (e.g. 0.7 for treatment_importance or
"binary" for outcome_type)
keys (Sequence[Any]): The name indices to which the given properties (values) are mapped (e.g. treatment_indices)
var_type (str {"covariate", "hidden", "treatment", "outcome", "censor"}): The type of variable the
properties being mapped to (e.g. "treatment", "outcome", "covariate")
value_type (str): The name type that the property belongs to. (e.g. the variable name in the python code),
so in case there's an error, it can display the python-variable that caused the error.
Returns:
res (pd.Series): A map between the given keys (some covariate variable names indices) to the given values
Raises:
ValueError: When a Sequence is given as values (e.g. list of properties) but it does not match the length
of the keys.
Warnings:
UserWarning: If values is a dict it is kept as-is; if its keys do not match the variable
names, a warning is issued.
Examples:
Where effect_sizes is a Sequence or a float, outcome_indices are the indices names of the outcome variables
in the graph. the variable type discussed is "outcome" (since it is effect-size). The python variable name
is effect_size, thus the value_type is effect_size.
map_properties_to_variables(values=effect_sizes, keys=self.outcome_indices, var_type="outcome",
value_type="effect size")
"""
if np.isscalar(values) or values is None:
# values is a single value (i.e. int or string), map its value to all given treatment variables:
res = dict(list(zip(keys, [values] * len(keys))))
else:
# some sequence provided
if len(keys) != len(values):
raise ValueError("The number of {var_t} variables: {n_keys} does not match the size of the list "
"depicting the {val_t} of creating each {var_t} variable: "
"{n_vals}".format(var_t=var_type, n_keys=len(keys),
val_t=value_type, n_vals=len(values)))
# values = values.values() if isinstance(values, dict) else values
if isinstance(values, dict):
# if given property is given by a dictionary, make sure this dict keys matches to the indices it
# suppose to map to:
res = values
if list(values.keys()) != keys:
warnings.warn("{var_t} {val_t} was given as dictionary but its keys ({val}) does not match the "
"{var_t} indices provided in topology ({keys}). You may expect "
"undefined behaviour".format(var_t=var_type, val_t=value_type,
val=list(values.keys()), keys=keys), UserWarning)
else:
res = dict(list(zip(keys, values)))
res = pd.Series(res, dtype=np.dtype(object))
res = res.infer_objects()
return res
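# Worked example of the mapping above (comment sketch; assumes treatment indices [1, 3]):
#   CausalSimulator3._map_properties_to_variables(values="gaussian", keys=pd.Index([1, 3]),
#                                                 var_type="treatment", value_type="methods")
#       -> pd.Series({1: "gaussian", 3: "gaussian"})
#   CausalSimulator3._map_properties_to_variables(values=["logistic", "random"], keys=pd.Index([1, 3]),
#                                                 var_type="treatment", value_type="methods")
#       -> pd.Series({1: "logistic", 3: "random"})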
# ### Main functionality ### #
def generate_data(self, X_given=None, num_samples=None, random_seed=None):
"""
Generates tables of dataset given the object's initial parameters.
Args:
num_samples (int): Number of samples that will be in the dataset.
X_given (pd.DataFrame): A baseline dataset to generate from. This dataset may contain only some of variables
stated in the initialized topology. The rest of the dataset (variables which are
stated in the topology and not in this dataset) will be generated.
**Notes**: The data given will not be overwritten and will be taken as is. It is
user responsibility to see that the given table has no dependent variables since
they will not be re-generated according to the graph.
random_seed (int): A seed for the pseudo-random-number-generator in order to reproduce results.
Returns:
(pd.DataFrame, pd.DataFrame, pd.DataFrame): 3-element tuple containing:
- **X** (*pd.DataFrame*): A (num_samples x num_covariates) matrix of all covariates
(including treatments and outcomes) over samples.
- **propensities** (*pd.DataFrame*): A (num_samples x num_treatments) matrix (or vector) of propensity
values of every treatment.
- **counterfactuals** (*pd.DataFrame*): A (num_samples x num_outcomes) matrix -
"""
if random_seed is not None:
np.random.seed(random_seed)
if num_samples is None and X_given is None:
raise ValueError("Must supply either a dataset (X) or number of samples to generate")
if num_samples is not None and X_given is not None:
warnings.warn("Got both number of samples (num_samples) and a baseline dataset (X_given). "
"Number of samples will be ignored and only X_given will be used.", UserWarning)
if X_given is None:
num_samples = num_samples
patients_index = list(range(num_samples))
else:
num_samples = X_given.index.size
patients_index = X_given.index
# generate latent continuous covariates - every variable is guaranteed to have a population variance of 1.0
# X_latent = pd.DataFrame(index=patients_index, columns=self.var_types.index)
X = pd.DataFrame(index=patients_index, columns=self.var_types.index)
if X_given is not None: # if a dataset is given, integrate it to the current dataset being build.
X.loc[:, X_given.columns] = X_given
for col in X_given.columns:
X.loc[:, col] = X[col].astype(X_given.dtypes[col]) # insist of keeping original types.
propensities = pd.DataFrame(index=patients_index,
columns=pd.MultiIndex.from_tuples([(i, j) for i in self.treatment_indices
for j in self.prob_categories[i].index]))
cf_columns = []
for outcome in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(outcome))
treatment_predecessor = self.treatment_indices.intersection(predecessors)
if not treatment_predecessor.empty:
treatment_predecessor = treatment_predecessor[0]
for j in self.prob_categories[treatment_predecessor].index:
cf_columns.append((outcome, j))
else:
cf_columns.append((outcome, "null"))
counterfactuals = pd.DataFrame(index=patients_index, columns=pd.MultiIndex.from_tuples(cf_columns))
# create the variables according to their topological order to avoid creating variables before their
# dependencies are created:
for i in nx.topological_sort(self.graph_topology):
# i = self.var_names[i] # get the name corresponding to the i'th location in topology
if X.loc[:, i].notnull().any():
# current column has non-NAN values meaning it has some data in it so it will not be overwritten
continue
var_type = self.var_types[i]
X_parents = X.loc[:, self.topology[self.var_names[self.var_names == i].index[0], :]]
if var_type == COVARIATE or var_type == HIDDEN or var_type == EFFECT_MODIFIER:
X_signal, beta = self.generate_covariate_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
num_samples=num_samples, var_name=i)
elif var_type == TREATMENT:
X_signal, propensity, beta = self.generate_treatment_col(X_parents=X_parents,
link_type=self.link_types[i],
snr=self.snr[i],
method=self.treatment_methods[i],
prob_category=self.prob_categories[i],
var_name=i)
propensities[i] = propensity
elif var_type == OUTCOME:
X_signal, cf, beta = self.generate_outcome_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
effect_size=self.effect_sizes[i],
outcome_type=self.outcome_types[i],
survival_distribution=self.survival_distribution.get(i),
survival_baseline=self.survival_baseline.get(i),
var_name=i)
counterfactuals[i] = cf
# print 'mean treatment effect: %0.3f' % (np.mean(cf1 - cf0))
elif var_type == CENSOR:
outcome_successor = self.outcome_indices.intersection(self.graph_topology.successors(i))[0]
treatment_predecessor = self.treatment_indices.intersection(self.graph_topology.predecessors(i))
treatment_predecessor = treatment_predecessor[0] if len(treatment_predecessor) > 0 else None
X_signal, beta = self.generate_censor_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
outcome_type=self.outcome_types[outcome_successor],
treatment_importance=self.treatment_importances.
get(treatment_predecessor),
survival_distribution=self.survival_distribution.get(i),
survival_baseline=self.survival_baseline.get(i),
var_name=i)
else:
raise ValueError("{c_type} is not supported type of variable. "
"Supported types are {s_types}".format(c_type=var_type, s_types=VALID_VAR_TYPES))
X.loc[:, i] = X_signal
self.linking_coefs[i] = beta
# print X_latent.var(axis=0, ddof=1)
# print X.var(axis=0, ddof=1)
return X, propensities, counterfactuals
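# Usage sketch for generate_data (assumed example, not from the original source):
#   X, propensities, counterfactuals = sim.generate_data(num_samples=1000, random_seed=0)
#   # X:               (1000 x num_variables) frame holding covariates, treatments and outcomes
#   # propensities:    MultiIndex columns (treatment_index, category) -> P(T=category | covariates)
#   # counterfactuals: MultiIndex columns (outcome_index, treatment_category) -> potential outcomes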
def generate_covariate_col(self, X_parents, link_type, snr, prob_category, num_samples, var_name=None):
"""
Generates a single signal (covariate) column
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A vector which length states the number of classes (number of discrete
values) and every value is fractional - the probability of the corresponding
class.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
num_samples (int): number of samples to generate
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.Series): 2-element tuple containing:
- **X_final** (*pd.Series*): The final (i.e. noised and discretize [if needed]) covariate column.
- **beta** (*pd.Series*): The coefficients used to generate current variable from its predecessors.
Raises:
ValueError: if the given link_type is not a valid link_type. (Supported link types are placed in
self.G_LINKING_METHODS)
"""
# if variable has no parents - just sample from normal Gaussian distribution:
if X_parents.empty:
X_new = pd.Series(np.random.normal(loc=0.0, scale=1.0, size=num_samples), index=X_parents.index)
beta = pd.Series(dtype=np.float64)
else:
# generate covariate column based on the parents' variables
linking_method = self.G_LINKING_METHODS.get(link_type)
if linking_method is None:
raise KeyError("link type must be one of {},got {} instead.".format(list(self.G_LINKING_METHODS.keys()),
link_type))
beta = self.linking_coefs.get(var_name)
X_new, beta = linking_method(X_parents, beta=beta)
# noise the sample
X_noised_cont, _, _ = self._noise_col(X_new, snr=snr)
# discretize variables if required:
X_final = self._discretize_col(X_noised_cont, prob_category)
return X_final, beta
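# Comment sketch: a covariate with no parents is drawn i.i.d. from N(0, 1); otherwise it is a (possibly
# non-linear) link of its parents, noised according to snr and discretized only when prob_category is
# supplied, e.g. (illustrative call):
#   x_col, beta = self.generate_covariate_col(X_parents, link_type="linear", snr=0.8,
#                                             prob_category=None, num_samples=len(X_parents))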
def generate_treatment_col(self, X_parents, link_type, snr, prob_category, method="logistic", var_name=None):
"""
Generates a single treatment variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities correspond to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
method (str): A type of method to generate the treatment signal and the corresponding propensities.
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.Series): 3-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment to each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
- **beta** (*pd.Series*): The coefficients used to generate current variable from its predecessors.
Raises:
ValueError: if prob_category is None (treatment must be categorical)
ValueError: If prob_category is not a legitimate probability vector (non negative, sums to 1)
"""
# Check input validity:
if prob_category is None:
raise ValueError("Treatment variable must be categorical, therefore it must have a legitimate distribution "
"over its possible values. Got None instead.")
CausalSimulator3._check_for_legitimate_probabilities(prob_category)
# generate only the continuous signal since it is later processed (therefore prob_category = None)
x_continuous, beta = self.generate_covariate_col(X_parents=X_parents, link_type=link_type, snr=snr,
prob_category=None, num_samples=X_parents.index.size,
var_name=var_name)
generation_method = self.TREATMENT_METHODS.get(method)
if generation_method is None:
raise KeyError("The given method {method} is not supported, "
"only {valid_methods}.".format(valid_methods=list(self.TREATMENT_METHODS.keys()),
method=method))
else:
params = self.params.get(var_name, {})
propensity, treatment = generation_method(x_continuous, prob_category, snr=snr, params=params)
return treatment.astype(int), propensity.astype(float), beta
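# Usage sketch (assumed example): for a binary treatment with 30% treated,
#   prob_category = pd.Series([0.7, 0.3], index=[0, 1])
#   treatment, propensity, beta = self.generate_treatment_col(X_parents, link_type="linear", snr=0.9,
#                                                             prob_category=prob_category, method="logistic")
#   # treatment:  pd.Series of 0/1 assignments
#   # propensity: DataFrame with one probability column per treatment category (rows sum to 1)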
def generate_outcome_col(self, X_parents, link_type, snr, prob_category, outcome_type, treatment_importance=None,
effect_size=None, survival_distribution=None, survival_baseline=None, var_name=None):
"""
Generates a single outcome variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
treatment_importance (float): The effect power of the treatment on the current generated outcome variable,
as opposed to other variables that may influence on it.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities correspond to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
effect_size (float): wanted mean effect size.
outcome_type (str): Type of outcome variable. Either categorical (and continuous) or survival
survival_distribution (str): The type of the distribution of which to sample the survival time from.
relevant only if outcome_type is "survival"
survival_baseline: The baseline value of the Cox PH model. Relevant only if outcome_type is "survival"
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.DataFrame): 3-element tuple containing:
- **x_outcome** (*pd.Series*): Outcome assignment for each sample.
- **cf** (*pd.DataFrame*): Holding the counterfactuals for every possible treatment category of the
outcome's treatment predecessor variable.
- **beta** (*pd.DataFrame*): The coefficients used to generate current variable from its predecessors.
Raises:
ValueError: if the given link_type is not a valid link_type. (Supported link types are placed in
self.G_LINKING_METHODS)
ValueError: if prob_category is neither None nor a legitimate distribution vector.
"""
# drop censor indices as they do not affect the actual values of the outcome, only the masking later:
X_parents = X_parents.drop(self.censor_indices, axis='columns') # type: pd.DataFrame
if X_parents.columns.size == 0:
raise ValueError("Outcome variable cannot be independent variable (i.e. have no parent in graph topology)")
# get effect modifiers:
effect_modifier = self.effmod_indices.intersection(X_parents.columns)
X_effmod = X_parents.loc[:, effect_modifier] # type: pd.DataFrame
X_covariates = X_parents.drop(effect_modifier, axis="columns") # type: pd.DataFrame
# get the treatment variable that affect current outcome.
treatment_parent = self.treatment_indices.intersection(X_covariates.columns)
if len(treatment_parent) > 1: # outcome variable is dependent on more than one treatment
raise ValueError(
"Outcome should have only one treatment affecting it. The current topology has outcome"
" variable dependant on {n_parent_treat} treatment parents which are: "
"{treatment_parents}".format(n_parent_treat=len(treatment_parent),
treatment_parents=treatment_parent))
else:
try: # len(treatment_parent) == 1 - outcome variable is dependent on exactly one treatment
treatment_parent = treatment_parent[0]
X_treatment = X_covariates.loc[:, treatment_parent] # type: pd.Series
X_covariates = X_covariates.drop(treatment_parent, axis="columns") # type: pd.DataFrame
except IndexError: # len(treatment_parents) == 0 outcome variable is independent of treatment variables
treatment_parent = None
X_treatment = pd.Series(dtype=np.float64)
has_treatment_parent = not X_treatment.empty
treatment_importance = treatment_importance or self.treatment_importances.get(treatment_parent)
original_treatment_categories = X_treatment.unique().astype(int) # before being manipulated
# convexly re-weight variables according if treatment has different importance than the covariates:
if treatment_importance is not None:
# !knowingly not weighting (especially weighting-down) effect modifiers! (so only re-weighting covariates)
X_treatment *= treatment_importance # how much the treatment affects the outcome
if not X_covariates.columns.empty: # how much non-treatments (regular covariates) affect outcome
X_covariates *= float(float(1 - treatment_importance) / X_covariates.columns.size)
X_parents = pd.concat([X_covariates, X_effmod, X_treatment], axis="columns", ignore_index=False)
if link_type in list(self.G_LINKING_METHODS.keys()):
# generate counterfactuals
treatment_importance = 1 if treatment_importance is None else treatment_importance
cf = {}
for treatment_cat in original_treatment_categories:
cf[treatment_cat] = X_parents.drop(treatment_parent, axis="columns")
cf[treatment_cat].loc[:, treatment_parent] = treatment_cat * treatment_importance
linking_method = self.G_LINKING_METHODS.get(link_type)
beta = self.linking_coefs.get(var_name)
x_outcome, beta = linking_method(X_parents, beta=beta)
cf = {i: linking_method(cf[i], beta=beta)[0] for i in list(cf.keys())}
elif link_type in self.O_LINKING_METHODS:
linking_method = self.O_LINKING_METHODS.get(link_type)
beta = self.linking_coefs.get(var_name)
x_outcome, cf, beta = linking_method(X_covariates, X_effmod, X_treatment, beta=beta)
cf = {col: cf[col] for col in cf.columns}
else:
raise KeyError("link type: {lt} is not a supported type of linking".format(lt=link_type))
# noise the sample:
x_outcome, cov_std, noise = self._noise_col(x_outcome, snr=snr)
cf = {i: self._noise_col(cf[i], snr, cov_std, noise)[0] for i in list(cf.keys())}
if effect_size is not None:
warnings.warn("Stating effect size is not yet supported. Supplying it has no effect on results",
UserWarning)
# TODO: support given effect size
pass
# aggregate according to type:
if outcome_type == CATEGORICAL:
x_outcome, bins = self._discretize_col(x_outcome, prob_category, retbins=True)
# redefine bins edges so it could accommodate for values in the cfs that weren't present in the outcome:
bins.iloc[0] = -np.inf
bins.iloc[-1] = np.inf
cf = {i: self._discretize_col(cf[i], prob_category, bins=bins) if has_treatment_parent else cf[i]
for i in list(cf.keys())}
elif outcome_type == CONTINUOUS:
pass
elif outcome_type == PROBABILITY:
x_outcome = self._sigmoid(x_outcome)
cf = {i: self._sigmoid(cf[i]) for i in list(cf.keys())}
elif outcome_type == SURVIVAL:
if survival_distribution == "expon":
rnd_state = np.random.randint(low=0, high=999999)
param = survival_baseline * np.exp(x_outcome)
x_outcome = pd.Series(
stats.expon(loc=0.0, scale=(1.0 / param)).rvs(x_outcome.size, random_state=rnd_state),
index=x_outcome.index)
cf = {i: pd.Series(
stats.expon(loc=0.0, scale=(1 / (survival_baseline * np.exp(cf[i])))).rvs(x_outcome.size,
random_state=rnd_state),
index=x_outcome.index)
if has_treatment_parent else cf[i] for i in list(cf.keys())}
# Supplying the random state assures that the resulting outcome and cfs are consistent while sampling rvs
else:
raise ValueError("survival distribution: {0}, is not supported".format(survival_distribution))
else:
raise ValueError("outcome type: {0}, is not supported outcome type".format(outcome_type))
if not cf: # dictionary is empty - outcome variable has no treatment parent
cf = {"null": pd.DataFrame(data=None, index=X_parents.index, columns=["null"])}
cf = pd.DataFrame(cf)
return x_outcome, cf, beta
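# Comment sketch of the counterfactual structure returned above: for a binary treatment parent, cf has
# one column per treatment category, e.g.
#   cf[0] - the outcome each sample would have had under treatment value 0
#   cf[1] - the outcome each sample would have had under treatment value 1
# while the factual outcome x_outcome corresponds to the treatment actually assigned in X.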
def generate_censor_col(self, X_parents, link_type, snr, prob_category, outcome_type,
treatment_importance=None, survival_distribution=None, survival_baseline=None,
var_name=None):
"""
Generates a single censor variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (Sequence | None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities correspond to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
outcome_type (str): The type of the outcome variable that is dependent on the current censor variable.
The censoring mechanism varies given different types of outcome variables.
treatment_importance (float): The effect power of the treatment on the current generated outcome
variable, as opposed to other variables that may influence on it.
survival_distribution (str): The type of the distribution of which to sample the survival time from.
relevant only if outcome_type is "survival"
survival_baseline: The baseline value of the Cox PH model. Relevant only if outcome_type is "survival"
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.Series): 2-element tuple containing:
- **x_censor** (*pd.Series*): a column describing the censor variable
- **beta** (*pd.Series*): The coefficients used to generate current variable from its predecessors.
"""
if prob_category is None or len(prob_category) != 2:
raise ValueError("Censor mechanism must be dichotomous (either censored or not-censored). However, Got the "
"following category probabilities instead: {0}".format(prob_category))
if treatment_importance is not None:
warnings.warn("treatment importance is not yet supported in generating censor variables", UserWarning)
X_parents = X_parents.copy(deep=True) # type: pd.DataFrame
X_parents.loc[:, self.treatment_indices] *= treatment_importance
non_treatment_parents = X_parents.columns.drop(self.treatment_indices)
if not non_treatment_parents.empty:
X_parents.loc[:, non_treatment_parents] *= float((float(1 - treatment_importance) /
non_treatment_parents.size))
if outcome_type in {CATEGORICAL, CONTINUOUS}:
x_censor, beta = self.generate_covariate_col(X_parents=X_parents, link_type=link_type, snr=snr,
prob_category=prob_category, num_samples=X_parents.index.size,
var_name=var_name)
elif outcome_type == SURVIVAL:
x_signal, beta = self.generate_covariate_col(X_parents=X_parents, link_type=link_type, snr=snr,
prob_category=None, num_samples=X_parents.index.size,
var_name=var_name)
if survival_distribution == "expon":
# param = survival_baseline * (prob_category.iloc[0]/prob_category.loc[1]) * np.exp(x_signal) # Cox ph
param = survival_baseline * np.exp(x_signal) # Cox ph model
survival_distribution = stats.expon(loc=0.0, scale=(1.0 / param))
x_censor = pd.Series(survival_distribution.rvs(size=x_signal.size), index=x_signal.index)
# scale values with censoring proportions - 0 is non censored, 1 is censored:
x_censor *= (prob_category.iloc[0] / prob_category.loc[1])
elif survival_distribution == "logistic":
survival_distribution = stats.expon(loc=0.0, scale=(1.0 / survival_baseline))
if X_parents.empty: # censor variable is independent
probabilities = pd.Series(data=np.random.uniform(low=0, high=1, size=X_parents.index.size),
index=X_parents.index)
else:
x_signal, _ = self.generate_covariate_col(X_parents=X_parents, link_type=link_type, snr=snr,
prob_category=None, num_samples=X_parents.index.size)
t = x_signal.quantile(prob_category.iloc[1], interpolation="higher")
probabilities = 1.0 / (1 + np.exp(x_signal - np.repeat(t, x_signal.size)))
x_censor = survival_distribution.ppf(probabilities)
else:
raise ValueError("survival distribution: {0}, is not supported".format(survival_distribution))
else:
raise ValueError("Unsupported censoring mechanism for type of outcome: {0}".format(outcome_type))
return x_censor, beta
# ### TREATMENT GENERATION METHODS ### #
@staticmethod
def _treatment_random(x_continuous, prob_category):
"""
Assign treatment to samples completely at random.
Args:
x_continuous (pd.Series): Aggregated signal (a scalar per sample) based on the variable's predecessor
variables.
prob_category (pd.Series): Probability vector the size of the number of treatment categories where every entry is
the corresponding probability of that category.
Returns:
(pd.DataFrame, pd.DataFrame): 2-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment for each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
"""
index_names = x_continuous.index
columns_names = prob_category.index
propensity = pd.DataFrame(data=np.tile(prob_category, (len(index_names), 1)),
index=index_names, columns=columns_names)
treatment = pd.Series(data=np.random.choice(a=prob_category.index, size=len(index_names), replace=True,
p=prob_category), index=index_names)
return propensity, treatment
@staticmethod
def _treatment_gaussian_dichotomous(x_continuous, prob_category, snr):
"""
Assign treatment to samples by sampling percentiles from a normal distribution
Args:
x_continuous (pd.Series): Aggregated signal (a scalar per sample) based on the variable's predecessor
variables.
prob_category (pd.Series): Probability vector the size of the number of treatment categories where every entry is
the corresponding probability of that category.
snr (float): signal to noise ratio.
Returns:
(pd.DataFrame, pd.DataFrame): 2-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment for each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
Raises:
ValueError: If given more than two categories. This method supports dichotomous treatment only.
"""
if prob_category.size != 2:  # this method is suited for dichotomous treatment only
raise ValueError("gaussian method supports only binary treatment. Got the distribution vector "
"{p_vec} of length {n_cat}".format(n_cat=prob_category.size, p_vec=prob_category))
index_names = x_continuous.index
columns_names = prob_category.index
propensity = pd.DataFrame(index=index_names, columns=columns_names)
# compute propensities:
t = stats.norm(loc=0, scale=1).ppf(prob_category.iloc[1]) # percentile given a distribution
cur_propensity = stats.norm(loc=x_continuous, scale=(1 - snr)).sf(t) # sf is 1 - CDF
# discretize values:
treatment = CausalSimulator3._discretize_col(x_continuous, prob_category)
propensity.loc[:, columns_names[1]] = cur_propensity
propensity.loc[:, columns_names[0]] = np.ones(cur_propensity.size) - cur_propensity
return propensity, treatment
@staticmethod
def _treatment_logistic_dichotomous(x_continuous, prob_category, params=None):
"""
Assign treatment to samples using a logistic model.
Args:
x_continuous (pd.Series): Aggregated signal (a scalar per sample) based on the variable's predecessor
variables.
prob_category (pd.Series): Probability vector the size of the number of treatment categories where every entry is
the corresponding probability of that category.
params (dict | None): Parameters that will be used in the generation function, e.g. sigmoid slope.
Returns:
(pd.DataFrame, pd.DataFrame): 2-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment for each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
Raises:
ValueError: If given more than two categories. This method supports dichotomous treatment only.
"""
if prob_category.size != 2:  # this method is suited for dichotomous treatment only
raise ValueError("logistic method supports only binary treatment. Got the distribution vector "
"{p_vec} of length {n_cat}".format(n_cat=prob_category.size, p_vec=prob_category))
index_names = x_continuous.index
columns_names = prob_category.index
propensity = pd.DataFrame(index=index_names, columns=columns_names)
# compute propensities:
t = x_continuous.quantile(prob_category.iloc[1], interpolation="higher")
slope = params.get("slope", 1.0) if params is not None else 1.0
cur_propensity = 1.0 / (1 + np.exp(slope * (x_continuous - np.repeat(t, x_continuous.size))))
# assign the propensity values:
propensity.loc[:, columns_names[1]] = cur_propensity
propensity.loc[:, columns_names[0]] = np.ones(cur_propensity.size) - cur_propensity
treatment = CausalSimulator3._sample_from_row_stochastic_matrix(propensity)
return propensity, treatment
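# Worked example for the logistic assignment above (comment sketch): with slope=1, a sample whose
# aggregated signal equals the threshold t gets propensity 1 / (1 + exp(0)) = 0.5 for the second
# category; signals above t push that propensity towards 0 and signals below t push it towards 1.
# Treatment is then drawn per-sample from the resulting row-stochastic propensity matrix.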
@staticmethod
def _treatment_odds_ratio(x_continuous, prob_category, snr):
"""
Assign treatment proportional to the odds ratio of the categories.
Each category is assigned its odds ratio independently (based on a logistic function) and samples are later drawn
proportionally to these odds ratios.
Args:
x_continuous (pd.Series): Aggregated signal (a scalar per sample) based on the variable's predecessor
variables.
prob_category (pd.Series): Probability vector the size of the number of treatment categories where every entry is
the corresponding probability of that category.
snr (float) - signal to noise ratio.
Returns:
(pd.DataFrame, pd.DataFrame): 2-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment for each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
"""
index_names = x_continuous.index
columns_names = prob_category.index
propensity = pd.DataFrame(index=index_names, columns=columns_names)
# start with filling up the odds ratio:
for cur_category, p in prob_category.items():  # items() replaces the deprecated iteritems()
t = x_continuous.quantile(p, interpolation="higher")
cur_propensity = (1.0 / (1 + np.exp((x_continuous - np.repeat(t, x_continuous.size))))) # type: pd.Series
cur_propensity = cur_propensity.div(np.ones_like(cur_propensity) - cur_propensity)
cur_propensity += np.abs(np.random.normal(loc=0.0, scale=1 - snr, size=cur_propensity.size))
# cur_propensity += np.random.exponential(scale=np.sqrt(snr), size=cur_propensity.size)
propensity.loc[:, cur_category] = cur_propensity
# normalize into probabilities:
propensity = propensity.div(propensity.sum(axis="columns"), axis="rows")
# treatment assignment is drawn according to marginal propensities:
treatment = CausalSimulator3._sample_from_row_stochastic_matrix(propensity)
return propensity, treatment
@staticmethod
def _treatment_quantile_gauss_fit(x_continuous, prob_category, snr):
"""
Assign treatment by quantiling and shuffling.
The signal is divided into quantiles according to the given probability (proportions). A gaussian distribution
is fitted for each quantile. A score is calculated for each sample based on the pdf of the fitted gaussian.
The scores are then rescaled to function as propensities to that category, while the complement (one minus the
propensity) is distributed proportionally among the rest of the categories.
Args:
x_continuous (pd.Series): Aggregated signal (a scalar per sample) based on the variable's predecessor
variables.
prob_category (pd.Series): Probability vector the size of the number of treatment categories where every entry is
the corresponding probability of that category.
snr(float): signal to noise ratio.
Returns:
(pd.DataFrame, pd.DataFrame): 2-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment for each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
"""
index_names = x_continuous.index
columns_names = prob_category.index
propensity = pd.DataFrame(index=index_names, columns=columns_names)
# section the signal into bins based on the probabilities (quantiles)
bins = pd.qcut(x=x_continuous, q=np.cumsum(pd.concat([pd.Series(0, index=["null"]), prob_category])),  # pd.concat replaces the removed Series.append
labels=columns_names)
for cur_category in columns_names:
cur_samples_mask = (bins == cur_category)
cur_samples = x_continuous[cur_samples_mask]
fit_mu, fit_sigma = stats.norm.fit(cur_samples)
# fits.loc[cur_category, :] = {"mean": fit_mu, "var": fit_sigma}
cur_pdfs = cur_samples.apply(stats.norm(loc=fit_mu, scale=fit_sigma).pdf) # type:pd.Series
# rescale:
max_p = 1.0 - (1.0 - snr)
min_p = cur_pdfs.div(cur_pdfs.sum()).min()
cur_propensity = (max_p - min_p) * (cur_pdfs - cur_pdfs.min()) / \
(cur_pdfs.max() - cur_pdfs.min()) + min_p # type: pd.Series
# assign the propensity to the assigned category:
propensity.loc[cur_samples_mask, cur_category] = cur_propensity
# assign the propensity to the other, not assigned, categories:
left_over_ps = prob_category.drop(cur_category) # type: pd.Series
left_over_ps = left_over_ps.div(left_over_ps.sum())
not_propensity = pd.DataFrame(data=np.tile(np.ones_like(cur_propensity) - cur_propensity,
(left_over_ps.size, 1)).transpose(),
index=cur_propensity.index, columns=left_over_ps.index)
not_propensity = not_propensity.mul(left_over_ps)
propensity.loc[cur_samples_mask, left_over_ps.index] = not_propensity
# propensity = propensity.astype(np.float)
# treatment assignment is drawn according to marginal propensities:
treatment = CausalSimulator3._sample_from_row_stochastic_matrix(propensity)
return propensity, treatment
# ### HELPER FUNCTIONS ### #
@staticmethod
def _sample_from_row_stochastic_matrix(propensity):
"""
Given a row-stochastic matrix (DataFrame) sample one category from each row.
Args:
propensity (pd.DataFrame): A row-stochastic DataFrame (i.e. all rows sums to one and non negative).
Returns:
treatment (pd.Series): A vector (length of propensity.index) of the resulted sampling.
"""
categories_names = propensity.columns
prop_cdf = propensity.cumsum(axis="columns")
r = np.random.uniform(low=0, high=1, size=(propensity.index.size, 1))
categories = prop_cdf.le(np.tile(r, (1, categories_names.size))).sum(axis="columns")
treatment = pd.Series(categories_names[categories].values, index=propensity.index)
return treatment
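# Worked example of the inverse-CDF sampling above (comment sketch): for a propensity row [0.2, 0.8]
# the cumulative row is [0.2, 1.0]; a uniform draw r = 0.5 satisfies cdf <= r only for the first entry,
# so the row sum is 1 and the second category (categories_names[1]) is selected - i.e. r landed in the
# (0.2, 1.0] interval belonging to the second category.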
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
#%matplotlib inline
#check dependencies
from utils import pyreq
pyreq.require("matplotlib,pandas")
#gather command line arguments
import argparse
parser = argparse.ArgumentParser()
#data file specification parameters (change which files are loaded)
parser.add_argument('-path', type=str, metavar='PATH', default = '', help='path to files - default : none (will read files in current directory)', required=False)
parser.add_argument('-conditions', type=str, metavar=('CONDITION'), default = [''], help='names of condition directories - default: none (will use files in path directly)',nargs='+', required=False)
parser.add_argument('-files', type=str, metavar='FILE(s)', default = ['pop.csv'], help='file name(s) - default: pop.csv dominant.csv', nargs='+', required=False)
parser.add_argument('-repRange', type=int, metavar=('FIRST','LAST'), default = [1,0], help='replicate range - default: none (will use files in path directly)', nargs=2, required=False)
parser.add_argument('-repList', type=str, metavar='REP', default = [], help='replicate list. useful if you are missing a replicate. cannot be used with repRange - default: none (will use files in path directly)', nargs='+', required=False)
#data filtering parameters (change what data is displayed)
parser.add_argument('-data', type=str, metavar='COLUMN_NAME', default = [''], help='column names of data to be graphed. Can contain wildcards(*) but then arguments should be closed in single quotes(\'\')- default : none (will attempt to graph all columns from first file, and those columns in all other files)',nargs='+', required=False)
parser.add_argument('-dataFromFile', type=str, metavar='FILE_NAME', default = '', help='this file will be used to determine which column names of data will be graphed. If this file is not in files, then all data will be plotted - default : NONE', required=False)
parser.add_argument('-ignoreData', type=str, metavar='COLUMN_NAME', default = [''], help='column names of data to be ignored (this will override data). Can contain wildcards(*) but then arguments should be closed in single quotes(\'\')- default : none (will attempt to graph all columns from first file, and those columns in all other files)',nargs='+', required=False)
parser.add_argument('-whereValue', type=str, default = 'update', help='only plot data where this column has values defined by whereRange - default : update', required=False)
parser.add_argument('-whereRange', type=int, default = [], help='only plot data where column with name set by whereValue has values defined this range. Single value, just this value. Two values, inclusive range. Three values, inclusive range with step. - default : none', nargs='+', required=False)
parser.add_argument('-whereRangeLimitToData', action='store_true', default = False, help='set whereRange max based on rep with least data - default : OFF', required=False)
parser.add_argument('-lastOnly', action='store_true', default = False, help='shows only the last data point of all conditions - default (if not set) : OFF', required=False)
#data display parameters (change how data is displayed)
parser.add_argument('-xAxis', type=str, metavar='COLUMN_NAME', default = 'update', help='column name of data to be used on x axis - default : update', required=False)
parser.add_argument('-dataIndex', type=str, metavar='COLUMN_NAME', default = 'update', help='column name of data to be used as index when generating averages - default : update', required=False)
parser.add_argument('-yRange', type=float, default = [], help='if set, determines the range on the y axis; expects 2 values - default : none', nargs='+', required=False)
parser.add_argument('-xRange', type=float, default = [], help='if set, determines the range on the x axis; expects 2 values - default : none', nargs='+', required=False)
parser.add_argument('-pltWhat', type=str, metavar='{ave,std,sem,95conf,99conf,reps}',choices=('ave','std','sem','95conf','99conf','reps'), default = ['ave','95conf'], help='what should be plotted. ave (averages), std (Standard Deviation), sem (Standard Error from the Mean), 95conf (95 percent confidence intervals), 99conf (99 percent confidence intervals), reps (show data for all reps) - default : ave 95conf', nargs='+', required=False)
parser.add_argument('-integrate', type=str, default = [], metavar='_DATA', help='attempt to integrate associated data with _AVE data of same name (e.g. if "_VAR", plot "_VAR" with "_AVE" so "score_VAR" plots with "score_AVE"). Integrated data will not appear in its own plot. This will only work on single reps. -integrate will work with combine conditions (two or more reps can be loaded as conditions).', nargs='+', required=False)
parser.add_argument('-combineConditions', action='store_true', default = False, help='if plotting multiple conditions, adding this flag will combine data from files with same name - default (if not set) : OFF', required=False)
parser.add_argument('-combineData', action='store_true', default = False, help='if plotting multiple data lines, adding this flag will combine data into one plot - default (if not set) : OFF', required=False)
#plot parameters (changes aspects of the plot independent of data)
parser.add_argument('-title', type=str, default = 'NONE', help='title of image - default: none (MGraph will make something up)', required=False)
parser.add_argument('-conditionNames', type=str, metavar=('CONDITION_NAME'), default = [''], help='names to display. must have same number of elements as conditions if defined - default: none (will use conditions)', nargs='+',required=False)
parser.add_argument('-imageSize', type=float, default = [10,10], help='size of image to be created - default : 10 10', nargs=2, required=False)
parser.add_argument('-pltStyle', type=str, choices=('line','point','randomLine','randomPoint'), default = 'line', help='plot style. Random is useful if plotting multiple data on the same plot - default : line', required=False)
parser.add_argument('-errorStyle', type=str, choices=('region','bar','barX','barXY'), default = 'region', help='how error is plotted - default : region', required=False)
parser.add_argument('-numCol', type=str, metavar='#', default = '3', help='if plotting a multi plot (default), how many columns in plot - default : 3', required=False)
parser.add_argument('-legendLocation', type=str, choices=('ur','ul','lr','ll','cr','cl','lc','uc','c','off'), default = 'lr', help='if legends are needed this determines placement (first letter u = upper, c = center, l = lower. second letter l = left, c = center, r = right, off = off) - default : lr (lower right)', required=False)
parser.add_argument('-legendLineWeight', type=int, default = -1, help='changes line thickness in legend - default : lineWeight', required=False)
parser.add_argument('-lineWeight', type=int, default = 1, help='changes line thickness of lines in plots - default : 1', required=False)
parser.add_argument('-grid', action='store_true', default = False, help='if set, this flag causes a grid to be displayed on plots - default : OFF', required=False)
parser.add_argument('-fontSizeMajor', type=int, default = 15, help='size of "Major" fonts (main title) - default : 15', required=False)
parser.add_argument('-fontSizeMinor', type=int, default = 10, help='size of "Minor" fonts (subplot titles and labels) - default : 10', required=False)
parser.add_argument('-fontSizeTicks', type=int, default = 8, help='size of font for axis ticks - default : 8', required=False)
parser.add_argument('-fontSizeLegend', type=int, default = 8, help='size of font in legend - default : 8', required=False)
#utility parameters (does not change data or display)
parser.add_argument('-showDataNames', action='store_true', default = False, help='print the names of the columns in the first file listed - default : OFF', required=False)
parser.add_argument('-verbose', action='store_true', default = False, help='adding this flag will provide more text output while running (useful if you are working with a lot of data to make sure that you are not hanging) - default (if not set) : OFF', required=False)
parser.add_argument('-save', type=str, choices=('pdf','png'), default = '', help='save files rather than display as either pdf or png - default: none (display image)', required=False)
parser.add_argument('-saveName', type=str, default = '', help='if -save is png or pdf, and only one file is being created, use this for the file name. - default: none (mGraph will make up a name)', required=False)
args = parser.parse_args()
# check for invalid argument settings
if args.repRange != [1,0] and args.repList != []:
print ('Error in input. -repRange and -repList are mutually exclusive, please only define one!')
exit()
if args.saveName != "" and args.save == "png" and len(args.files) > 1:
print("\n\n-saveName was provided, but more then one image file will be created because more then input file was listed and -save is png.\n\n either save as type pdf (for a combined image file),\n or run mGraph for each input file\n or remove -saveName")
exit()
#continue with script...
if args.save != "":
from matplotlib import use
use('Agg')
# imports
from pandas import read_csv, concat
import pandas
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
from math import ceil
from fnmatch import fnmatchcase
def isolate_condition(df, con):
return df[df['con'] == con]
def add_error_bars(ErrorStyle, x_axis_values, aveLine, errorLineY, PltColor):
if (ErrorStyle == 'bar'): plt.errorbar(x_axis_values, aveLine, yerr=errorLineY, color=PltColor, alpha=0.5, fmt='.')
if (ErrorStyle == 'barX'): plt.errorbar(x_axis_values, aveLine, xerr=x_axis_values, color=PltColor, alpha=0.5, fmt='.')
if (ErrorStyle == 'barXY'): plt.errorbar(x_axis_values, aveLine, xerr=x_axis_values, yerr=errorLineY, color=PltColor, alpha=0.5, fmt='.')
if (ErrorStyle == 'region'): plt.fill_between(x_axis_values, aveLine-errorLineY, aveLine+errorLineY, color=PltColor, alpha=0.15)
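# Summary of the branches above (descriptive only): 'bar' passes yerr=errorLineY,
# 'barX' passes xerr=x_axis_values, 'barXY' passes both, and 'region' shades the
# band aveLine +/- errorLineY around the average line.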
def MultiPlot(data, NamesList, ConditionsList, dataIndex, CombineData = False, PltWhat = ['ave','95conf'], PltStyle = 'line', ErrorStyle = 'region', Reps = [''], XCoordinateName = '', Columns = 3, title = '', legendLocation = "lower right", xRange = [], yRange = [], integrateNames = [], imageSize = [10,10]):
global args
MajorFontSize = args.fontSizeMajor
MinorFontSize = args.fontSizeMinor
TickFontSize = args.fontSizeTicks
LegendFontSize = args.fontSizeLegend
colorMap = cm.gist_rainbow
styleListPoint = ['o','*','s','D','^','.']
styleListLine = ['-']
styleListRandomLine = ['-^','-.','-o','-*','-s','-D']
styleListRandomPoint = ['^','.','o','*','s','D']
PltColor = (0,0,0)
if PltStyle == 'line':
styleList = styleListLine
PltStyle = '-'
if PltStyle == 'point':
styleList = styleListPoint
PltStyle = 'o'
if PltStyle == 'randomLine':
styleList = styleListRandomLine
PltStyle = '-'
if PltStyle == 'randomPoint':
styleList = styleListRandomPoint
PltStyle = 'o'
while len(ConditionsList)*len(NamesList) > len(styleList): #HACK len(ConditionsList)*len(NamesList) is not a proven upper bound
styleList = styleList + styleList
fig = plt.figure(figsize=(imageSize[0],imageSize[1])) # create a new figure
fig.subplots_adjust(hspace=.35)
if XCoordinateName in NamesList:
NamesList.remove(XCoordinateName)
if args.verbose:
print('removing xAxis column: ',XCoordinateName,' from list of columns to be plotted.',flush=True)
if dataIndex in NamesList:
NamesList.remove(dataIndex)
if args.verbose:
print('removing dataAxis column: ',dataIndex,' from list of columns to be plotted.',flush=True)
if args.lastOnly:
title += ' x axis = conditions'
else:
title += ' x axis = ' + XCoordinateName
if (args.title != 'NONE'):
title = args.title
plt.suptitle(title, fontsize=MajorFontSize, fontweight='bold')
allNamesList = NamesList
for integrateName in integrateNames: # remove all integrateName columns
NamesList = [x for x in NamesList if not integrateName in x]
if len(NamesList) == 1:
Columns = 1
if (len(NamesList) == 2) and (int(Columns) > 2):
Columns = 2
Rows = ceil(float(len(NamesList))/float(Columns)) # calculate how many rows we need
for conditionCount, con in enumerate(ConditionsList):
df_cond = isolate_condition(data, con).groupby(dataIndex)
### this is a hack. df_cond.mean() removes any columns that can not be averaged (i.e. lists, strings, etc...)
### so, we will run it all the time. That way, invalid columns will be removed.
if True:#any([x in PltWhat for x in ['ave', 'std', 'sem', '95conf', '99conf']]):
df_mean = df_cond.mean()
if 'std' in PltWhat:
df_std = df_cond.std()
if any([x in PltWhat for x in ['sem', '95conf', '99conf']]):
df_sem = df_cond.sem()
x_axis_values = isolate_condition(data, con).pivot(index = dataIndex, columns ='repName', values = XCoordinateName).mean(axis=1) #HACK needs to be optimized
for nameCount, name in enumerate(NamesList):
ThisLabel= ''
if not CombineData:
ThisLabel = con
ax = plt.subplot(Rows, Columns, nameCount + 1)#BUG , label=ThisLabel) #label ensures unique axes are created
plt.title(name, fontsize=MinorFontSize) # set the title for this plot
ax.title.set_position([.5, 1])
if (len(ConditionsList) > 1):
PltStyle = styleList[conditionCount]
PltColor = colorMap(conditionCount/len(ConditionsList)) #styleListColor[conditionCount]
elif len(ConditionsList) > 1 or len(NamesList) > 1:
PltStyle = styleList[conditionCount + (nameCount * len(ConditionsList))]
PltColor = colorMap((conditionCount + (nameCount * len(ConditionsList)))/(len(ConditionsList)+len(NamesList))) #styleListColor[conditionCount + (nameCount * len(ConditionsList))]
if (len(ConditionsList) == 1):
ThisLabel = name
else:
ThisLabel = con + ' ' + name
if args.grid:
plt.grid(b=True, which='major', color=(0,0,0), linestyle='-', alpha = .25)
if not name in df_mean.columns.values:
if name == dataIndex:
print('warning: it appears you are attempting to plot ',name,' which is the data index. This is not allowed. Actually if you fix this I will give you $10.',flush=True)
if not CombineData:
plt.title(name+'\n(invalid, dataIndex...\nsee command line output)', fontsize=MinorFontSize) # set the title for this plot
else:
print('warning: it appears that ',name,' is non-numeric (perhaps a list) so its values are not being plotted.',flush=True)
if not CombineData:
plt.title(name+' (INVALID DATA FORMAT)', fontsize=MinorFontSize) # set the title for this plot
else:
if args.lastOnly:
quantity = df_mean.loc[:, name].tail(1).iloc[0]
# quantity = quantity.iloc[0]
if 'std' in PltWhat:
quantityErr = df_std.loc[:, name].tail(1)
plt.bar([conditionCount], [quantity], yerr=quantityErr) #TODO should allow sem, 95conf, 99conf
else:
plt.bar([conditionCount], [quantity])
else:
if 'reps' in PltWhat:
firstRep = 1
for Rep in Reps:
if firstRep == 1:
firstRep = 0
plt.plot(data[data["repName"] == Rep][data["con"] == con][XCoordinateName], data[data["repName"] == Rep][data["con"] == con][name], PltStyle, alpha = .25, color = PltColor, label = ThisLabel + "_rep")
else:
plt.plot(data[data["repName"] == Rep][data["con"] == con][XCoordinateName], data[data["repName"] == Rep][data["con"] == con][name], PltStyle, alpha = .25, color = PltColor, label = '_nolegend_')
# any plot that is dependent on the average line must trigger this 'if' statement
if any([x in PltWhat for x in ['ave', 'std', 'sem', '95conf', '99conf']]):
aveLine = df_mean.loc[:, name]
for integrateName in integrateNames: # remove all integrateName columns
VARNAME = name[0:-4]+integrateName
if VARNAME in allNamesList:
if args.verbose: print(' '+VARNAME+' found, adding to plot for data: ' + name + ' condition: ' + con,flush=True)
errorLineY = df_mean.loc[:, VARNAME]
plt.fill_between(x_axis_values, aveLine - errorLineY,aveLine + errorLineY, color = PltColor, alpha = .15)
if 'std' in PltWhat:
errorLineY = df_std.loc[:, name]
add_error_bars(ErrorStyle, x_axis_values, aveLine, errorLineY, PltColor)
if ('ave' in PltWhat):
plt.plot(x_axis_values, aveLine, PltStyle, markersize = 10, color = PltColor, linewidth = args.lineWeight, label = ThisLabel)
if ('sem' in PltWhat):
errorLineY = df_sem.loc[:, name]
add_error_bars(ErrorStyle, x_axis_values, aveLine, errorLineY, PltColor)
if ('95conf' in PltWhat):
errorLineY = df_sem.loc[:, name].multiply(1.96)
add_error_bars(ErrorStyle, x_axis_values, aveLine, errorLineY, PltColor)
if ('99conf' in PltWhat):
errorLineY = df_sem.loc[:, name].multiply(2.58)
add_error_bars(ErrorStyle, x_axis_values, aveLine, errorLineY, PltColor)
if ((len(ConditionsList) > 1) or (CombineData))and legendLocation != '':
if args.lastOnly:
plt.xlabel('Conditions', fontsize=MinorFontSize)
else:
plt.xlabel(XCoordinateName, fontsize=MinorFontSize)
leg = plt.legend(fontsize=LegendFontSize,loc=legendLocation) # add a legend
if (args.legendLineWeight > 0):
for legobj in leg.legendHandles:
legobj.set_linewidth(args.legendLineWeight)
if args.lastOnly: ## combineConditions
plt.xticks(range(len(ConditionsList)), ConditionsList, rotation=45, ha='right')
else:
plt.ticklabel_format(useOffset=False, style='plain')
plt.tick_params(labelsize=TickFontSize)
if len(xRange) == 2:
plt.xlim(xRange[0],xRange[1])
if len(yRange) == 2:
plt.ylim(yRange[0],yRange[1])
return plt.gcf() # gcf = get current figure - return that.
def get_rep_list(args):
rangeStart = args.repRange[0]
rangeEnd = args.repRange[1]
if args.repList:
reps = args.repList
else:
reps = range(rangeStart, rangeEnd + 1)
if not reps:
reps = ['']
else:
reps = [str(i) + '/' for i in reps]
return reps
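# For example, '-repRange 1 3' yields ['1/', '2/', '3/'] (one subfolder per replicate),
# while an explicit '-repList 101 102' yields ['101/', '102/']; when no replicates are
# defined the list collapses to [''] so paths are left unchanged.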
def get_con_names(args):
folder_names = args.conditions
if folder_names != ['']:
for i, folder_name in enumerate(folder_names):
if folder_name[-1] != '/':
folder_names[i] += '/'
if args.conditionNames == ['']:
user_names = [name[:-1] for name in folder_names]
else:
user_names = args.conditionNames
if (len(user_names) != len(folder_names)):
print ('Error in input. -conditions and -conditionNames must have the same number of arguments')
exit()
return folder_names, user_names
if args.dataFromFile == '':
args.dataFromFile = args.files[0]
def get_data_names(args, condition_folder_names, replicates):
exemplar_path = args.path + condition_folder_names[0] + replicates[0] + args.dataFromFile
if args.verbose: print('getting column names from',exemplar_path,flush=True)
with open(exemplar_path, 'r') as fileReader:
names_from_file = fileReader.readline().strip().split(",")
if args.showDataNames:
print('showing data column names:')
print(*names_from_file, sep=",")
exit()
namesList = []
if args.data != ['']:
user_names = args.data
for u_name in user_names:
if '*' in u_name or '[' in u_name or ']' in u_name:
if args.verbose: print("found column name with wildcard: " + u_name,flush=True)
for f_name in names_from_file:
if fnmatchcase(f_name,u_name):
if args.verbose: print(" ... found match, adding " + f_name + " to data.",flush=True)
namesList.append(f_name)
else:
namesList.append(u_name)
else:
namesList = names_from_file
if args.xAxis in namesList:
namesList.remove(args.xAxis)
for u_name in args.ignoreData:
if '*' in u_name or '[' in u_name or ']' in u_name:
if args.verbose: print("found ignore data with wildcard: " + u_name,flush=True)
for f_name in namesList:
if fnmatchcase(f_name,u_name):
if args.verbose: print(" ... found match, removing " + f_name + ".",flush=True)
namesList.remove(f_name)
return namesList
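# For example, a hypothetical '-data score*' pattern expands to every column name that
# matches via fnmatchcase, and '-ignoreData *_VAR' then removes matching columns from
# the list of data to plot.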
def find_alternate_data_names(search_from, match_to):
alternate_names = []
for name in match_to:
if name in search_from:
alternate_names.append(name)
else:
if args.verbose: print(" can't find: '" + name + "'",flush=True)
if name[-4:]=="_AVE":
short_name = name[0:-4]
if short_name in search_from:
if args.verbose: print(" but I did find: '" + short_name + "'",flush=True)
alternate_names.append(short_name)
return alternate_names
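# For example, if a hypothetical 'score_AVE' column requested by the user is missing
# from this file, the '_AVE' suffix is stripped and a plain 'score' column is used
# instead; names with no match in either form are dropped from the alternate list.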
def load_data(args, condition_folder_names, condition_user_names, replicates, file_names, data_names):
df_dict = {}
updateMin = 'undefined'
for f in file_names:
df_dict[f] = []
alt_names = []
for c_f, c_u in zip(condition_folder_names, condition_user_names):
for r in replicates:
complete_path = args.path + c_f + r + f
if args.verbose: print ("loading file: " + complete_path,flush=True)
df_all = read_csv(complete_path)
last_x_value = df_all[args.xAxis].iat[-1]
if updateMin == 'undefined':
updateMin = last_x_value
if (last_x_value < updateMin):
updateMin = last_x_value
if args.verbose: print(c_u + " " + r + " has data until: " + str(last_x_value) + " new shortest!",flush=True)
if args.xAxis == args.dataIndex:
extraColumns = [args.dataIndex]
else:
extraColumns = [args.xAxis]+[args.dataIndex]
if f == args.dataFromFile:
df_keep = df_all[list(set([data_name for data_name in data_names]+extraColumns))]
else:
if not alt_names:
alt_names = find_alternate_data_names(df_all.columns,data_names)
df_keep = pandas.DataFrame(df_all[list(set([alt_name for alt_name in alt_names]+extraColumns))])
df_keep = pandas.DataFrame(df_keep)
df_keep["repName"] = r
df_keep["con"] = c_u
df_dict[f].append(df_keep)
return {f: concat([df for df in df_dict[f]], ignore_index=True) for f in df_dict}
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with a value not in the categories raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
# PH 32624: Error when using a lot of indices to unstack.
# The error occurred only, if a lot of indices are used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# PH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# PH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
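# verify() checks that each non-null cell of the unstacked frame (e.g. a string like
# 'a.w') decomposes into exactly the index and column labels that address it, with
# NaN labels cast to the empty string.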
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
df = DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = Series(
ts,
index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_stack_empty_frame(dropna):
# GH 36113
expected = Series(index=MultiIndex([[], []], [[], []]), dtype=np.float64)
result = DataFrame(dtype=np.float64).stack(dropna=dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value):
# GH 36113
result = (
DataFrame(dtype=np.int64).stack(dropna=dropna).unstack(fill_value=fill_value)
)
expected = DataFrame(dtype=np.int64)
tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
# GH 36113
msg = r"index must be a MultiIndex to unstack.*"
with pytest.raises(ValueError, match=msg):
Series(dtype=np.int64).unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_positional_level_duplicate_column_names():
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
class TestStackUnstackMultiLevel:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
# test that int32 work
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
# make sure DataFrame.unstack() works when its run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
def test_stack(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = ymd.unstack(0).stack(-2)
expected = ymd.unstack(0).stack(0)
tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(
np.arange(12).reshape(4, 3),
index=list("abab"),
columns=["1st", "2nd", "3rd"],
)
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd", "3rd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ["1st", "2nd", "1st"]
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thu,Dinner,No,3.0,1
Thu,Lunch,No,117.32,44
Thu,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df["foo"].stack().sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack()
assert restacked.index.names == frame.index.names
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake")
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake")
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp")
expected = frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp")
expected = frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
tm.assert_frame_equal(s_unstacked, expected["A"])
restacked = unstacked.stack(["year", "month"])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, ymd)
import pandas as pd
from pandas.io.json import json_normalize
import json
from SPARQLWrapper import SPARQLWrapper , JSON
def getLabel(id):
query = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX wd: <http://www.wikidata.org/entity/> SELECT ?label WHERE { wd:"+id+" rdfs:label ?label . FILTER (langMatches( lang(?label), \"EN\" ) ) } LIMIT 1"
sparql = SPARQLWrapper("http://query.wikidata.org/sparql", agent = 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36')
sparql.setQuery(query)
try:
sparql.setReturnFormat(JSON)
uri = sparql.query().convert()['results']['bindings'][0]['label']['value']
return uri
except Exception as e:
print("type error: " + str(e))
dataset_file = open('Dataset_test.json','r')
dataset_decode = json.load(dataset_file)
tab = json_normalize(dataset_decode)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 5 18:05:11 2019
@author: colin
"""
import pandas as pd
import matplotlib.pyplot as plt
import math
def least_squares(data):
'''Calculate the least squares (linear) regression for a data set
the data should be a single list containing two sublists, the first sublist
should be the x data and the second the y data'''
x_sum = 0
y_sum = 0
x_sq_sum = 0
xy_sum = 0
# the list of data should have two equal length columns
assert len(data[0]) == len(data[1])
assert len(data) == 2
n = len(data[0])
# least squares regression calculation
for i in range(0, n):
x = data[0][i]
y = data[1][i]
x_sum = x_sum + x
y_sum = y_sum + y
x_sq_sum = x_sq_sum + (x**2)
xy_sum = xy_sum + (x*y)
m = ((n * xy_sum) - (x_sum * y_sum))
m = m / ((n * x_sq_sum) - (x_sum ** 2))
c = (y_sum - m * x_sum) / n
print("Results of linear regression:")
print("x_sum=", x_sum, "y_sum=", y_sum, "x_sq_sum=", x_sq_sum, "xy_sum=",
xy_sum)
print("m=", m, "c=", c)
return m, c
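# Worked example (hypothetical data): for x = [0, 1, 2, 3], y = [1, 3, 5, 7]
# (i.e. y = 2x + 1), least_squares([[0, 1, 2, 3], [1, 3, 5, 7]]) returns m = 2.0, c = 1.0.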
def measure_error(data1, data2):
'''Measure the RMS error between data1 and data2'''
assert len(data1) == len(data2)
err_total = 0
for i in range(0, len(data1)):
err_total = err_total + (data1[i] - data2[i]) ** 2
err = math.sqrt(err_total / len(data1))
return err
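# Editor's note: illustrative sketch (invented values) showing the expected
# input of two equal-length sequences.
def _measure_error_example():
    observed = [1.0, 2.0, 3.0]
    predicted = [1.1, 1.9, 3.2]
    # RMS error = sqrt((0.01 + 0.01 + 0.04) / 3) ~= 0.14
    return measure_error(observed, predicted)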
def make_graph(x_data, y_data, y_model):
# rotate the x labels to fit better
plt.xticks(rotation=90)
# calculate the minimum and maximum life expectancy
# floor rounds down, ceil rounds up
min_y = math.floor(min(y_data))
max_y = math.ceil(max(y_data))
    # evenly space the y axis between the min and max values
    # (interval of 1 for the logarithmic version, 5000 for the linearised version)
    # logarithmic version:
    # yticks = list(range(0, max_y, 1))
    # linearised version:
    yticks = list(range(0, max_y, 5000))
labels = []
for y in yticks:
        # logarithmic version: uncomment the next line and comment out the final append
        # labels.append(int(math.exp(y)))
        # linearised version: keep the plain append below
labels.append(int(y))
print(yticks, labels)
plt.yticks(yticks, labels)
plt.xlim(min(x_data), max(x_data))
# pad by 2% of the total range underneath to make the graph clearer
# plt.ylim(min_y-((max_y-min_y)*0.02), max_y + ((max_y-min_y)*0.02))
# sometimes 0 or a small negative value instead of min_y looks better
# label axes
plt.ylabel("GDP (US $)")
plt.xlabel("Life Expectancy")
# draw the line
plt.scatter(x_data, y_data, label="Original Data")
plt.plot(x_data, y_model, c='orange', label="Line of best fit")
plt.grid()
# enable the legend
plt.legend()
# draw the graph
plt.show()
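# Editor's note: illustrative end-to-end sketch combining least_squares and
# make_graph on invented life-expectancy/GDP pairs; not taken from the real data.
def _make_graph_example():
    x = [50, 55, 60, 65, 70]               # life expectancy
    y = [5000, 9000, 15000, 24000, 38000]  # GDP (US $)
    m, c = least_squares([x, y])
    y_model = [m * xi + c for xi in x]
    make_graph(x, y, y_model)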
def read_data(gdp_file, life_expectancy_file, year):
df_gdp = | pd.read_csv(gdp_file, index_col="Country Name") | pandas.read_csv |
import numpy as np
import os
import csv
import requests
import pandas as pd
import time
import datetime
from stockstats import StockDataFrame as Sdf
from ta import add_all_ta_features
from ta.utils import dropna
from config import config
def load_dataset(*, file_name: str) -> pd.DataFrame:
"""
load csv dataset from path
:return: (df) pandas dataframe
"""
#_data = pd.read_csv(f"{config.DATASET_DIR}/{file_name}")
_data = pd.read_csv(file_name)
return _data
def data_split(df,start,end):
"""
    split the dataset into a training or testing set using dates
:param data: (df) pandas dataframe, start, end
:return: (df) pandas dataframe
"""
data = df[(df.datadate >= start) & (df.datadate < end)]
    data = data.sort_values(['datadate', 'tic'], ignore_index=True)
#data = data[final_columns]
data.index = data.datadate.factorize()[0]
return data
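# Editor's note: illustrative sketch with a tiny hand-made frame; it assumes the
# 'datadate'/'tic' column conventions used throughout this module.
def _data_split_example():
    demo = pd.DataFrame({
        "datadate": [20090101, 20090102, 20160101, 20160102],
        "tic": ["AAPL", "MSFT", "AAPL", "MSFT"],
        "adjcp": [3.0, 20.0, 26.0, 55.0],
    })
    train = data_split(demo, start=20090101, end=20150101)
    trade = data_split(demo, start=20150101, end=20200101)
    return train, trade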
def calcualte_price(df):
"""
    calculate adjusted close price, open-high-low prices and volume
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
data = df.copy()
data = data[['datadate', 'tic', 'prccd', 'ajexdi', 'prcod', 'prchd', 'prcld', 'cshtrd']]
data['ajexdi'] = data['ajexdi'].apply(lambda x: 1 if x == 0 else x)
data['adjcp'] = data['prccd'] / data['ajexdi']
data['open'] = data['prcod'] / data['ajexdi']
data['high'] = data['prchd'] / data['ajexdi']
data['low'] = data['prcld'] / data['ajexdi']
data['volume'] = data['cshtrd']
data = data[['datadate', 'tic', 'adjcp', 'open', 'high', 'low', 'volume']]
data = data.sort_values(['tic', 'datadate'], ignore_index=True)
return data
def add_technical_indicator(df):
"""
    calculate technical indicators
    use the stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
stock = Sdf.retype(df.copy())
stock['close'] = stock['adjcp']
unique_ticker = stock.tic.unique()
macd = pd.DataFrame()
rsi = | pd.DataFrame() | pandas.DataFrame |
# -------------------------------------------------- ML 02/10/2019 ----------------------------------------------------#
#
# This module contains the models (polynomial fit, LOESS and others) used to estimate lambda.
# -------------------------------------------------------------------------------------------------------------------- #
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
import numpy as np
import math
from scipy.optimize import newton
from scipy.special import digamma
import pandas as pd
from sklearn.metrics import make_scorer  # sklearn.metrics.scorer was removed in newer scikit-learn releases
def cude_error(y_true, y_pred):
# sum_cube_abs = np.mean(np.exp(np.abs(y_true - y_pred)))
    sum_cube_abs = np.abs((y_true - y_pred)**2).mean()  # note: despite the name, this is the mean squared error
return sum_cube_abs
my_scorer = make_scorer(cude_error, greater_is_better=False)
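# Editor's note: illustrative check added during editing; despite its name,
# cude_error returns the mean of squared residuals (make_scorer negates it
# because greater_is_better=False).
def _cude_error_example():
    # ((1 - 1)**2 + (4 - 2)**2) / 2 = 2.0
    return cude_error(np.array([1.0, 2.0]), np.array([1.0, 4.0]))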
def best_poly_fit(y, x, max_deg, verbose=1, scoring='neg_root_mean_squared_error'):
# reshaping the input and output variables to have appropriate shape
y = y.reshape(-1, 1)
try:
print('Number of features:', x.shape[1])
except:
x = x.reshape(-1, 1)
if scoring == 'my_scorer':
scoring = my_scorer
# this preprocesses the data
# numeric_features = ['age', 'fare']
# numeric_transformer = Pipeline(steps=[
# ('imputer', SimpleImputer(strategy='median')),
# ('scaler', StandardScaler())])
# categorical_features = ['embarked', 'sex', 'pclass']
# categorical_transformer = Pipeline(steps=[
# ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
# ('onehot', OneHotEncoder(handle_unknown='ignore'))])
# preprocessor = ColumnTransformer(
# transformers=[
# ('num', numeric_transformer)
# #('cat', categorical_transformer)
# ])
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs))
# clf = Pipeline(steps=[#('preprocessor', preprocessor),
# ('linearregression', PolynomialRegression())])
#
# param_grid = {#'preprocessor__num__imputer__strategy': ['mean'],
# 'linearregression__polynomialfeatures__degree': np.arange(10),
# 'linearregression__fit_intercept': [True, False],
# 'linearregression__normalize': [True, False]}
param_grid = {'polynomialfeatures__degree': np.arange(max_deg)}
if verbose==0: verbose = 1
poly_grid = GridSearchCV(PolynomialRegression(), param_grid,
cv=10,
scoring=scoring,
verbose=verbose-1)
# doing grid search
poly_grid.fit(x,y)
# fit on the best parameters
poly_grid.best_estimator_.fit(x,y)
pred = poly_grid.predict(x)
var = np.var(pred-y)
poly_grid._total_var = var
poly_grid._total_std = np.sqrt(var)
return poly_grid
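# Editor's note: illustrative usage sketch on synthetic quadratic data (values
# invented); it only demonstrates the expected call signature.
def _best_poly_fit_example():
    rng = np.random.RandomState(0)
    x = np.linspace(-1, 1, 50)
    y = 3 * x ** 2 + rng.normal(scale=0.1, size=x.shape)
    grid = best_poly_fit(y, x, max_deg=5, verbose=0)
    # degree selected by the grid search (expected to be 2 for this data)
    return grid.best_params_['polynomialfeatures__degree']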
def get_gpr_fit(y,x):
# reshaping the input and output variables to have appropriate shape
y = y.reshape(-1, 1)
try:
print('Number of features:', x.shape[1])
except:
x = x.reshape(-1, 1)
gpr_fit = GaussianProcessRegressor(random_state = 0).fit(x, y)
# print(gpr_fit.score(x, y))
# aaaa = gpr_fit.predict(x, return_std=True)
return gpr_fit
def loc_eval(x, b):
"""
Evaluate `x` using locally-weighted regression parameters.
Degree of polynomial used in loess is inferred from b. `x`
is assumed to be a scalar.
"""
loc_est = 0
for i in enumerate(b): loc_est+=i[1]*(x**i[0])
return(loc_est)
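# Editor's note: tiny illustrative check added during editing. Coefficients are
# given in ascending order of power, so [1, 2, 3] represents 1 + 2*x + 3*x**2.
def _loc_eval_example():
    return loc_eval(2, [1, 2, 3])  # 1 + 2*2 + 3*4 = 17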
def loess_fit(xvals, yvals, alpha, poly_degree=1, robustify=False):
"""
link - http://www.jtrive.com/loess-nonparametric-scatterplot-smoothing-in-python.html#Footnotes:
Perform locally-weighted regression via xvals & yvals.
Variables used within `loess` function:
n => number of data points in xvals
m => nbr of LOESS evaluation points
q => number of data points used for each
locally-weighted regression
v => x-value locations for evaluating LOESS
locsDF => contains local regression details for each
location v
evalDF => contains actual LOESS output for each v
X => n-by-(poly_degree+1) design matrix
W => n-by-n diagonal weight matrix for each
local regression
y => yvals
b => local regression coefficient estimates.
b = `(X^T*W*X)^-1*X^T*W*y`. Note that `@`
replaces np.dot in recent numpy versions.
local_est => response for local regression
"""
# sort dataset by xvals:
all_data = sorted(zip(xvals, yvals), key=lambda x: x[0])
xvals, yvals = zip(*all_data)
locsDF = pd.DataFrame(
columns=[
'loc','x','weights','v','y','raw_dists',
'scale_factor','scaled_dists'
])
evalDF = pd.DataFrame(
columns=[
'loc','est','b','v','g'
])
n = len(xvals)
m = n + 1
q = int(np.floor(n * alpha) if alpha <= 1.0 else n)
avg_interval = ((max(xvals)-min(xvals))/len(xvals))
v_lb = max(0,min(xvals)-(.5*avg_interval))
v_ub = (max(xvals)+(.5*avg_interval))
v = enumerate(np.linspace(start=v_lb, stop=v_ub, num=m), start=1)
# Generate design matrix based on poly_degree.
xcols = [np.ones_like(xvals)]
for j in range(1, (poly_degree + 1)):
xcols.append([i ** j for i in xvals])
X = np.vstack(xcols).T
for i in v:
iterpos = i[0]
iterval = i[1]
# Determine q-nearest xvals to iterval.
iterdists = sorted([(j, np.abs(j-iterval)) \
for j in xvals], key=lambda x: x[1])
_, raw_dists = zip(*iterdists)
# Scale local observations by qth-nearest raw_dist.
scale_fact = raw_dists[q-1]
scaled_dists = [(j[0],(j[1]/scale_fact)) for j in iterdists]
weights = [(j[0],((1-np.abs(j[1]**3))**3 \
if j[1]<=1 else 0)) for j in scaled_dists]
# Remove xvals from each tuple:
_, weights = zip(*sorted(weights, key=lambda x: x[0]))
_, raw_dists = zip(*sorted(iterdists, key=lambda x: x[0]))
_, scaled_dists = zip(*sorted(scaled_dists,key=lambda x: x[0]))
iterDF1 = pd.DataFrame({
'loc' :iterpos,
'x' :xvals,
'v' :iterval,
'weights' :weights,
'y' :yvals,
'raw_dists' :raw_dists,
'scale_fact' :scale_fact,
'scaled_dists':scaled_dists
})
locsDF = pd.concat([locsDF, iterDF1])
W = np.diag(weights)
y = yvals
b = np.linalg.inv(X.T @ W @ X) @ (X.T @ W @ y)
local_est = loc_eval(iterval, b)
try:
actual_y = y[iterpos]
diff = local_est-actual_y
except:
diff = 0
iterDF2 = pd.DataFrame({
'loc':[iterpos],
'b' :[b],
'v' :[iterval],
'g' :[local_est],
'diff':[diff]
})
evalDF = pd.concat([evalDF, iterDF2])
# Reset indicies for returned DataFrames.
locsDF.reset_index(inplace=True)
locsDF.drop('index', axis=1, inplace=True)
locsDF['est'] = 0; evalDF['est'] = 0
locsDF = locsDF[['loc','est','v','x','y','raw_dists',
'scale_fact','scaled_dists','weights']]
if robustify==True:
cycle_nbr = 1
robust_est = [evalDF]
while True:
# Perform iterative robustness procedure for each local regression.
# Evaluate local regression for each item in xvals.
#
# e1_i => raw residuals
# e2_i => scaled residuals
# r_i => robustness weight
revalDF = pd.DataFrame(
columns=['loc','est','v','b','g']
)
for i in robust_est[-1]['loc']:
prevDF = robust_est[-1]
locDF = locsDF[locsDF['loc']==i]
b_i = prevDF.loc[prevDF['loc']==i,'b'].item()
w_i = locDF['weights']
v_i = prevDF.loc[prevDF['loc']==i, 'v'].item()
g_i = prevDF.loc[prevDF['loc']==i, 'g'].item()
e1_i = [k-loc_eval(j,b_i) for (j,k) in zip(xvals,yvals)]
e2_i = [j/(6*np.median(np.abs(e1_i))) for j in e1_i]
r_i = [(1-np.abs(j**2))**2 if np.abs(j)<1 else 0 for j in e2_i]
w_f = [j*k for (j,k) in zip(w_i, r_i)] # new weights
W_r = np.diag(w_f)
b_r = np.linalg.inv(X.T @ W_r @ X) @ (X.T @ W_r @ y)
riter_est = loc_eval(v_i, b_r)
riterDF = pd.DataFrame({
'loc':[i],
'b' :[b_r],
'v' :[v_i],
'g' :[riter_est],
'est':[cycle_nbr]
})
revalDF = | pd.concat([revalDF, riterDF]) | pandas.concat |
from os.path import exists, join
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import trange
from math import ceil
from traitlets import Dict, List
from ctapipe.core import Tool
from targetpipe.fitting.spe_sipm import sipm_spe_fit
from targetpipe.fitting.chec import CHECSSPEFitter, CHECSSPEMultiFitter
from targetpipe.plots.official import ThesisPlotter
from IPython import embed
def get_params(lambda_=1):
params = dict(
norm=1000,
eped=-0.6,
eped_sigma=0.4,
spe=1.4,
spe_sigma=0.2,
lambda_=lambda_,
opct=0.6,
pap=0.3,
dap=0.4
)
return params.copy()
def get_params_multi(params1, params2, params3):
params_multi = dict(
norm1=params1['norm'],
norm2=params2['norm'],
norm3=params3['norm'],
eped=params1['eped'],
eped_sigma=params1['eped_sigma'],
spe=params1['spe'],
spe_sigma=params1['spe_sigma'],
lambda_1=params1['lambda_'],
lambda_2=params2['lambda_'],
lambda_3=params3['lambda_'],
opct=params1['opct'],
pap=params1['pap'],
dap=params1['dap']
)
return params_multi.copy()
def get_initial(lambda_=1):
params = dict(
norm=None,
eped=-0.5,
eped_sigma=0.5,
spe=1,
spe_sigma=0.1,
lambda_=lambda_,
opct=0.5,
pap=0.5,
dap=0.5
)
return params.copy()
def get_initial_multi(initial1, initial2, initial3):
params_multi = dict(
norm1=initial1['norm'],
norm2=initial2['norm'],
norm3=initial3['norm'],
eped=initial1['eped'],
eped_sigma=initial1['eped_sigma'],
spe=initial1['spe'],
spe_sigma=initial1['spe_sigma'],
lambda_1=initial1['lambda_'],
lambda_2=initial2['lambda_'],
lambda_3=initial3['lambda_'],
opct=initial1['opct'],
pap=initial1['pap'],
dap=initial1['dap']
)
return params_multi.copy()
def sample_distribution(x, params, n=30000):
y = sipm_spe_fit(x, **params)
    samples = np.random.choice(x, n, p=y / y.sum())  # use the `n` argument rather than a hard-coded sample count
return samples, y
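# Editor's note: illustrative sketch added during editing; it draws samples from
# the SPE spectrum defined by get_params using the module's default parameters.
def _sample_distribution_example():
    x = np.linspace(-3, 10, 1000)
    samples, y = sample_distribution(x, get_params(lambda_=1.0))
    hist, edges = np.histogram(samples, bins=100, range=(-3, 10))
    return hist, edges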
class FitPlotter(ThesisPlotter):
def __init__(self, config, tool, **kwargs):
super().__init__(config, tool, **kwargs)
self.figures = dict()
def plot(self):
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter1.initial = get_initial(1)
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter2.initial = get_initial(2)
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter3.initial = get_initial(3)
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
fitter_multi.initial = get_initial_multi(fitter1.initial, fitter2.initial, fitter3.initial)
# Generate the functions
found_good = False
found_bad = False
i = 0
while not found_good or not found_bad:
self.log.info("FitPlotter: Attempt {}".format(i))
i += 1
params1 = get_params(1.2)
params2 = get_params(1.7)
params3 = get_params(3.1)
x = np.linspace(-3, 10, 1000)
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
params_multi = get_params_multi(params1, params2, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
print(pm, p1, p2, p3)
if (pm > p1) & (pm > p2) & (pm > p3) & (p1 < 0.0001):
if found_good:
continue
self.log.info("FitPlotter: Found good")
found_good = True
desc = "good"
elif (pm < 0.001) & (p3 > 0.001):
if found_bad:
continue
self.log.info("FitPlotter: Found bad")
found_bad = True
desc = "bad"
else:
continue
fig_individual = plt.figure(figsize=(13, 6))
fig_individual.suptitle("Individual Fit")
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax1_t = plt.subplot2grid((3, 2), (0, 1))
ax2 = plt.subplot2grid((3, 2), (1, 0))
ax2_t = plt.subplot2grid((3, 2), (1, 1))
ax3 = plt.subplot2grid((3, 2), (2, 0))
ax3_t = plt.subplot2grid((3, 2), (2, 1))
self.individual_plot(x, y1, params1, samples1, fitter1, ax1, ax1_t, True)
self.individual_plot(x, y2, params2, samples2, fitter2, ax2, ax2_t)
self.individual_plot(x, y3, params3, samples3, fitter3, ax3, ax3_t)
name = "fit_" + desc + "_individual"
self.figures[name] = fig_individual
fig_multi = plt.figure(figsize=(13, 6))
fig_multi.suptitle("Multi Fit")
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax2 = plt.subplot2grid((3, 2), (1, 0))
ax3 = plt.subplot2grid((3, 2), (2, 0))
ax_mt = plt.subplot2grid((3, 2), (0, 1), rowspan=3)
self.multi_plot(x, [y1, y2, y3], params_multi, [samples1, samples2, samples3], fitter_multi, [ax1, ax2, ax3], ax_mt)
name = "fit_" + desc + "_multi"
self.figures[name] = fig_multi
def save(self, output_path=None):
for name, fig in self.figures.items():
self.fig = fig
self.figure_name = name
super().save(output_path)
@staticmethod
def individual_plot(x, y, params, samples, fitter, ax_p, ax_t, legend=False):
hist = fitter.hist
edges = fitter.edges
between = fitter.between
coeff = fitter.coeff.copy()
coeffl = fitter.coeff_list.copy()
initial = fitter.initial.copy()
fit = fitter.fit_function(x, **coeff)
rc2 = fitter.reduced_chi2
pval = fitter.p_value
ax_p.plot(x, y, label="Base")
ax_p.hist(between, bins=edges, weights=hist, histtype='step', label="Hist")
ax_p.plot(x, fit, label="Fit")
td = [['%.3f' % params[i], initial[i], '%.3f' % coeff[i]] for i in coeffl]
td.append(["", "", '%.3g' % rc2])
td.append(["", "", '%.3g' % pval])
tr = coeffl
tr.append("Reduced Chi^2")
tr.append("P-Value")
tc = ['Base', 'Initial', 'Fit']
ax_t.axis('off')
table = ax_t.table(cellText=td, rowLabels=tr, colLabels=tc, loc='center')
table.set_fontsize(6)
table.scale(0.7, 0.7)
if legend:
ax_p.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
@staticmethod
def multi_plot(x, y_list, params, samples_list, fitter, ax_list, ax_t):
y1, y2, y3 = y_list
samples1, samples2, samples3 = samples_list
ax1, ax2, ax3 = ax_list
hist1, hist2, hist3 = fitter.hist
edges = fitter.edges
between = fitter.between
coeff = fitter.coeff.copy()
coeffl = fitter.coeff_list.copy()
initial = fitter.initial.copy()
fit1, fit2, fit3 = fitter.fit_function(x, **coeff)
rc2 = fitter.reduced_chi2
pval = fitter.p_value
ax1.plot(x, y1, label="Base")
ax1.hist(between, bins=edges, weights=hist1, histtype='step', label="Hist")
ax1.plot(x, fit1, label="Fit")
ax1.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
ax2.plot(x, y2, label="Base")
ax2.hist(between, bins=edges, weights=hist2, histtype='step', label="Hist")
ax2.plot(x, fit2, label="Fit")
ax3.plot(x, y3, label="Base")
ax3.hist(between, bins=edges, weights=hist3, histtype='step', label="Hist")
ax3.plot(x, fit3, label="Fit")
ax_t.axis('off')
td = [['%.3f' % params[i], initial[i], '%.3f' % coeff[i]] for i in coeffl]
td.append(["", "", '%.3g' % rc2])
td.append(["", "", '%.3g' % pval])
tr = coeffl
tr.append("Reduced Chi^2")
tr.append("P-Value")
tc = ['Base', 'Initial', 'Fit']
table = ax_t.table(cellText=td, rowLabels=tr, colLabels=tc, loc='center')
table.set_fontsize(6)
class NoInitialPlotter(ThesisPlotter):
def __init__(self, config, tool, **kwargs):
super().__init__(config, tool, **kwargs)
self.figures = dict()
self.dataset_path = self.output_path + "_data.h5"
self.initial1 = 1
self.initial2 = 1
self.initial3 = 1
self.figures = {}
def plot(self):
df = self.load_dataset()
df = df[df > 0.01].groupby('x').count().reset_index()
x = df['x']
y1 = df['p1']
y2 = df['p2']
y3 = df['p3']
ym = df['pm']
x = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in x]
self.fig, self.ax = self.create_figure()
self.add_points(x, y1, "Individual1")
self.add_points(x, y2, "Individual2")
self.add_points(x, y3, "Individual3")
self.add_points(x, ym, "Multi")
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Number of signficant p-values")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_p"] = self.fig
def add_points(self, x, y, label, p='-'):
x_i = np.arange(len(x))
self.ax.plot(x_i, y, p, label=label)
self.ax.set_xticks(x_i)
self.ax.set_xticklabels(x)
def add_points_err(self, x, y, y_err, label):
x_i = np.arange(len(x))
(_, caps, _) = self.ax.errorbar(x_i, y, xerr=None, yerr=y_err, fmt='o',
mew=0.5, label=label,
markersize=3, capsize=3)
for cap in caps:
cap.set_markeredgewidth(1)
self.ax.set_xticks(x_i)
self.ax.set_xticklabels(x)
def save(self, output_path=None):
for name, fig in self.figures.items():
self.figure_name = name
self.fig = fig
super().save(output_path)
def load_dataset(self):
if exists(self.dataset_path):
store = pd.HDFStore(self.dataset_path)
df = store['df']
else:
df = self.create_dataset()
store = pd.HDFStore(self.dataset_path)
store['df'] = df
return df
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter1.initial = get_initial(1)
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter2.initial = get_initial(1)
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter3.initial = get_initial(1)
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
fitter_multi.initial = get_initial_multi(fitter1.initial, fitter2.initial, fitter3.initial)
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
df_list.append(dict(x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm))
df = pd.DataFrame(df_list)
return df
class WithInitialPlotter(NoInitialPlotter):
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
fitter1.initial = get_initial(round(lambda_1[i]))
fitter2.initial = get_initial(round(lambda_2[i]))
fitter3.initial = get_initial(round(lambda_3[i]))
fitter_multi.initial = get_initial_multi(fitter1.initial,
fitter2.initial,
fitter3.initial)
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
df_list.append(dict(x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm))
df = pd.DataFrame(df_list)
return df
class CeilInitialPlotter(NoInitialPlotter):
def plot(self):
super().plot()
df = self.load_dataset()
u_i, u = | pd.factorize(df['x']) | pandas.factorize |
"""Script for training for incremental learing."""
import time
from itertools import zip_longest
from copy import deepcopy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.gaussian_process.kernels import RBF
from sklearn.metrics.pairwise import rbf_kernel
from dppy.finite_dpps import FiniteDPP
import torch
import torch.nn as nn
import torchvision
import torch.utils.data
from tensorboardX import SummaryWriter
from tqdm import tqdm
from alphapose.models import builder
from alphapose.opt import cfg, logger, opt
from alphapose.utils.logger import board_writing, debug_writing
from alphapose.utils.metrics import DataLogger, calc_accuracy
from animal_data_loader import AnimalDatasetCombined, ToTensor
from utils import *
def train(opt, train_loader, m, criterion, optimizer, writer, phase="Train"):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
train_loader = tqdm(train_loader, dynamic_ncols=True)
for i, (inps, labels, label_masks, _) in enumerate(train_loader):
if isinstance(inps, list):
inps = [inp.cuda().requires_grad_() for inp in inps]
else:
inps = inps.cuda().requires_grad_()
labels = labels.cuda()
label_masks = label_masks.cuda()
output = m(inps)
if cfg.LOSS.TYPE == "SmoothL1":
loss = criterion(output.mul(label_masks), labels.mul(label_masks))
if cfg.LOSS.get("TYPE") == "MSELoss":
loss = 0.5 * criterion(output.mul(label_masks), labels.mul(label_masks))
acc = calc_accuracy(output.mul(label_masks), labels.mul(label_masks))
if isinstance(inps, list):
batch_size = inps[0].size(0)
else:
batch_size = inps.size(0)
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(
writer, loss_logger.avg, acc_logger.avg, opt.trainIters, phase
)
# Debug
if opt.debug and not i % 10:
debug_writing(writer, output, labels, inps, opt.trainIters)
# TQDM
train_loader.set_description(
"loss: {loss:.8f} | acc: {acc:.4f}".format(
loss=loss_logger.avg, acc=acc_logger.avg
)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def validate(m, val_loader, opt, cfg, writer, criterion, batch_size=1):
loss_logger_val = DataLogger()
acc_logger = DataLogger()
m.eval()
val_loader = tqdm(val_loader, dynamic_ncols=True)
for inps, labels, label_masks, _ in val_loader:
if isinstance(inps, list):
inps = [inp.cuda() for inp in inps]
else:
inps = inps.cuda()
labels = labels.cuda()
label_masks = label_masks.cuda()
output = m(inps)
loss = criterion(output.mul(label_masks), labels.mul(label_masks))
acc = calc_accuracy(output.mul(label_masks), labels.mul(label_masks))
loss_logger_val.update(loss, batch_size)
acc_logger.update(acc, batch_size)
# TQDM
val_loader.set_description(
"Loss: {loss:.4f} acc: {acc:.4f}".format(
loss=loss_logger_val.avg, acc=acc_logger.avg
)
)
val_loader.close()
return loss_logger_val.avg, acc_logger.avg
def train_balanced(
opt,
old_train_loader,
new_train_loader,
m,
m_prev,
criterion,
optimizer,
writer,
phase="Balanced_Finetuning",
):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
m_prev.eval()
train_loader = tqdm(zip(old_train_loader, new_train_loader), dynamic_ncols=True)
for j, (data1, data2) in enumerate(train_loader):
inps_old, labels_old, label_masks_old, _ = data1
inps_new, labels_new, label_masks_new, _ = data2
if isinstance(inps_old, list):
inps_old = [inp.cuda().requires_grad_() for inp in inps_old]
else:
inps_old = inps_old.cuda().requires_grad_()
if isinstance(inps_old, list):
inps_new = [inp.cuda().requires_grad_() for inp in inps_new]
else:
inps_new = inps_new.cuda().requires_grad_()
labels_old = labels_old.cuda()
label_masks_old = label_masks_old.cuda()
labels_new = labels_new.cuda()
label_masks_new = label_masks_new.cuda()
# print(labels_old.shape, label_masks_old.shape, inps_old.shape)
# print(labels_new.shape, label_masks_new.shape, inps_new.shape)
output_old = m(inps_old)
output_new = m(inps_new)
output_teacher = m_prev(inps_new)
# print(output_old.shape, output_new.shape, output_teacher.shape)
# print(labels_new.mul(label_masks_new))
loss_orig_old = 0.5 * criterion(
output_old.mul(label_masks_old), labels_old.mul(label_masks_old)
)
loss_orig_new = 0.5 * criterion(
output_new.mul(label_masks_new), labels_new.mul(label_masks_new)
)
loss_kd = 0.5 * criterion(
output_new.mul(label_masks_new), output_teacher.mul(label_masks_new)
)
acc = (
calc_accuracy(
output_old.mul(label_masks_old), labels_old.mul(label_masks_old)
)
+ calc_accuracy(
output_new.mul(label_masks_new), labels_new.mul(label_masks_new)
)
) / 2
loss = loss_orig_old + loss_orig_new + loss_kd
# loss = loss_kd
if isinstance(inps_new, list):
batch_size = inps_new[0].size(0) * 2
else:
batch_size = inps_new.size(0) * 2
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(
writer, loss_logger.avg, acc_logger.avg, opt.trainIters, phase
)
# Debug
if opt.debug and not j % 10:
debug_writing(writer, output_new, labels_new, inps_new, opt.trainIters)
# TQDM
train_loader.set_description(
"loss: {loss:.8f} | acc: {acc:.4f}".format(
loss=loss_logger.avg, acc=acc_logger.avg
)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def train_kd(opt, train_loader, m, m_prev, criterion, optimizer, writer, phase="Train"):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
train_loader = tqdm(train_loader, dynamic_ncols=True)
for i, (inps, labels, label_masks, _) in enumerate(train_loader):
if isinstance(inps, list):
inps = [inp.cuda().requires_grad_() for inp in inps]
else:
inps = inps.cuda().requires_grad_()
labels = labels.cuda()
label_masks = label_masks.cuda()
output = m(inps)
output_teacher = m_prev(inps)
loss_orig = 0.5 * criterion(output.mul(label_masks), labels.mul(label_masks))
loss_kd = 0.5 * criterion(
output.mul(label_masks), output_teacher.mul(label_masks)
)
acc = calc_accuracy(output.mul(label_masks), labels.mul(label_masks))
loss = loss_orig + loss_kd
if isinstance(inps, list):
batch_size = inps[0].size(0)
else:
batch_size = inps.size(0)
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(
writer, loss_logger.avg, acc_logger.avg, opt.trainIters, phase
)
# Debug
if opt.debug and not i % 10:
debug_writing(writer, output, labels, inps, opt.trainIters)
# TQDM
train_loader.set_description(
"loss: {loss:.8f} | acc: {acc:.4f}".format(
loss=loss_logger.avg, acc=acc_logger.avg
)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def train_icarl(
opt,
new_train_loader,
old_train_loader,
m,
m_prev,
criterion,
optimizer,
writer,
phase="Train",
):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
train_loader = tqdm(
zip_longest(new_train_loader, old_train_loader, fillvalue=None),
total=max(len(new_train_loader), len(old_train_loader)),
dynamic_ncols=True,
)
for i, (data_new, data_old) in enumerate(train_loader):
if data_new:
inps_new, labels_new, label_masks_new, _ = data_new
if isinstance(inps_new, list):
inps_new = [inp_new.cuda().requires_grad_() for inp_new in inps_new]
else:
inps_new = inps_new.cuda().requires_grad_()
labels_new = labels_new.cuda()
label_masks_new = label_masks_new.cuda()
output_new = m(inps_new)
if data_old:
inps_old, labels_old, label_masks_old, _ = data_old
if data_old:
if isinstance(inps_old, list):
inps_old = [inp_old.cuda().requires_grad_() for inp_old in inps_old]
else:
inps_old = inps_old.cuda().requires_grad_()
labels_old = labels_old.cuda()
label_masks_old = label_masks_old.cuda()
output_old = m(inps_old)
output_teacher = m_prev(inps_old)
if data_new:
loss_new = 0.5 * criterion(
output_new.mul(label_masks_new), labels_new.mul(label_masks_new)
)
acc_new = 0.5 * calc_accuracy(
output_new.mul(label_masks_new), labels_new.mul(label_masks_new)
)
if data_old:
loss_kd = criterion(
output_old.mul(label_masks_old), output_teacher.mul(label_masks_old)
)
acc_kd = calc_accuracy(
output_old.mul(label_masks_old), labels_old.mul(label_masks_old)
)
if isinstance(inps_old, list):
batch_size = inps_old[0].size(0)
else:
batch_size = inps_old.size(0)
if data_new and data_old:
loss = loss_new + loss_kd
acc = (acc_new + acc_kd) / 2
loss_logger.update(loss.item(), 2 * batch_size)
acc_logger.update(acc, 2 * batch_size)
elif data_new and not data_old:
loss = loss_new
acc = acc_new
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
elif not data_new and data_old:
            loss = loss_kd
            acc = acc_kd  # fix: `acc` was not defined in this branch before
            loss_logger.update(loss.item(), batch_size)
            acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(
writer, loss_logger.avg, acc_logger.avg, opt.trainIters, phase
)
# Debug
if opt.debug and not i % 10:
debug_writing(writer, output_new, labels_new, inps_new, opt.trainIters)
# TQDM
train_loader.set_description(
"loss: {loss:.8f} | acc: {acc:.4f}".format(
loss=loss_logger.avg, acc=acc_logger.avg
)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def train_kd_mixup(
opt, train_loader, m, m_prev, criterion, optimizer, writer, phase="Train"
):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
m_prev.eval()
train_loader = tqdm(train_loader, dynamic_ncols=True)
for i, (inps, labels, label_masks, _) in enumerate(train_loader):
inps_flipped = torch.flip(inps, (3,))
t = np.random.uniform(0, 1, size=(inps.shape[0],))
inps_mix_up = []
for j in range(inps.shape[0]):
inps_mix_up.append(
t[j] * inps[j].detach().cpu().numpy()
+ (1 - t[j]) * inps_flipped[j].detach().cpu().numpy()
)
inps_mix_up = np.array(inps_mix_up)
inps_mix_up = torch.FloatTensor(inps_mix_up)
if isinstance(inps, list):
inps = [inp.cuda().requires_grad_() for inp in inps]
inps_mix_up = [inp.cuda().requires_grad_() for inp in inps_mix_up]
else:
inps = inps.cuda().requires_grad_()
inps_mix_up = inps_mix_up.cuda().requires_grad_()
labels = labels.cuda()
label_masks = label_masks.cuda()
output = m(inps)
loss_gt = criterion(output.mul(label_masks), labels.mul(label_masks))
output_teacher = m_prev(inps_mix_up)
output = m(inps_mix_up)
loss_kd = criterion(output.mul(label_masks), output_teacher.mul(label_masks))
acc = calc_accuracy(output.mul(label_masks), labels.mul(label_masks))
loss = 0.25 * loss_kd + 0.5 * loss_gt
if isinstance(inps, list):
batch_size = 2 * inps[0].size(0)
else:
batch_size = 2 * inps.size(0)
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(
writer, loss_logger.avg, acc_logger.avg, opt.trainIters, phase
)
# Debug
if opt.debug and not i % 10:
debug_writing(writer, output, labels, inps, opt.trainIters)
# TQDM
train_loader.set_description(
"loss: {loss:.8f} | acc: {acc:.4f}".format(
loss=loss_logger.avg, acc=acc_logger.avg
)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def cluster_dpp(keypoints_list, keypoints_to_fname, samples_per_class, animal_class):
n_clusters = int(np.ceil(samples_per_class / 51))
if animal_class == "horse":
n_clusters += 1
k_orig = int(samples_per_class / n_clusters)
print(f"Samples expected: {samples_per_class}")
km = KMeans(n_clusters=n_clusters)
km.fit(keypoints_list)
keypoint_list_clusters = []
for clus in range(n_clusters):
temp1 = keypoints_list[ClusterIndicesNumpy(clus, km.labels_)]
# print(temp1.shape)
k = min(k_orig, np.linalg.matrix_rank(temp1))
Phi = temp1.dot(temp1.T)
DPP = FiniteDPP("likelihood", **{"L": Phi})
# for _ in range(5):
DPP.sample_exact_k_dpp(size=k)
max_det = 0
index_of_samples = DPP.list_of_samples[0]
# for j in range(5):
# matrix = np.array(Phi)
# submatrix = matrix[np.ix_(DPP.list_of_samples[j], DPP.list_of_samples[j])]
# try:
# det = np.linalg.det(submatrix)
# if det > max_det:
# max_det = det
# index_of_samples = DPP.list_of_samples[j]
# except:
# continue
temp = temp1[index_of_samples]
for j in temp:
keypoint_list_clusters.append(j)
images_list = []
for j in keypoint_list_clusters:
images_list.append(keypoints_to_fname[str(j)])
return images_list
def rbf_dpp(keypoints_list, keypoints_to_fname, samples_per_class, gamma=50):
since = time.time()
Phi = rbf_kernel(keypoints_list, gamma=gamma)
k = samples_per_class
eig_vals, eig_vecs = np.linalg.eigh(Phi)
# DPP = FiniteDPP("likelihood", **{"L": Phi})
DPP = FiniteDPP("likelihood", **{"L_eig_dec": (eig_vals, eig_vecs)})
for _ in range(5):
DPP.sample_exact_k_dpp(size=k)
max_det = 0
index_of_samples = DPP.list_of_samples[0]
print(f"Time Taken for sampling points: {time.time() - since}")
for j in range(5):
matrix = np.array(Phi)
submatrix = matrix[np.ix_(DPP.list_of_samples[j], DPP.list_of_samples[j])]
try:
det = np.linalg.det(submatrix)
if det > max_det:
max_det = det
index_of_samples = DPP.list_of_samples[j]
except:
continue
temp = keypoints_list[index_of_samples]
images_list = []
for j in temp:
images_list.append(keypoints_to_fname[str(j)])
print(len(images_list))
return images_list
def herding(keypoints_list, animal_list, samples_per_class):
animal_avg = np.mean(keypoints_list, axis=0)
final_animal_vec = calc_dist(animal_avg, animal_list)
final_animal_vec.sort(key=lambda x: x[1])
images_list = []
for vec in final_animal_vec[:samples_per_class]:
images_list.append(vec[0][0])
return images_list
def cluster(keypoints_list, keypoints_to_fname, samples_per_class, cfg):
plotX = pd.DataFrame(np.array(keypoints_list))
plotX.columns = np.arange(0, np.array(keypoints_list).shape[1])
if cfg.SAMPLING.N_CLUSTERS == 0:
n_clusters = int(np.ceil(samples_per_class / 51))
else:
n_clusters = cfg.SAMPLING.N_CLUSTERS
km = KMeans(n_clusters=n_clusters)
km.fit(keypoints_list)
pca = PCA(n_components=2)
PCs_2d = pd.DataFrame(pca.fit_transform(plotX))
PCs_2d.columns = ["PC1_2d", "PC2_2d"]
plotX = pd.concat([plotX, PCs_2d], axis=1, join="inner")
clusters = km.predict(keypoints_list)
plotX["Cluster"] = clusters
if cfg.SAMPLING.N_CLUSTERS == 0:
samples_per_cluster = min(samples_per_class, 51)
else:
samples_per_cluster = int(samples_per_class / n_clusters)
keypoint_list_clusters = []
clusters_data = {}
for clus in range(n_clusters):
if cfg.SAMPLING.CLUSTER_PROPORTION != "same":
samples_per_cluster = samples_per_class * int(
len(keypoints_list[ClusterIndicesNumpy(clus, km.labels_)])
/ len(keypoints_list)
+ 1
)
if cfg.SAMPLING.CLUSTER_SAMPLING == "random":
temp = keypoints_list[ClusterIndicesNumpy(clus, km.labels_)][
:samples_per_cluster
]
elif cfg.SAMPLING.CLUSTER_SAMPLING == "dist":
d = km.transform(keypoints_list)[:, clus]
dist_tup = list(enumerate(d))
l = sorted(dist_tup, key=lambda i: i[1])
rng = l[-1][1] - l[0][1]
temp1, temp2, temp3, temp4 = [], [], [], []
for dist in l:
if dist[1] < l[0][1] + 0.25 * rng:
temp1.append(keypoints_list[dist[0]])
elif dist[1] >= l[0][1] + 0.25 * rng and dist[1] < l[0][1] + 0.50 * rng:
temp2.append(keypoints_list[dist[0]])
elif dist[1] >= l[0][1] + 0.50 * rng and dist[1] < l[0][1] + 0.75 * rng:
temp3.append(keypoints_list[dist[0]])
else:
temp4.append(keypoints_list[dist[0]])
total_len = len(temp1) + len(temp2) + len(temp3) + len(temp4)
samples_1 = round(samples_per_cluster * (len(temp1) / total_len))
samples_2 = round(samples_per_cluster * (len(temp2) / total_len))
samples_3 = round(samples_per_cluster * (len(temp3) / total_len))
samples_4 = round(samples_per_cluster * (len(temp4) / total_len))
temp1 = temp1[:samples_1]
temp2 = temp2[:samples_2]
temp3 = temp3[:samples_3]
temp4 = temp4[:samples_4]
temp3.extend(temp4)
temp2.extend(temp3)
temp1.extend(temp2)
temp = temp1
elif cfg.SAMPLING.CLUSTER_SAMPLING == "dpp":
temp1 = keypoints_list[ClusterIndicesNumpy(clus, km.labels_)]
Phi = temp1.dot(temp1.T)
DPP = FiniteDPP("likelihood", **{"L": Phi})
k = 50
for _ in range(5):
DPP.sample_exact_k_dpp(size=k)
max_det = 0
index_of_samples = DPP.list_of_samples[0]
for j in range(5):
matrix = np.array(Phi)
submatrix = matrix[
np.ix_(DPP.list_of_samples[j], DPP.list_of_samples[j])
]
try:
det = np.linalg.det(submatrix)
if det > max_det:
max_det = det
index_of_samples = DPP.list_of_samples[j]
except:
continue
temp = temp1[index_of_samples]
else:
d = km.transform(keypoints_list)[:, clus]
ind = np.argsort(d)[::][:samples_per_cluster]
temp = keypoints_list[ind]
clusters_data[str(clus)] = plotX[plotX["Cluster"] == clus]
for j in temp:
keypoint_list_clusters.append(j)
fig, ax = plt.subplots()
for key in clusters_data.keys():
ax.scatter(
clusters_data[key]["PC1_2d"], clusters_data[key]["PC2_2d"], label=key,
)
centroids = km.cluster_centers_
centroids = pca.transform(np.array(centroids))
ax.scatter(centroids[:, 0], centroids[:, 1], s=80)
plotS = pd.DataFrame(np.array(keypoint_list_clusters))
PCs_2dS = pd.DataFrame(pca.transform(plotS))
PCs_2dS.columns = ["PC1_2d", "PC2_2d"]
plotS = | pd.concat([plotS, PCs_2dS], axis=1, join="inner") | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 21 17:26:33 2020
@author: pedro
"""
from typing import List
import pandas as pd
from representation.individual import Individual
from sklearn.tree import _tree
import numpy as np
class evoltree(object):
"""
evoltree object.
"""
def __init__(self):
self.params = {}
self.population = []
self.stats = {}
self.fitted = False
def fit(self, X: pd.DataFrame, y: pd.Series, pos_label: str,
X_val: pd.DataFrame = None, y_val: pd.Series = None, pop : int =100,
gen : int =100, lamarck : bool =True, multicore: bool =True,
**extra_params):
"""
Build a population of Evolutionary Decision Trees from the training set (X, y).
It uses AUC and Tree complexity as metrics.
Parameters
----------
X : pd.DataFrame
The training input samples.
y : pd.Series
The target values (class labels) as integers or strings.
pos_label : str
Positive class label. Used to compute AUC.
X_val : pd.DataFrame, optional
Validation input samples, used to sort the population. The default is None.
y_val : pd.Series, optional
Validation target values (class values), used to sort the population. The default is None.
pop : int, optional
Number of Evolutionary Decision Trees in the population. The default is 100.
gen : int, optional
Number of generations (iterations) of training. The default is 100.
lamarck : bool, optional
            If Lamarckian Evolution is used. The default is True.
multicore : bool, optional
If parallelism is used. The default is True.
**extra_params : TYPE
Extra parameters. For details, please check: https://github.com/PonyGE/PonyGE2/wiki/Evolutionary-Parameters.
Returns
-------
self
Fitted Evolutionary Decision Trees.
"""
from .algorithm.parameters import params, set_params
from .stats.stats import get_stats, stats
from .operators.initialisation import initialisation
from .fitness.evaluation import evaluate_fitness
from tqdm import tqdm
from multiprocessing import Pool
from .utilities.algorithm.initialise_run import pool_init
new_params = {'X_train': X, 'y_train': y,
'X_test': X_val, 'y_test': y_val,
'POPULATION_SIZE': pop, 'GENERATIONS': gen,
'LAMARCK': lamarck, 'MULTICORE': multicore,
'POS_LABEL': pos_label}
params.update(new_params)
param_list = list_params(pop, gen, lamarck, multicore, **extra_params)
set_params(param_list)
if params["MULTICORE"]:
if "POOL" in params.keys():
params["POOL"].close()
params["POOL"] = None
# initialize pool once, if mutlicore is enabled
params['POOL'] = Pool(processes=params['CORES'],
initializer=pool_init,
initargs=(params,))
self.params.update(params)
# Initialise population
self.population = initialisation(params['POPULATION_SIZE'])
# Evaluate initial population
self.population = evaluate_fitness(self.population)
stats['gen'] = 0
# Generate statistics for run so far
get_stats(self.population)
population = self.population
mlflow = get_mlflow(params['EXPERIMENT_NAME'])
total_gens = params['GENERATIONS']+1
range_generations = tqdm(range(1, total_gens))
population = evolve(params, range_generations, mlflow, population)
get_stats(population, end=True)
store_pop(population)
self.stats = stats
self.population = population
self.fitted = True
def refit(self, gen: int):
"""
Continues the training process for <gen> iterations.
Parameters
----------
gen : int
Number of generations (iterations).
Raises
------
Exception
If Evolutionary Decision Trees are not trained yet, please use evoltree.fit.
Returns
-------
self
Re-fitted Evolutionary Decision Trees.
"""
if not self.fitted:
raise Exception("evoltree needs to be fitted first. Use evoltree.fit")
from .algorithm.parameters import params
from .stats.stats import get_stats, stats
from tqdm import tqdm
population = self.population
# Generate statistics for run so far
stats['gen'] = params['GENERATIONS']
get_stats(population)
mlflow = get_mlflow(params['EXPERIMENT_NAME'])
total_gens = params['GENERATIONS'] + 1 + gen
range_generations = tqdm(range(params['GENERATIONS'] + 1, total_gens))
population = evolve(params, range_generations, mlflow,
population, refit=True)
get_stats(population, end=True)
store_pop(population)
self.stats = stats
self.population = population
def fit_new_data(self, X, y, X_val=None, y_val=None, pop=100, gen=100,
lamarck=True, multicore=True, **extra_params) -> None:
"""
Fit Evolutionary Decision Trees to a new data set (X,Y), considering the same positive label.
        It uses the previous solutions as a starting point.
Function used for Online Learning environments.
Parameters
----------
X : pd.DataFrame
The training input samples.
y : pd.Series
The target values (class labels) as integers or strings.
X_val : pd.DataFrame, optional
Validation input samples, used to sort the population. The default is None.
y_val : pd.Series, optional
Validation target values (class values), used to sort the population. The default is None.
pop : int, optional
Number of Evolutionary Decision Trees in the population. The default is 100.
gen : int, optional
Number of generations (iterations) of training. The default is 100.
lamarck : bool, optional
If Lamarckian Evolutiona is used. The default is True.
multicore : bool, optional
If parallelism is used. The default is True.
**extra_params : TYPE
Extra parameters. For details, please check: https://github.com/PonyGE/PonyGE2/wiki/Evolutionary-Parameters.
Raises
------
Exception
If Evolutionary Decision Trees are not trained yet, please use evoltree.fit.
Returns
-------
self
Evolutionary Decision Trees re-fitted to the new data set.
"""
if not self.fitted:
raise Exception("evoltree needs to be fitted first. Use evoltree.fit")
from .algorithm.parameters import params, set_params
from .stats.stats import get_stats, stats
from tqdm import tqdm
new_params = {'X_train': X, 'y_train': y,
'X_test': X_val, 'y_test': y_val,
'POPULATION_SIZE': pop, 'GENERATIONS': gen,
'LAMARCK': lamarck, 'MULTICORE': multicore}
params.update(new_params)
param_list = list_params(pop, gen, lamarck, multicore, **extra_params)
set_params(param_list)
self.params = params
population = self.population
mlflow = get_mlflow(params['EXPERIMENT_NAME'])
total_gens = params['GENERATIONS']+1 + gen
range_generations = tqdm(range(params['GENERATIONS']+1, total_gens))
population = evolve(params, range_generations, mlflow, population)
get_stats(population, end=True)
store_pop(population)
self.stats = stats
self.population = population
    def predict(self, x: pd.DataFrame, mode: str = "best") -> np.ndarray:
"""
Predict class probabilities of the input samples X.
Parameters
----------
x : pd.DataFrame
The input samples.
mode : str, optional
            Specifies which Evolutionary Decision Tree is used to perform predictions.
The default is "best". Possibilities:
- "best": uses the tree with highest AUC estimated using validation data.
- "simplest": uses the tree with lowest complexity.
- "all": uses all the trees and returns all the predicted probabilities.
- "balanced": chooses the tree with the greatest Euclidean Distance\
to the reference point (0, 1), where 0 is the worst possible AUC\
and 1 is the worst possible complexity (normalized values).
Returns
-------
preds : np.array
The class probabilities of the input samples.
"""
if mode == "all":
preds = [ind.predict(x) for ind in self.population]
elif mode == "best":
best = min(self.population, key=lambda x: x.fitness[0])
preds = best.predict(x)
elif mode == "simplest":
simplest = min(self.population, key=lambda x: x.fitness[1])
preds = simplest.predict(x)
elif mode == "balanced":
from math import log10
min_y = log10(min(self.population,
key=lambda x: x.fitness[1]).fitness[1])
max_y = log10(max(self.population,
key=lambda x: x.fitness[1]).fitness[1])
# get individual with greater distance to point (0, 1)
balanced = max(self.population,
key=lambda x: get_distance(x, min_y, max_y))
preds = balanced.predict(x)
return preds
def evaluate_all(self, X_test: pd.DataFrame, y_test: pd.Series) -> List:
"""
Evaluate all Evolutionary Decision Trees in terms of AUC and Complexity,\
based on data set (X_test, y_test)
Parameters
----------
X_test : pd.DataFrame
            The test input samples.
y_test : pd.Series
The test target values (class labels) as integers or strings.
Returns
-------
List
List containing [AUC, Complexity] for each Evolutionary Decision Tree in the population.
"""
import pandas as pd
from .utilities.fitness.error_metric import AUC
aucs = [-1*AUC(y_test, ind.predict(X_test)) for ind in self.population]
nodes = [ind.fitness[1] for ind in self.population]
ev = pd.DataFrame([aucs, nodes]).T
ev.columns=['auc','node']
ev2 = ev.groupby('auc').agg({'node':min}).reset_index().values
return [ev2[:,0], ev2[:,1]]
def __get_tree_complexity__(self, dt: _tree, columns) -> int:
"""
Estimates the complexity of an sklearn Decision Tree.
For advanced users only.
Parameters
----------
dt : _tree
sklearn Decision Tree.
columns : TYPE
Data attributes.
Returns
-------
int
Tree complexity.
"""
nodes = get_nodes_from_tree(dt, columns, self.params)
return nodes
def __get_randForest_complexity__(self, rf, columns) -> int:
"""
Estimates the complexity of an sklearn Random Forest.
For advanced users only.
Parameters
----------
rf : TYPE
sklearn Random Forest object.
columns : TYPE
Data attributes.
Returns
-------
TYPE
Sum of Random Forest Trees complexities.
"""
nodes = [get_nodes_from_tree(dt, columns, self.params)
for dt in rf.estimators_]
return sum(nodes)
def load_offline_data(val=True) -> List:
"""
Returns an example dataset [(X_train, Y_train),
(X_test, Y_test)].
If validation = True, returns: [(X_train, Y_train),
(X_validation, Y_validation),
(X_test, Y_test)].
Used for static environment (Offline Learning).
Parameters
----------
val : TYPE, optional
If validation set is returned. The default is True.
Returns
-------
List
List of datasets in format: [(X_train, Y_train), (X_test, Y_test)].
"""
import pandas as pd
from os import path
from sklearn.model_selection import train_test_split
import pkg_resources
DATA_PATH = pkg_resources.resource_filename('evoltree', 'data')
dtr_filename = path.join(DATA_PATH, "example1_tr.csv")
dts_filename = path.join(DATA_PATH, "example1_ts.csv")
dtrain = | pd.read_csv(dtr_filename, sep=";") | pandas.read_csv |
import pickle
from copy import deepcopy
import numpy as np
import pandas as pd
from mlcomp.worker.executors import Executor
from mlcomp.worker.executors.infer import Infer
from mlcomp.worker.reports.classification import ClassificationReportBuilder
from dataset import MnistDataset
from experiment import Experiment
@Executor.register
class InferMnist(Infer):
def __init__(self, **kwargs):
cache_names = ['y']
super().__init__(cache_names=cache_names, layout='img_classify',
**kwargs)
if self.test:
self.x_source = MnistDataset(
file='data/test.csv',
transforms=Experiment.get_test_transforms(),
max_count=self.max_count,
)
else:
self.x_source = MnistDataset(
file='data/train.csv',
fold_csv='data/fold.csv',
is_test=True,
transforms=Experiment.get_test_transforms(),
max_count=self.max_count
)
self.builder = None
self.x = None
self.res = []
self.submit_res = []
def create_base(self):
self.builder = ClassificationReportBuilder(
session=self.session,
task=self.task,
layout=self.layout,
name=self.name,
plot_count=self.plot_count
)
self.builder.create_base()
def count(self):
return len(self.x_source)
def adjust_part(self, part):
self.x = deepcopy(self.x_source)
self.x.x = self.x.x[part[0]:part[1]]
if not self.test:
self.x.y = self.x.y[part[0]:part[1]]
def save(self, preds, folder: str):
self.res.extend(preds)
def save_final(self, folder):
pickle.dump(np.array(self.res),
open(f'{folder}/{self.model_name}_{self.suffix}.p', 'wb'))
def submit(self, preds):
argmax = preds.argmax(axis=1)
self.submit_res.extend(
[{'ImageId': len(self.submit_res) + i + 1, 'Label': p} for i, p in
enumerate(argmax)])
def submit_final(self, folder):
| pd.DataFrame(self.submit_res) | pandas.DataFrame |
from pathlib import Path
import copy
import pickle as pkl
from mmap import mmap
from scipy import stats as st
from scipy.stats._continuous_distns import FitDataError
import torch
from sklearn import svm
from sklearn import linear_model
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import os
import matplotlib.colors as mcolors
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.mplot3d.art3d import juggle_axes
from matplotlib.ticker import MaxNLocator
from joblib import Memory
import math
import lyap
import model_loader_utils as loader
import initialize_and_train as train
import utils
memory = Memory(location='./memoization_cache', verbose=2)
# memory.clear()
## Functions for computing means and error bars for the plots. 68% confidence
# intervals and means are currently implemented in this code. The commented-out
# code instead uses a gamma distribution to compute these, but it requires a
# custom version of the seaborn plotting library to plot.
def orth_proj(v):
n = len(v)
vv = v.reshape(-1, 1)
return torch.eye(n) - ([email protected])/(v@v)
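# Editor's note: illustrative check added during editing. The projector removes
# the component along v: for v = e1, the x-component of any vector is zeroed.
def _orth_proj_example():
    v = torch.tensor([1.0, 0.0, 0.0])
    P = orth_proj(v)
    return P @ torch.tensor([2.0, 3.0, 4.0])  # -> tensor([0., 3., 4.])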
USE_ERRORBARS = True
# USE_ERRORBARS = False
LEGEND = False
# LEGEND = True
folder_root = '../results/figs/'
def ci_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return bounds[1], bounds[0]
# ci_acc = 68
# ci_acc = 95
def est_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return median
# est_acc = "mean"
def ci_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return bounds[1], bounds[0]
# ci_dim = 68
# ci_dim = 95
def est_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return median
# est_dim = "mean"
def point_replace(a_string):
a_string = str(a_string)
return a_string.replace(".", "p")
def get_color(x, cmap=plt.cm.plasma):
"""Get normalized color assignments based on input data x and colormap
cmap."""
mag = torch.max(x) - torch.min(x)
x_norm = (x.float() - torch.min(x))/mag
return cmap(x_norm)
def median_and_bound(samples, perc_bound, dist_type='gamma', loc=0., shift=0,
reflect=False):
"""Get median and probability mass intervals for a gamma distribution fit
of samples."""
samples = np.array(samples)
def do_reflect(x, center):
return -1*(x - center) + center
if dist_type == 'gamma':
if np.sum(samples[0] == samples) == len(samples):
median = samples[0]
interval = [samples[0], samples[0]]
return median, interval
if reflect:
samples_reflected = do_reflect(samples, loc)
shape_ps, loc_fit, scale = st.gamma.fit(samples_reflected,
floc=loc + shift)
median_reflected = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval_reflected = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
median = do_reflect(median_reflected, loc)
interval = do_reflect(interval_reflected, loc)
else:
shape_ps, loc, scale = st.gamma.fit(samples, floc=loc + shift)
median = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
else:
raise ValueError("Distribution option (dist_type) not recognized.")
return median, interval
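# Editor's note: illustrative sketch with invented accuracy-like samples,
# mirroring how ci_acc/est_acc above call median_and_bound.
def _median_and_bound_example():
    samples = np.array([0.91, 0.93, 0.95, 0.94, 0.92, 0.96])
    median, interval = median_and_bound(
        samples, perc_bound=0.75, loc=1.0, shift=-.0001, reflect=True)
    return median, interval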
## Set parameters for figure aesthetics
plt.rcParams['font.size'] = 6
plt.rcParams['lines.markersize'] = 1
plt.rcParams['lines.linewidth'] = 1
plt.rcParams['axes.labelsize'] = 7
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.titlesize'] = 8
# Colormaps
class_style = 'color'
cols11 = np.array([90, 100, 170])/255
cols12 = np.array([37, 50, 120])/255
cols21 = np.array([250, 171, 62])/255
cols22 = np.array([156, 110, 35])/255
cmap_activation_pnts = mcolors.ListedColormap([cols11, cols21])
cmap_activation_pnts_edge = mcolors.ListedColormap([cols12, cols22])
rasterized = False
dpi = 800
ext = 'pdf'
# Default figure size
figsize = (1.5, 1.2)
ax_pos = (0, 0, 1, 1)
def make_fig(figsize=figsize, ax_pos=ax_pos):
"""Create figure."""
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(ax_pos)
return fig, ax
def out_fig(fig, figname, subfolder='', show=False, save=True, axis_type=0,
name_order=0, data=None):
""" Save figure."""
folder = Path(folder_root)
figname = point_replace(figname)
# os.makedirs('../results/figs/', exist_ok=True)
os.makedirs(folder, exist_ok=True)
ax = fig.axes[0]
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_rasterized(rasterized)
if axis_type == 1:
        ax.tick_params(axis='both', which='both',  # affect both major and minor ticks
                       bottom=False,       # ticks along the bottom edge are off
                       left=False,         # ticks along the left edge are off
                       top=False,          # ticks along the top edge are off
                       labelbottom=False,  # labels along the bottom edge are off
                       labelleft=False)    # labels along the left edge are off
elif axis_type == 2:
ax.axis('off')
    if name_order == 0:
        fig_path = folder/subfolder/figname
    else:
        # NOTE: both branches currently build the same path, so name_order has no effect
        fig_path = folder/subfolder/figname
if save:
os.makedirs(folder/subfolder, exist_ok=True)
fig_file = fig_path.with_suffix('.' + ext)
print(f"Saving figure to {fig_file}")
fig.savefig(fig_file, dpi=dpi, transparent=True, bbox_inches='tight')
if show:
fig.tight_layout()
fig.show()
if data is not None:
os.makedirs(folder/subfolder/'data/', exist_ok=True)
with open(folder/subfolder/'data/{}_data'.format(figname),
'wb') as fid:
pkl.dump(data, fid, protocol=4)
plt.close('all')
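# Minimal usage sketch for the figure helpers above (illustrative only): build
# a figure with make_fig, draw into it, then write it out with out_fig. Assumes
# folder_root is defined earlier in this module, as out_fig requires.
def _example_make_and_save_fig():
    fig, ax = make_fig(figsize=(1.5, 1.2))
    ax.plot([0, 1, 2], [0, 1, 4])  # hypothetical data
    out_fig(fig, figname='example_fig', subfolder='examples', show=False,
            save=True)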
def autocorrelation(train_params, figname='autocorrelation'):
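    """Initialize and train the model specified by train_params, then plot, as
    a function of layer/timepoint, the cosine similarity between the hidden
    representation at the first layer/timepoint and each subsequent one,
    computed over a small subset of training inputs."""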
train_params_loc = train_params.copy()
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
# val_loss = params['history']['losses']['val']
# val_losses[i0, i1] = val_loss
# val_acc = params['history']['accuracies']['val']
# val_accs[i0, i1] = val_acc
train_samples_per_epoch = len(class_datasets['train'])
class_datasets['train'].max_samples = 10
torch.manual_seed(params['model_seed'])
X = class_datasets['train'][:][0]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif train_params_loc['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# X = utils.extend_input(X, 10)
loader.load_model_from_epoch_and_dir(model, run_dir, -1)
hid = []
hid += model.get_post_activations(X)[:-1]
# auto_corr_mean = []
# auto_corr_var = []
auto_corr_table = pd.DataFrame(columns=['t_next', 'autocorr'])
h = hid[0]
for i0 in range(len(hid)):
h_next = hid[i0]
overlap = torch.sum(h*h_next, dim=1)
norms_h = torch.sqrt(torch.sum(h**2, dim=1))
norms_h_next = torch.sqrt(torch.sum(h_next**2, dim=1))
corrs = overlap/(norms_h*norms_h_next)
avg_corr = torch.mean(corrs)
d = {'t_next': i0, 'autocorr': corrs}
auto_corr_table = auto_corr_table.append(pd.DataFrame(d),
ignore_index=True)
fig, ax = make_fig(figsize)
sns.lineplot(ax=ax, x='t_next', y='autocorr', data=auto_corr_table)
out_fig(fig, figname)
def snapshots_through_time(train_params, figname="snap", subdir="snaps"):
"""
Plot PCA snapshots of the representation through time.
Parameters
----------
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training.
"""
subdir = Path(subdir)
X_dim = train_params['X_dim']
FEEDFORWARD = train_params['network'] == 'feedforward'
num_pnts_dim_red = 800
num_plot = 600
train_params_loc = copy.deepcopy(train_params)
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(train_params_loc['model_seed'])
X, Y = class_datasets['train'][:]
if FEEDFORWARD:
T = 10
y = Y
X0 = X
else:
T = 30
# T = 100
X = utils.extend_input(X, T + 2)
X0 = X[:, 0]
y = Y[:, -1]
loader.load_model_from_epoch_and_dir(model, run_dir, 0, 0)
hid_0 = [X0]
hid_0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
train_params_loc['num_epochs'], 0)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if FEEDFORWARD:
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
# r0_n = r[0] / torch.norm(r[0])
# r1_n = r[1] / torch.norm(r[1])
#
# r0_n_v = r0_n.reshape(r0_n.shape[0], 1)
# r1_n_v = r1_n.reshape(r1_n.shape[0], 1)
# r0_orth = torch.eye(len(r0_n)) - r0_n_v @ r0_n_v.T
# r1_orth = torch.eye(len(r1_n)) - r1_n_v @ r1_n_v.T
# h = hid[10]
# # h_proj = h @ r_orth
# u, s, v = torch.svd(h)
# v0 = v[:, 0]
# def orth_projector(v):
# n = len(v)
# return (torch.eye(n) - v.reshape(n, 1)@v.reshape(1, n))/(v@v)
# v0_orth = (torch.eye(n) - v0.reshape(n,1)@v0.reshape(1,n))/(v0@v0)
# h_v0_orth = h @ v0_orth
# r0_e_p = orth_projector(r0_e)
# r1_e_p = orth_projector(r1_e)
# h_r0_e_p0 = h[y] @ r0_e_p
# h_r0_e_p1 = h[y] @ r1_e_p
coloring = get_color(y, cmap_activation_pnts)[:num_plot]
edge_coloring = get_color(y, cmap_activation_pnts_edge)[:num_plot]
## Now get principal components (pcs) and align them from time point to
# time point
pcs = []
p_track = 0
norm = np.linalg.norm
projs = []
for i1 in range(1, len(hid)):
# pc = utils.get_pcs_covariance(hid[i1], [0, 1])
out = utils.get_pcs_covariance(hid[i1], [0, 1], return_extra=True)
pc = out['pca_projection']
mu = out['mean']
proj = out['pca_projectors']
mu_proj = mu@proj[:, :2]
if i1 > 0:
# Check for the best alignment
pc_flip_x = pc.clone()
pc_flip_x[:, 0] = -pc_flip_x[:, 0]
pc_flip_y = pc.clone()
pc_flip_y[:, 1] = -pc_flip_y[:, 1]
pc_flip_both = pc.clone()
pc_flip_both[:, 0] = -pc_flip_both[:, 0]
pc_flip_both[:, 1] = -pc_flip_both[:, 1]
difference0 = norm(p_track - pc)
difference1 = norm(p_track - pc_flip_x)
difference2 = norm(p_track - pc_flip_y)
difference3 = norm(p_track - pc_flip_both)
amin = np.argmin(
[difference0, difference1, difference2, difference3])
if amin == 1:
pc[:, 0] = -pc[:, 0]
proj[:, 0] = -proj[:, 0]
elif amin == 2:
pc[:, 1] = -pc[:, 1]
proj[:, 1] = -proj[:, 1]
elif amin == 3:
pc[:, 0] = -pc[:, 0]
pc[:, 1] = -pc[:, 1]
proj[:, 0] = -proj[:, 0]
proj[:, 1] = -proj[:, 1]
pc = pc + mu_proj
p_track = pc.clone()
pcs.append(pc[:num_plot])
projs.append(proj)
def take_snap(i0, scats, fig, dim=2, border=False):
# ax = fig.axes[0]
hid_pcs_plot = pcs[i0][:, :dim].numpy()
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
xc = (xm + xM)/2
yc = (ym + yM)/2
hid_pcs_plot[:, 0] = hid_pcs_plot[:, 0] - xc
hid_pcs_plot[:, 1] = hid_pcs_plot[:, 1] - yc
v = projs[i0]
# u, s, v = torch.svd(h)
if r.shape[0] == 2:
r0_p = r[0]@v
r1_p = r[1]@v
else:
r0_p = r.flatten()@v
r1_p = -r.flatten()@v
if class_style == 'shape':
scats[0][0].set_offsets(hid_pcs_plot)
else:
            if dim == 3:
                # 3d scatter offsets must be set via _offsets3d.
                scats[0]._offsets3d = juggle_axes(*hid_pcs_plot[:, :dim].T, 'z')
else:
scats[0].set_offsets(hid_pcs_plot)
scats[1].set_offsets(r0_p[:2].reshape(1, 2))
scats[2].set_offsets(r1_p[:2].reshape(1, 2))
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
max_extent = max(xM - xm, yM - ym)
max_extent_arg = xM - xm > yM - ym
if dim == 2:
x_factor = .4
if max_extent_arg:
ax.set_xlim(
[xm - x_factor*max_extent, xM + x_factor*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim(
[ym - x_factor*max_extent, yM + x_factor*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
else:
if max_extent_arg:
ax.set_xlim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_zlim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_zlim([ym - .1*max_extent, yM + .1*max_extent])
# ax.plot([r0_p[0]], [r0_p[1]], 'x', markersize=3, color='black')
# ax.plot([r1_p[0]], [r1_p[1]], 'x', markersize=3, color='black')
ax.set_ylim([-4, 4])
if dim == 3:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
else:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
return scats,
dim = 2
hid_pcs_plot = pcs[0]
if dim == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
ax.set_zlim([-10, 10])
else:
fig, ax = make_fig()
ax.grid(False)
scat1 = ax.scatter(*hid_pcs_plot[:num_plot, :dim].T, c=coloring,
edgecolors=edge_coloring, s=10, linewidths=.65)
ax.plot([0], [0], 'x', markersize=7)
scat2 = ax.scatter([0], [0], marker='x', s=3, c='black')
scat3 = ax.scatter([0], [0], marker='x', s=3, color='black')
scats = [scat1, scat2, scat3]
# ax.plot([0], [0], 'o', markersize=10)
if FEEDFORWARD:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
else:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 21, 26,
31]) # snap_idx = list(range(T + 1))
for i0 in snap_idx:
take_snap(i0, scats, fig, dim=dim, border=False)
def _cluster_holdout_test_acc_stat_fun(h, y, clust_identity,
classifier_type='logistic_regression',
num_repeats=5, train_ratio=0.8, seed=11):
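    """Fit a linear classifier (logistic regression, or a linear SVM if
    classifier_type == 'svm') to the representation h using only samples whose
    cluster index lies in the 'training' subset of clusters, then evaluate
    accuracy on samples from the held-out clusters. Repeated num_repeats times
    with different classifier random states; returns (train_accs, test_accs)."""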
np.random.seed(seed)
num_clusts = np.max(clust_identity) + 1
num_clusts_train = int(round(num_clusts*train_ratio))
num_samples = h.shape[0]
test_accs = np.zeros(num_repeats)
train_accs = np.zeros(num_repeats)
for i0 in range(num_repeats):
permutation = np.random.permutation(np.arange(len(clust_identity)))
perm_inv = np.argsort(permutation)
clust_identity_shuffled = clust_identity[permutation]
train_idx = clust_identity_shuffled <= num_clusts_train
test_idx = clust_identity_shuffled > num_clusts_train
hid_train = h[train_idx[perm_inv]]
y_train = y[train_idx[perm_inv]]
y_test = y[test_idx[perm_inv]]
hid_test = h[test_idx[perm_inv]]
if classifier_type == 'svm':
classifier = svm.LinearSVC(random_state=3*i0 + 1)
else:
classifier = linear_model.LogisticRegression(random_state=3*i0 + 1,
solver='lbfgs')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classifier.fit(hid_train, y_train)
train_accs[i0] = classifier.score(hid_train, y_train)
test_accs[i0] = classifier.score(hid_test, y_test)
return train_accs, test_accs
def clust_holdout_over_layers(seeds, gs, train_params,
figname="clust_holdout_over_layers"):
"""
Logistic regression training and testing error on the representation
through the layers. Compares networks trained
with different choices of g_radius (specified by input parameter gs).
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
"""
if not hasattr(gs, '__len__'):
gs = [gs]
layer_label = 'layer'
@memory.cache
def generate_data_table_clust(seeds, gs, train_params):
layer_label = 'layer'
clust_acc_table = pd.DataFrame(
columns=['seed', 'g_radius', 'training', layer_label, 'LR training',
'LR testing'])
train_params_loc = copy.deepcopy(train_params)
for i0, seed in enumerate(seeds):
for i1, g in enumerate(gs):
train_params_loc['g_radius'] = g
train_params_loc['model_seed'] = seed
num_pnts_dim_red = 500
model, params, run_dir = train.initialize_and_train(
**train_params_loc)
class_datasets = params['datasets']
num_train_samples = len(class_datasets['train'])
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(params['model_seed'])
X, Y = class_datasets['train'][:]
if train_params_loc['network'] == 'feedforward':
X0 = X
else:
X0 = X[:, 0]
for epoch, epoch_label in zip([0, -1], ['before', 'after']):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if len(Y.shape) > 1:
Y = Y[:, -1]
cluster_identity = class_datasets['train'].cluster_identity
ds = []
for lay, h in enumerate(hid):
stat = _cluster_holdout_test_acc_stat_fun(h.numpy(),
Y.numpy(),
cluster_identity)
ds.extend([{
'seed': seed, 'g_radius': g,
'training': epoch_label, layer_label: lay,
'LR training': stat[0][k], 'LR testing': stat[1][k]
} for k in range(len(stat[0]))])
clust_acc_table = clust_acc_table.append(pd.DataFrame(ds),
ignore_index=True)
clust_acc_table['seed'] = clust_acc_table['seed'].astype('category')
clust_acc_table['g_radius'] = clust_acc_table['g_radius'].astype(
'category')
clust_acc_table['training'] = clust_acc_table['training'].astype(
'category')
return clust_acc_table
clust_acc_table = generate_data_table_clust(seeds, gs, train_params)
layers = set(clust_acc_table[layer_label])
for stage in ['LR training', 'LR testing']:
if stage == 'LR training':
clust_acc_table_stage = clust_acc_table.drop(columns=['LR testing'])
else:
clust_acc_table_stage = clust_acc_table.drop(
columns=['LR training'])
fig, ax = make_fig((1.5, 1.2))
        if USE_ERRORBARS:
            g = sns.lineplot(ax=ax, x=layer_label, y=stage,
                             data=clust_acc_table_stage, estimator=est_acc,
                             ci=ci_acc, style='training',
                             style_order=['after', 'before'], hue='g_radius')
            # g only exists in this branch, so handle its legend here.
            if not LEGEND and g.legend_ is not None:
                g.legend_.remove()
        else:
            g1 = sns.lineplot(ax=ax, x=layer_label, y=stage,
                              data=clust_acc_table_stage, estimator=None,
                              units='seed', style='training',
                              style_order=['after', 'before'], hue='g_radius',
                              alpha=0.6)
            g2 = sns.lineplot(ax=ax, x=layer_label, y=stage,
                              data=clust_acc_table_stage, estimator='mean',
                              ci=None, style='training',
                              style_order=['after', 'before'], hue='g_radius')
            if g1.legend_ is not None:
                g1.legend_.remove()
            if not LEGEND and g2.legend_ is not None:
                g2.legend_.remove()
ax.set_ylim([-.01, 1.01])
ax.set_xticks(range(len(layers)))
out_fig(fig, figname + '_' + stage, subfolder=train_params[
'network'] +
'/clust_holdout_over_layers/',
show=False, save=True, axis_type=0, name_order=0,
data=clust_acc_table)
plt.close('all')
def get_stats(stat_fun, train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, *args, **kwargs):
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
style_bool = train_params_list_style is not None
if style_bool and style_key is None:
raise ValueError("Please specify a style_key.")
hue_bool = len(train_params_list_hue) > 1
if hue_bool and hue_key is None:
raise ValueError("Please specify a hue_key.")
if seeds is None:
seeds = [train_params_list_hue[0]['model_seed']]
params_cat = [[], []]
params_cat[0] = train_params_list_hue
if style_bool:
params_cat[1] = train_params_list_style
else:
params_cat[1] = [None]
table = pd.DataFrame()
    if hue_bool:
        table = table.reindex(columns=table.columns.tolist() + [hue_key])
    if style_bool:
        table = table.reindex(columns=table.columns.tolist() + [style_key])
for i0 in range(len(params_cat)): # hue params
for i1 in range(len(params_cat[i0])):
params = params_cat[i0][i1]
table_piece = stat_fun(params, hue_key, style_key, seeds,
*args, **kwargs)
table = table.append(table_piece, ignore_index=True)
if hue_key is not None:
table[hue_key] = table[hue_key].astype('category')
if style_key is not None:
table[style_key] = table[style_key].astype('category')
return table
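# Illustrative sketch of the stat_fun contract used by get_stats (not part of
# the original pipeline): a stat_fun receives one training-parameter dict (or
# None for an unused style slot, which it should tolerate), the hue/style keys,
# and the list of seeds, and returns a DataFrame with one row per measurement.
# The parameter dicts in the commented call are hypothetical.
def _example_stat_fun(params, hue_key, style_key, seeds):
    if params is None:
        return pd.DataFrame()
    rows = [{'seed': seed, 'value': 0.0, hue_key: params[hue_key]}
            for seed in seeds]
    return pd.DataFrame(rows)
# table = get_stats(_example_stat_fun,
#                   train_params_list_hue=[{'g_radius': 1, 'model_seed': 0},
#                                          {'g_radius': 20, 'model_seed': 0}],
#                   seeds=[0, 1], hue_key='g_radius')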
def dim_through_training(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, figname='',
subdir=None, multiprocess_lock=None):
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/' + 'dim_over_training' + '/'
@memory.cache
def compute_dim_through_training(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
for i_epoch, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
try:
dim = utils.get_effdim(hid[-1],
preserve_gradients=False).item()
except RuntimeError:
print("Dim computation didn't converge.")
dim = np.nan
num_updates = int(
params['num_train_samples_per_epoch']/params[
'batch_size'])*epoch
d = {
'effective_dimension': dim, 'seed': seed,
'epoch_index': i_epoch, 'epoch': epoch,
'num_updates': num_updates
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_through_training, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim, hue=hue_key,
style=style_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=None, units='seed', hue=hue_key,
style=style_key, alpha=.6)
g2 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator='mean', ci=None, hue=hue_key,
style=style_key)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax.set_ylim([0, None])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def dim_over_layers(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="dim_over_layers", subdir=None, T=0,
multiprocess_lock=None, use_error_bars=None, **plot_kwargs):
"""
Effective dimension measured over layers (or timepoints if looking at an
RNN) of the network, before and after
training.
    Parameters
    ----------
    train_params_list_hue : List[dict]
        List of training-parameter dictionaries that specify the models and
        datasets to train; variation over this list is shown through the hue
        aesthetic.
    train_params_list_style : Optional[List[dict]]
        Optional second list of training-parameter dictionaries, shown
        through the line-style aesthetic.
    seeds : List[int]
        List of random number seeds to use for generating instantiations of
        the model and dataset. Variation over these seeds is used to plot
        error bars.
    hue_key : str
        Key of the training parameter used to label the hue.
    style_key : str
        Key of the training parameter used to label the line style.
    figname : str
        Name of the figure to save.
T : int
Final timepoint to plot (if looking at an RNN). If 0, disregard this
parameter.
"""
if subdir is None:
subdir = train_params_list_hue[0]['network'] + '/dim_over_layers/'
if use_error_bars is None:
use_error_bars = USE_ERRORBARS
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_dim_over_layers(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 15
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir,
params['num_epochs'])
hid = [X0]
hid += model.get_post_activations(X)[:-1]
dims = []
for h in hid:
try:
dims.append(utils.get_effdim(h,
preserve_gradients=False).item())
except RuntimeError:
dims.append(np.nan)
d = {
'effective_dimension': dims,
'layer': list(range(len(dims))), 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_over_layers, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# breakpoint()
# print(table)
fig, ax = make_fig((1.5, 1.2))
# table['g_radius'] = table['g_radius'].astype('float64')
# norm = plt.Normalize(table['g_radius'].min(), table['g_radius'].max())
# sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
# sm.set_array([])
# try:
if use_error_bars:
g = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim,
style=style_key, hue=hue_key, **plot_kwargs)
# g.figure.colorbar(sm)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6,
**plot_kwargs)
g2 = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key, **plot_kwargs)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
# except FitDataError:
# print("Plotting data invalid.")
layers = set(table['layer'])
if len(layers) < 12:
ax.set_xticks(range(len(layers)))
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(
integer=True)) # ax.xaxis.set_major_locator(plt.MaxNLocator(10))
# ax.set_ylim([0, None])
# ax.set_ylim([0, 15])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def orth_compression_through_layers(train_params_list_hue,
train_params_list_style=None, seeds=None,
hue_key=None, style_key=None,
figname="orth_compression_through_layers",
subdir=None, multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/orth_compression_through_layers/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_layers(params, hue_key, style_key,
seeds):
num_pnts = 500
# num_dims = 2
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
num_dims = int(returned_params['X_dim'])
class_datasets = returned_params['datasets']
def pca(v):
out = utils.get_pcs(v, list(range(num_dims)),
return_extra=True)
h_pcs = out['pca_projection']
v = out['pca_projectors'][:, :num_dims]
return h_pcs, v
class_datasets['train'].max_samples = num_pnts
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
# T = 20
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
r0s = []
r1s = []
for save in saves[-2][:]:
loader.load_model_from_epoch_and_dir(model, run_dir,
epochs[-1], save)
if params['network'] == 'feedforward':
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
r0s.append(r[0].double())
if params['loss'] != 'mse_scalar':
r1s.append(r[1].double())
r0 = torch.mean(torch.stack(r0s), dim=0)
if params['loss'] != 'mse_scalar':
r1 = torch.mean(torch.stack(r1s), dim=0)
if params['network'] == 'feedforward':
y = Y.flatten()
else:
y = Y[:, -1]
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir, 0)
hid0 = [X0]
hid0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
params['num_epochs'])
hid = [X0]
hid += model.get_post_activations(X)[:-1]
rs = []
avg_ratios = []
for i0, (h, h0) in enumerate(zip(hid, hid0)):
h = h.double()
h_pcs, v = pca(h)
h0 = h0.double()
h0_pcs, v0 = pca(h0)
if params['loss'] == 'mse_scalar':
h_proj = h_pcs@orth_proj(r0@v).T
h0_proj = h0_pcs@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio = torch.mean(ratios).item()
else:
h_proj = h_pcs[y == 0]@orth_proj(
r0@v).T # todo: maybe need to use yh (net
# prediction)
h0_proj = h0_pcs[y == 0]@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio1 = torch.mean(ratios).item()
h_proj = h_pcs[y == 1]@orth_proj(r1@v).T
h0_proj = h0_pcs[y == 1]@orth_proj(r1@v).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio2 = torch.mean(ratios).item()
avg_ratio = (avg_ratio1 + avg_ratio2)/2
avg_ratios.append(avg_ratio)
# u, s, v = torch.svd(h)
# proj_mags = [(h @ r_orth.T)]
# def get_shrink(r, h, h0):
d = {
'projections_magnitude': avg_ratios,
'layer': list(range(len(avg_ratios))), 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d),
ignore_index=True)
return table_piece
table = get_stats(compute_orth_compression_through_layers,
train_params_list_hue, train_params_list_style, seeds,
hue_key, style_key)
print(table)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
fig, ax = make_fig((1.5, 1.2))
try:
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='layer', y='projections_magnitude',
data=table, estimator='mean', ci=68,
style=style_key, hue=hue_key)
else:
g = sns.lineplot(ax=ax, x='layer', y='projections_magnitude',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
except FitDataError:
print("Invalid data.")
layers = set(table['layer'])
if len(layers) < 12:
ax.set_xticks(range(len(layers)))
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.set_ylim([-.05, None])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def orth_compression_through_training(train_params_list_hue,
train_params_list_style=None, seeds=None,
hue_key=None, style_key=None,
figname="orth_compression_through_training",
subdir=None, multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/orth_compression_through_training/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_training(params, hue_key, style_key,
seeds):
num_pnts = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
num_dims = int(returned_params['X_dim'])
class_datasets = returned_params['datasets']
def pca(v):
out = utils.get_pcs(v, list(range(num_dims)),
return_extra=True)
h_pcs = out['pca_projection']
v = out['pca_projectors'][:, :num_dims]
return h_pcs, v
class_datasets['train'].max_samples = num_pnts
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
if params['network'] == 'feedforward':
y = Y
else:
y = Y[:, -1]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir, 0)
# hid0 = [X0]
h0 = model.get_post_activations(X)[:-1][-1].double()
h0_pcs, v0 = pca(h0)
# avg_ratios = []
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
# saves = saves[params['num_epochs']-1]
for epoch_idx, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
h = model.get_post_activations(X)[:-1][-1].double()
r0s = []
r1s = []
for save in saves[-2][:]:
loader.load_model_from_epoch_and_dir(model, run_dir,
epoch, save)
if params['network'] == 'feedforward':
r = model.layer_weights[
-1].detach().clone().double().T
else:
r = model.Wout.detach().double().clone()
r0s.append(r[0].double())
if params['loss'] != 'mse_scalar':
r1s.append(r[1].double())
r0 = torch.mean(torch.stack(r0s), dim=0)
if params['loss'] != 'mse_scalar':
r1 = torch.mean(torch.stack(r1s), dim=0)
h_pcs, v = pca(h)
if params['loss'] == 'mse_scalar':
h_proj = h_pcs@orth_proj(r0@v).T
h0_proj = h0_pcs@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio = torch.mean(ratios).item()
else:
h_proj = h_pcs[y == 0]@orth_proj(
r0@v).T # todo: maybe need to use yh (net
# prediction)
h0_proj = h0_pcs[y == 0]@orth_proj(r0@v0).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio1 = torch.mean(ratios).item()
h_proj = h_pcs[y == 1]@orth_proj(r1@v).T
h0_proj = h0_pcs[y == 1]@orth_proj(r1@v).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio2 = torch.mean(ratios).item()
avg_ratio = (avg_ratio1 + avg_ratio2)/2
d = {
'projections_magnitude': avg_ratio, 'epoch': epoch,
'epoch_idx': epoch_idx, 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_orth_compression_through_training,
train_params_list_hue, train_params_list_style, seeds,
hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# print(table)
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=68, style=style_key,
hue=hue_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6)
g2 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.set_ylim([-0.05, None])
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def orth_compression_through_training_input_sep(train_params_list_hue,
train_params_list_style=None,
seeds=None, hue_key=None,
style_key=None,
figname="orth_compression_through_training_input_sep",
subdir=None,
multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + \
'/orth_compression_through_training_input_sep/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_training_input_sep(params, hue_key,
style_key, seeds):
num_pnts = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
num_dims = int(returned_params['X_dim'])
class_datasets = returned_params['datasets']
def pca(v):
out = utils.get_pcs(v, list(range(num_dims)),
return_extra=True)
h_pcs = out['pca_projection']
v = out['pca_projectors'][:, :num_dims]
return h_pcs, v
class_datasets['train'].max_samples = num_pnts
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
if params['network'] == 'feedforward':
y = Y
else:
y = Y[:, -1]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir, 0)
# hid0 = [X0]
h0 = model.get_post_activations(X)[:-1][-1].double()
h0_pcs, v0 = pca(h0)
# avg_ratios = []
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
# saves = saves[params['num_epochs']-1]
for epoch_idx, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
h = model.get_post_activations(X)[:-1][-1].double()
# h_pcs, v = pca(h)
# class_diff = torch.mean(h_pcs[y == 0], dim=0) -
# torch.mean(
# h_pcs[y == 1], dim=0)
class_diff = torch.mean(h[y == 0], dim=0) - torch.mean(
h[y == 1], dim=0)
h_proj = h@orth_proj(class_diff).T
h0_proj = h0@orth_proj(class_diff).T
h_norms = torch.norm(h_proj, dim=1)
h0_norms = torch.norm(h0_proj, dim=1)
ratios = h_norms/h0_norms
avg_ratio = torch.mean(ratios).item()
# if params['loss'] == 'mse_scalar':
# # h_proj = h_pcs@orth_proj(class_diff@v).T
# # h0_proj = h0_pcs@orth_proj(class_diff@v).T
#
# else:
# h_proj = h_pcs[y == 0]@orth_proj(
# r0@v).T # todo: maybe need to use yh (net
# # prediction. Doesn't matter if net is perfectly )
# h0_proj = h0_pcs[y == 0]@orth_proj(r0@v0).T
# h_norms = torch.norm(h_proj, dim=1)
# h0_norms = torch.norm(h0_proj, dim=1)
# ratios = h_norms/h0_norms
# avg_ratio1 = torch.mean(ratios).item()
# h_proj = h_pcs[y == 1]@orth_proj(r1@v).T
# h0_proj = h0_pcs[y == 1]@orth_proj(r1@v).T
# h_norms = torch.norm(h_proj, dim=1)
# h0_norms = torch.norm(h0_proj, dim=1)
# ratios = h_norms/h0_norms
# avg_ratio2 = torch.mean(ratios).item()
#
# avg_ratio = (avg_ratio1 + avg_ratio2)/2
d = {
'projections_magnitude': avg_ratio, 'epoch': epoch,
'epoch_idx': epoch_idx, 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_orth_compression_through_training_input_sep,
train_params_list_hue, train_params_list_style, seeds,
hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# print(table)
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=68, style=style_key,
hue=hue_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6,
**plot_kwargs)
g2 = sns.lineplot(ax=ax, x='epoch_idx', y='projections_magnitude',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key, **plot_kwargs)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.set_ylim([-0.05, None])
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def clust_holdout_over_layers(train_params_list_hue,
train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="dim_over_layers", subdir=None, T=0,
multiprocess_lock=None, use_error_bars=None,
**plot_kwargs):
"""
Effective dimension measured over layers (or timepoints if looking at an
RNN) of the network, before and after
training.
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
T : int
Final timepoint to plot (if looking at an RNN). If 0, disregard this
parameter.
"""
if subdir is None:
subdir = train_params_list_hue[0]['network'] + '/dim_over_layers/'
if use_error_bars is None:
use_error_bars = USE_ERRORBARS
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_clust_holdout_over_layers(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
        table_piece = pd.DataFrame()
"""
Map words into vectors using different algorithms such as TF-IDF, word2vec or GloVe.
"""
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, NMF
from sklearn.cluster import KMeans, DBSCAN, MeanShift
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize as sklearn_normalize
from scipy.sparse import coo_matrix
from typing import Optional, Union, Any
from texthero import preprocessing
import logging
import warnings
# from texthero import pandas_ as pd_
"""
Helper
"""
def flatten(
s: Union[pd.Series, pd.Series.sparse],
index: pd.Index = None,
fill_missing_with: Any = 0.0,
) -> pd.Series:
"""
Transform a Pandas Representation Series to a "normal" (flattened) Pandas Series.
The given Series should have a multiindex with first level being the document
and second level being individual features of that document (e.g. tdidf scores per word).
The flattened Series has one cell per document, with the cell being a list of all
the individual features of that document.
Parameters
----------
s : Sparse Pandas Series or Pandas Series
The multiindexed Pandas Series to flatten.
index : Pandas Index, optional, default to None
The index the flattened Series should have.
fill_missing_with : Any, default to 0.0
Value to fill the NaNs (missing values) with. This _does not_ mean
that existing values that are np.nan are replaced, but rather that
features that are not present in one document but present in others
are filled with fill_missing_with. See example below.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> import numpy as np
>>> index = pd.MultiIndex.from_tuples([("doc0", "Word1"), ("doc0", "Word3"), ("doc1", "Word2")], names=['document', 'word'])
>>> s = pd.Series([3, np.nan, 4], index=index)
>>> s
document word
doc0 Word1 3.0
Word3 NaN
doc1 Word2 4.0
dtype: float64
>>> hero.flatten(s, fill_missing_with=0.0)
document
doc0 [3.0, 0.0, nan]
doc1 [0.0, 4.0, 0.0]
dtype: object
"""
s = s.unstack(fill_value=fill_missing_with)
if index is not None:
s = s.reindex(index, fill_value=fill_missing_with)
# Reindexing makes the documents for which no values
# are present in the Sparse Representation Series
# "reappear" correctly.
s = pd.Series(s.values.tolist(), index=s.index)
return s
def _check_is_valid_representation(s: pd.Series) -> bool:
"""
Check if the given Pandas Series is a Document Representation Series.
Returns true if Series is Document Representation Series, else False.
"""
# TODO: in Version 2 when only representation is accepted as input -> change "return False" to "raise ValueError"
if not isinstance(s.index, pd.MultiIndex):
return False
# raise ValueError(
# f"The input Pandas Series should be a Representation Pandas Series and should have a MultiIndex. The given Pandas Series does not appears to have MultiIndex"
# )
if s.index.nlevels != 2:
return False
# raise ValueError(
# f"The input Pandas Series should be a Representation Pandas Series and should have a MultiIndex, where the first level represent the document and the second one the words/token. The given Pandas Series has {s.index.nlevels} number of levels instead of 2."
# )
return True
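# Example of the structure this check expects (illustrative, mirroring the
# doctest style used in this module):
# >>> index = pd.MultiIndex.from_tuples([("doc0", "w1"), ("doc1", "w2")])
# >>> _check_is_valid_representation(pd.Series([1.0, 2.0], index=index))
# True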
# Warning message for not-tokenized inputs
_not_tokenized_warning_message = (
"It seems like the given Pandas Series s is not tokenized. This function will"
" tokenize it automatically using hero.tokenize(s) first. You should consider"
" tokenizing it yourself first with hero.tokenize(s) in the future."
)
"""
Vectorization
"""
def count(
s: pd.Series,
max_features: Optional[int] = None,
min_df=1,
max_df=1.0,
binary=False,
) -> pd.Series:
"""
Represent a text-based Pandas Series using count.
Return a Document Representation Series with the
number of occurences of a document's words for every
document.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before count is calculated.
Use :meth:`hero.representation.flatten` on the output to get
a standard Pandas Series with the document vectors
in every cell.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
Maximum number of features to keep. Will keep all features if set to None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they appear in)
frequency strictly higher than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
binary : bool, default=False
If True, all non zero counts are set to 1.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Sentence one", "Sentence two"]).pipe(hero.tokenize)
>>> hero.count(s)
0 Sentence 1
one 1
1 Sentence 1
two 1
dtype: Sparse[int64, 0]
See Also
--------
Document Representation Series: TODO add tutorial link
"""
# TODO. Can be rewritten without sklearn.
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tf = CountVectorizer(
max_features=max_features,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
min_df=min_df,
max_df=max_df,
binary=binary,
)
tf_vectors_csr = tf.fit_transform(s)
tf_vectors_coo = coo_matrix(tf_vectors_csr)
s_out = pd.Series.sparse.from_coo(tf_vectors_coo)
features_names = tf.get_feature_names()
# Map word index to word name
s_out.index = s_out.index.map(lambda x: (s.index[x[0]], features_names[x[1]]))
return s_out
def term_frequency(
s: pd.Series, max_features: Optional[int] = None, min_df=1, max_df=1.0,
) -> pd.Series:
"""
Represent a text-based Pandas Series using term frequency.
Return a Document Representation Series with the
term frequencies of the terms for every
document.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before term_frequency is calculated.
Use :meth:`hero.representation.flatten` on the output to get
a standard Pandas Series with the document vectors
in every cell.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
Maximum number of features to keep. Will keep all features if set to None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they appear in)
frequency strictly higher than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Sentence one hey", "Sentence two"]).pipe(hero.tokenize)
>>> hero.term_frequency(s)
0 Sentence 0.2
hey 0.2
one 0.2
1 Sentence 0.2
two 0.2
dtype: Sparse[float64, nan]
See Also
--------
Document Representation Series: TODO add tutorial link
"""
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tf = CountVectorizer(
max_features=max_features,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
min_df=min_df,
max_df=max_df,
)
tf_vectors_csr = tf.fit_transform(s)
tf_vectors_coo = coo_matrix(tf_vectors_csr)
total_count_coo = np.sum(tf_vectors_coo)
frequency_coo = np.divide(tf_vectors_coo, total_count_coo)
s_out = pd.Series.sparse.from_coo(frequency_coo)
features_names = tf.get_feature_names()
# Map word index to word name
s_out.index = s_out.index.map(lambda x: (s.index[x[0]], features_names[x[1]]))
return s_out
def tfidf(s: pd.Series, max_features=None, min_df=1, max_df=1.0,) -> pd.Series:
"""
Represent a text-based Pandas Series using TF-IDF.
*Term Frequency - Inverse Document Frequency (TF-IDF)* is a formula to
calculate the _relative importance_ of the words in a document, taking
into account the words' occurences in other documents. It consists of two parts:
The *term frequency (tf)* tells us how frequently a term is present in a document,
so tf(document d, term t) = number of times t appears in d.
The *inverse document frequency (idf)* measures how _important_ or _characteristic_
a term is among the whole corpus (i.e. among all documents).
Thus, idf(term t) = log((1 + number of documents) / (1 + number of documents where t is present)) + 1.
Finally, tf-idf(document d, term t) = tf(d, t) * idf(t).
Different from the `sklearn-implementation of
tfidf <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`,
this function does *not* normalize the output in any way,
so the result is exactly what you
get applying the formula described above.
Return a Document Representation Series with the
tfidf of every word in the document.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before tfidf is calculated.
If working with big pandas Series, you might want to limit
the number of features through the max_features parameter.
Use :meth:`hero.representation.flatten` on the output to get
a standard Pandas Series with the document vectors
in every cell.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
If not None, only the max_features most frequent tokens are used.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they appear in)
frequency strictly higher than the given threshold.
This arguments basically permits to remove corpus-specific stop words.
If float, the parameter represents a proportion of documents, integer
absolute counts.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Hi Bye", "Test Bye Bye"]).pipe(hero.tokenize)
>>> hero.tfidf(s)
0 Bye 1.000000
Hi 1.405465
1 Bye 2.000000
Test 1.405465
dtype: Sparse[float64, nan]
See Also
--------
`TF-IDF on Wikipedia <https://en.wikipedia.org/wiki/Tf-idf>`_
Document Representation Series: TODO add tutorial link
"""
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tfidf = TfidfVectorizer(
use_idf=True,
max_features=max_features,
min_df=min_df,
max_df=max_df,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
norm=None, # Disable l1/l2 normalization.
)
tfidf_vectors_csr = tfidf.fit_transform(s)
# Result from sklearn is in Compressed Sparse Row format.
# Pandas Sparse Series can only be initialized from Coordinate format.
tfidf_vectors_coo = coo_matrix(tfidf_vectors_csr)
    s_out = pd.Series.sparse.from_coo(tfidf_vectors_coo)
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical temperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
his_temp_matrix = df_temp.values
# Import calender
calender=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Calender',header= None)
calender=calender.values
julian=calender[:,2]
###############################
# Synthetic HDD CDD calculation
# Simulation data
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
# Load temperature data only
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
sim_temperature=sim_weather[cities]
# Convert temperatures to Fahrenheit
sim_temperature= (sim_temperature*(9/5))+32
sim_temperature=sim_temperature.values
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
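# Note (illustrative sketch, not used below): the daily and annual degree-day
# loops above can equivalently be written in vectorized numpy form. The helper
# is hypothetical and kept only to document the calculation.
def _degree_days_vectorized(temps_f, base=65.0):
    """Return (annual_HDD, annual_CDD) for a (num_days x num_cities) array of
    daily temperatures in Fahrenheit, assuming whole 365-day years."""
    hdd = np.maximum(0.0, base - temps_f)
    cdd = np.maximum(0.0, temps_f - base)
    num_years = temps_f.shape[0]//365
    shape = (num_years, 365, temps_f.shape[1])
    return (hdd[:num_years*365].reshape(shape).sum(axis=1),
            cdd[:num_years*365].reshape(shape).sum(axis=1))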
########################################################################
#Calculate HDD and CDD for historical temperature data
num_cities = len(cities)
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_sums = np.reshape(np.sum(BPA,axis= 0).values,(1,num_BPA))
CA_sums = np.reshape(np.sum(CA,axis=0).values,(1,num_CA))
WB_sums = np.reshape(np.sum(WB,axis=0).values,(1,num_Will))
HO_sums = np.reshape(np.sum(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = np.column_stack((BPA_sums,CA_sums,WB_sums,HO_sums))
if y_index < 1:
hist_totals = joined
else:
hist_totals = np.vstack((hist_totals,joined))
BPA_headers = np.reshape(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = np.reshape(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = np.reshape(list(Willamette_streamflow.loc[:,'Albany':]),(1,num_Will))
HO_headers = np.reshape(['Hoover'],(1,1))
headers = np.column_stack((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
added_value=abs(np.min((df_hist_totals)))+5
log_hist_total=np.log(df_hist_totals+abs(added_value))
A=df_hist_totals.values
B=np.column_stack((A,annual_HDD,annual_CDD))
x,y=np.shape(B)
# B is the data matrix over all time steps; its dimension is X*Y
# (a second data matrix would only be required if calculating dissimilarity)
# Step 1: Transform the data into an empirical CDF
P=np.zeros((x,y))
for i in range(0,y):
ECDF=edis.ECDF(B[:,i])
P[:,i]=ECDF(B[:,i])
Y=2*(P-0.5)
new_cols = ['Name'] + ['type_' + str(i) for i in range(0,141)]
#remove constant zeros columns
need_to_remove=[1,17,22,24,27,32,34,36,37,38,44,107,108,109]
Y2=np.delete(Y,need_to_remove,axis=1)
Y[:,107]=1
mean=np.mean(Y,axis=0)
cov=np.cov(Y,rowvar=0)
runs=int(num_sim_days/365)*5
sim_years=int(num_sim_days/365)
N = np.random.multivariate_normal(mean,cov,runs)
T=(N/2)+0.5
T_all=np.zeros((runs,y))
for i in range(0,y):
for j in range(0,runs):
if T[j,i] <0:
T_all[j,i]=(np.percentile(B[:,i],q=0*100))*(1+T[j,i])
elif T[j,i] <=1 and T[j,i] >=0:
T_all[j,i]=np.percentile(B[:,i],q=T[j,i]*100)
else:
T_all[j,i]=(np.percentile(B[:,i],q=1*100))*T[j,i]
Sim_total=T_all[:,:112]
Sim_HDD_CDD=T_all[:,112:]
Sim_CDD=Sim_HDD_CDD[:,15:]
Sim_HDD=Sim_HDD_CDD[:,:15]
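# Summary of the block above (illustrative): the historical annual totals and
# degree days are mapped through their empirical CDFs, a multivariate normal is
# fit to the rescaled ranks, and samples from it are mapped back through the
# historical percentiles -- a Gaussian-copula-style resampling that preserves
# the rank correlation between stream gages and weather. The helper below
# restates the inverse mapping used in the T_all loop for a single column; it
# is a sketch and is not called elsewhere.
def _inverse_percentile_map(column_hist, t):
    """Map a simulated rank t back to the historical marginal of one column,
    extrapolating linearly below the minimum (t < 0) and above the maximum
    (t > 1), exactly as in the loop above."""
    if t < 0:
        return np.percentile(column_hist, q=0)*(1 + t)
    elif t <= 1:
        return np.percentile(column_hist, q=t*100)
    else:
        return np.percentile(column_hist, q=100)*t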
######################################
#sns.kdeplot(annual_CDD[:,0],label='His')
#sns.kdeplot(annual_CDD_sim[:,0],label='Syn')
#sns.kdeplot(Sim_HDD_CDD[:,15],label='Capula')
#plt.legend()
#
#sns.kdeplot(annual_HDD[:,0],label='His')
#sns.kdeplot(annual_HDD_sim[:,0],label='Syn')
#sns.kdeplot(Sim_HDD_CDD[:,0],label='Capula')
#plt.legend()
#########################################
HDD_CDD=np.column_stack((annual_HDD_sim,annual_CDD_sim))
year_list=np.zeros(int(num_sim_days/365))
Best_RMSE = 9999999999
CHECK=np.zeros((sim_years,runs))
for i in range(0,sim_years):
for j in range(0,runs):
RMSE = (np.sum(np.abs(HDD_CDD[i,:]-Sim_HDD_CDD[j,:])))
CHECK[i,j]=RMSE
if RMSE <= Best_RMSE:
year_list[i] = j
Best_RMSE=RMSE
else:
pass
Best_RMSE = 9999999999
sim_totals=np.zeros((sim_years,num_gages))
for i in range(0,sim_years):
sim_totals[i,:] = Sim_total[int(year_list[i]),:]
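# Note (illustrative): despite the RMSE naming, the matching criterion above is
# the sum of absolute differences (an L1 distance) between each synthetic
# year's HDD/CDD totals and every candidate draw. Up to ties, the same
# year_list could be obtained in one call with scipy's pairwise distances:
# from scipy.spatial.distance import cdist
# year_list_alt = np.argmin(cdist(HDD_CDD, Sim_HDD_CDD, 'cityblock'), axis=1)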
###################################################################################
#C_1=np.corrcoef(sim_totals,rowvar=0)
#C_his=np.corrcoef(A,rowvar=0)
#import seaborn as sns; sns.set()
#
#grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
#fig,ax=plt.subplots()
#plt.rcParams["font.weight"] = "bold"
#plt.rcParams["axes.labelweight"] = "bold"
#ax1=plt.subplot(121)
#sns.heatmap(C_1,vmin=0,vmax=1,cbar=False)
#plt.axis('off')
#ax.set_title('Syn')
#
#
#
#ax2=plt.subplot(122)
#cbar_ax = fig.add_axes([.92, .15, .03, .7]) # <-- Create a colorbar axes
#
#fig2=sns.heatmap(C_his,ax=ax2,cbar_ax=cbar_ax,vmin=0,vmax=1)
#cbar=ax2.collections[0].colorbar
#cbar.ax.tick_params(labelsize='large')
#
#fig2.axis('off')
#
#
#
##################################################################################
#plt.figure()
#sns.kdeplot(A[:,0],label='His')
#sns.kdeplot(sim_totals[:,0],label='Syn')
#sns.kdeplot(Sim_total[:,0],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,5],label='His')
#sns.kdeplot(sim_totals[:,5],label='Syn')
#sns.kdeplot(Sim_total[:,5],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,52],label='His')
#sns.kdeplot(sim_totals[:,52],label='Syn')
#sns.kdeplot(Sim_total[:,52],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,55],label='His')
#sns.kdeplot(sim_totals[:,55],label='Syn')
#sns.kdeplot(Sim_total[:,55],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,56],label='His')
#sns.kdeplot(sim_totals[:,56],label='Syn')
#sns.kdeplot(Sim_total[:,56],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,66],label='His')
#sns.kdeplot(sim_totals[:,66],label='Syn')
#sns.kdeplot(Sim_total[:,66],label='Capula')
#plt.legend()
##################################################################################
# impose logical constraints
mins = np.min(df_hist_totals.loc[:,:'Hoover'],axis=0)
for i in range(0,num_gages):
lower_bound = mins[i]
for j in range(0,sim_years):
if sim_totals[j,i] < lower_bound:
sim_totals[j,i] = lower_bound*np.random.uniform(0,1)
df_sim_totals = pd.DataFrame(sim_totals)
H = list(headers[0,:])  # headers has shape (1, num_gages); take the row so the column labels match
df_sim_totals.columns = H
#A1=[]
#A2=[]
#for h in H:
# a1=np.average(df_hist_totals.loc[:,h])
# a2=np.average(df_sim_totals.loc[:,h])
# A1.append(a1)
# A2.append(a2)
#
#plt.plot(A1)
#plt.plot(A2)
#####################################################################################
# This section selects daily fractions which are paired with
# annual totals to arrive at daily streamflows
# 4 cities are nearest to all 109 stream gage sites
Fraction_calculation_cities=['Spokane','Boise','Sacramento','Fresno']
# Each is weighted by average annual flow at nearby gage sites
Temperature_weights=pd.read_excel('Synthetic_streamflows/city_weights.xlsx',header=0)
# historical temperatures for those 4 cities
fraction_hist_temp=df_temp[Fraction_calculation_cities]
fraction_hist_temp_matrix=fraction_hist_temp.values
# calculate daily record of weighted temperatures across 4 cities
weighted_T=np.zeros(len(fraction_hist_temp_matrix))
for i in range(0,len(fraction_hist_temp_matrix)):
weighted_T[i]=fraction_hist_temp_matrix[i,0]*Temperature_weights['Spokane'] + fraction_hist_temp_matrix[i,1] * Temperature_weights['Boise'] + fraction_hist_temp_matrix[i,2] * Temperature_weights['Sacramento'] + fraction_hist_temp_matrix[i,3]*Temperature_weights['Fresno']
# synthetic temperatures for each of the cities
fcc = list(['SPOKANE_T','BOISE_T','SACRAMENTO_T','FRESNO_T'])
fraction_sim=sim_weather[fcc]
fraction_sim_matrix=fraction_sim.values
weighted_T_sim=np.zeros(len(fraction_sim_matrix))
# calculate synthetic weighted temperature (in Fahrenheit)
for i in range(0,len(fraction_sim_matrix)):
weighted_T_sim[i]=fraction_sim_matrix[i,0]*Temperature_weights['Spokane'] + fraction_sim_matrix[i,1] * Temperature_weights['Boise'] + fraction_sim_matrix[i,2] * Temperature_weights['Sacramento'] + fraction_sim_matrix[i,3]*Temperature_weights['Fresno']
weighted_T_sim=(weighted_T_sim * (9/5)) +32
#Sample synthetic fractions, then combine with totals
sim_years=int(len(fraction_sim_matrix)/365)
sim_T=np.zeros((365,sim_years))
hist_years=int(len(fraction_hist_temp)/365)
hist_T=np.zeros((365,hist_years))
# reshape historical and simulated weighted temperatures in new variables
for i in range(0,hist_years):
hist_T[:,i] = weighted_T[i*365:365+(i*365)]
for i in range(0,sim_years):
sim_T[:,i] = weighted_T_sim[i*365:365+(i*365)]
# aggregate weighted temperatures into monthly values
Normal_Starting=datetime(1900,1,1)
datelist=pd.date_range(Normal_Starting,periods=365)
count=0
m=np.zeros(365)
for i in range(0,365):
m[i]=int(datelist[count].month)
count= count +1
if count >364:
count=0
hist_T_monthly=np.column_stack((hist_T,m))
monthly_hist_T=np.zeros((12,hist_years))
for i in range(0,hist_years):
for j in range(1,13):
d1=hist_T_monthly[hist_T_monthly[:,hist_years]==j]
d2=d1[:,:hist_years]
monthly_hist_T[j-1,:]=np.sum(d2,axis=0)
Normal_Starting=datetime(1900,1,1)
datelist=pd.date_range(Normal_Starting,periods=365)
count=0
m=np.zeros(365)
for i in range(0,365):
m[i]=int(datelist[count].month)
count= count +1
if count >364:
count=0
sim_T_monthly=np.column_stack((sim_T,m))
monthly_sim_T=np.zeros((12,sim_years))
for i in range(0,sim_years):
for j in range(1,13):
d1=sim_T_monthly[sim_T_monthly[:,sim_years]==j]
d2=d1[:,:sim_years]
monthly_sim_T[j-1,:]=np.sum(d2,axis=0)
# select historical year with most similar spring and summer temperatures
# to new simulated years
year_list=np.zeros(sim_years)
Best_RMSE = 9999999999
CHECK=np.zeros((sim_years,hist_years))
for i in range(0,sim_years):
for j in range(0,hist_years):
RMSE = (np.sum(np.abs(monthly_sim_T[3:8,i]-monthly_hist_T[3:8,j])))
CHECK[i,j]=RMSE
if RMSE <= Best_RMSE:
year_list[i] = j
Best_RMSE=RMSE
else:
pass
Best_RMSE = 9999999999
################################################################################
#Generate streamflow
TDA=np.zeros((int(365*sim_years),2))
totals_hist=np.zeros((num_gages,hist_years))
fractions_hist=np.zeros((hist_years,365,num_gages))
totals_hist_hoover=np.zeros((1,hist_years))
output_BPA=np.zeros((sim_years*365,num_BPA))
output_Hoover=np.zeros((sim_years*365,1))
output_CA=np.zeros((sim_years*365,num_CA))
output_WI=np.zeros((sim_years*365,num_Will))
# historical daily flows
x_Hoover=Hoover_streamflow.loc[:,'Discharge'].values
x_BPA=BPA_streamflow.loc[:,'1M':].values
x_CA=CA_streamflow.loc[:,'ORO_fnf':].values
x_WI=Willamette_streamflow.loc[:,'Albany':'COT5A'].values
x=np.column_stack((x_BPA,x_CA,x_WI,x_Hoover))
x=np.reshape(x,(hist_years,365,num_gages))
# historical daily fractions
for i in range(0,hist_years):
for j in range(0,num_gages):
totals_hist[j,i] = np.sum(np.abs(x[i,:,j]))
if totals_hist[j,i] ==0:
fractions_hist[i,:,j]=0
else:
fractions_hist[i,:,j]= x[i,:,j]/totals_hist[j,i]
# sample simulated daily fractions
for i in range(0,sim_years):
for j in range(0,num_gages):
if j <=num_BPA-1:
output_BPA[(i*365):(i*365)+365,j]=fractions_hist[int(year_list[i]),:,j]*sim_totals[i,j]
elif j == num_gages-1:
output_Hoover[(i*365):(i*365)+365,0]=fractions_hist[int(year_list[i]),:,j]*sim_totals[i,j]
elif j>num_BPA-1 and j<=num_BPA+num_CA-1:
output_CA[(i*365):(i*365)+365,j-num_BPA]=fractions_hist[int(year_list[i]),:,j]*sim_totals[i,j]
else:
output_WI[(i*365):(i*365)+365,j-num_BPA-num_CA]=fractions_hist[int(year_list[i]),:,j]*sim_totals[i,j]
TDA[(i*365):(i*365)+365,0]=range(1,366)
# assign flows to the Dalles, OR
TDA[:,1]=output_BPA[:,47]
###############################################################################
# Output
np.savetxt('Synthetic_streamflows/synthetic_streamflows_FCRPS.csv',output_BPA,delimiter=',')
np.savetxt('Synthetic_streamflows/synthetic_streamflows_TDA.csv',TDA[:,1],delimiter=',')
np.savetxt('Synthetic_streamflows/synthetic_discharge_Hoover.csv',output_Hoover,delimiter=',')
CA=pd.DataFrame(output_CA,columns=name_CA)
CA.to_csv('Synthetic_streamflows/synthetic_streamflows_CA.csv')
Willamatte_Syn=pd.DataFrame(output_WI,columns=name_Will)
Willamatte_Syn.to_csv('Synthetic_streamflows/synthetic_streamflows_Willamette.csv')
#write CA synthetic flows to ORCA file
leap_cycles = int(sim_years//4)
r=np.shape(output_CA)
for i in range(0,leap_cycles):
if i < 1:
C = output_CA[0:1154,:]
B = np.empty((1,int(r[1])))
B[:] =np.nan
D = output_CA[i*1460+1154:i*1460+1154+1460]
F = np.vstack((C,B,D))
else:
D = output_CA[i*1460+1154:i*1460+1154+1460]
F = np.vstack((F,B,D))
df_leap = | pd.DataFrame(F,columns=name_CA) | pandas.DataFrame |
import slack
from flask import Response
import pandas as pd
import numpy as np
from numpy import nan
import re
import os
import networkx as nx
from pyvis.network import Network
from dotenv import load_dotenv, dotenv_values
from statsmodels.tsa.arima.model import ARIMA
# load environment variables
# config = dotenv_values(".env")
load_dotenv()
SLACK_TOKEN = os.getenv('SLACK_TOKEN')
# define slack client
client = slack.WebClient(token=SLACK_TOKEN)
# function to retrieve the display name of the user based on user id
def get_name(user_id):
try:
out = client.users_info(user=user_id)["user"]["profile"]["real_name"]
except:
out = None
return out
# function to get the channels that a user is active in
def get_user_channels(user_id):
return client.users_conversations(user=user_id)["channels"]
# send response message to user
def send_response_message(user_id):
# define message to be posted
message = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': (
":sparkles: Hey, check out our latest analysis of your team here: <https://network-analysis.azurewebsites.net|LINK> :sparkles:"
)
}
}
client.chat_postMessage(channel=user_id, blocks=[message])
# function used to retrieve network analysis data for a specific channel
def get_slack_data(user_id, text):
# define channel id
try:
channel_id = [channel["id"] for channel in get_user_channels(user_id) if channel["name"] == text][0]
except:
channel_id = "C01T6GGTBQD"
# get channel history
result = client.conversations_history(channel=channel_id, limit=1000)
# retrieve messages
conversation_history = result["messages"]
# create DataFrame
messages = pd.DataFrame(conversation_history)
# add channel id to df
messages["user_id"] = str(user_id)
# convert timestamp to datetime object
messages['date'] = pd.to_datetime(messages['ts'], unit="s").dt.date
messages['ts'] = pd.to_datetime(messages['ts'], unit="s")
# clean text column from quotation marks
messages["text"] = messages["text"].apply(lambda x: re.sub(r"\"", "", x))
# replace user ids with names of users
# messages["reply_users"] = messages["reply_users"].apply(get_name)
# messages["user"] = messages["user"].apply(get_name)
# select columns to save
messages = messages[["client_msg_id", "user_id", "reply_users", "user", "text", "date"]]
# def find_reaction_users(col):
# try:
# return col[0]["users"]
# except:
# return np.nan
# find user ids in the reactions column
#messages["reactions"] = messages["reactions"].apply(find_reaction_users)
# explode the reply_users column to get senders of replies
# messages = messages.explode("reply_users")
# explode the reactions column to get senders of reactions
#messages = messages.explode("reactions")
messages.dropna(inplace=True)
# convert reply users to string for database
messages["reply_users"] = messages["reply_users"].astype(str)
return messages
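# Hedged usage sketch (the user id and channel name below are placeholders; a valid
# SLACK_TOKEN with access to that channel is assumed):
#   df_general = get_slack_data("U01PLACEHOLDER", "general")
#   print(df_general[["user", "text", "date"]].head())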
def time_series_analysis(df):
# df = df[df["reply_users"] != "nan"]
df['reply_users'] = | pd.eval(df['reply_users']) | pandas.eval |
"""Tests for _data_reading.py"""
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import primap2
import primap2.pm2io as pm2io
import primap2.pm2io._conversion
from primap2.pm2io._data_reading import additional_coordinate_metadata
from .utils import assert_ds_aligned_equal
DATA_PATH = Path(__file__).parent / "data"
@pytest.mark.parametrize(
"unit, entity, expected_attrs",
[
("Mt", "CO2", {"units": "Mt", "entity": "CO2"}),
(
"Gg CO2",
"KYOTOGHG (AR4GWP100)",
{
"units": "Gg CO2",
"entity": "KYOTOGHG",
"gwp_context": "AR4GWP100",
},
),
(
"kg CO2",
"CH4 (SARGWP100)",
{
"units": "kg CO2",
"entity": "CH4",
"gwp_context": "SARGWP100",
},
),
],
)
def test_metadata_for_variable(unit, entity, expected_attrs):
assert (
pm2io._interchange_format.metadata_for_variable(unit, entity) == expected_attrs
)
def assert_attrs_equal(attrs_result, attrs_expected):
assert attrs_result.keys() == attrs_expected.keys()
assert attrs_result["attrs"] == attrs_expected["attrs"]
assert attrs_result["time_format"] == attrs_expected["time_format"]
assert attrs_result["dimensions"].keys() == attrs_expected["dimensions"].keys()
for entity in attrs_result["dimensions"]:
assert set(attrs_result["dimensions"][entity]) == set(
attrs_expected["dimensions"][entity]
)
@pytest.fixture
def coords_cols():
return {
"unit": "unit",
"entity": "gas",
"area": "country",
"category": "category",
"sec_cats__Class": "classification",
}
@pytest.fixture
def add_coords_cols():
return {"category_name": ["category_name", "category"]}
@pytest.fixture
def coords_defaults():
return {
"source": "TESTcsv2021",
"sec_cats__Type": "fugitive",
"scenario": "HISTORY",
}
@pytest.fixture
def coords_terminologies():
return {
"area": "ISO3",
"category": "IPCC2006",
"sec_cats__Type": "type",
"sec_cats__Class": "class",
"scenario": "general",
}
@pytest.fixture
def coords_value_mapping():
return {
"category": "PRIMAP1",
"entity": "PRIMAP1",
"unit": "PRIMAP1",
}
@pytest.fixture
def coords_value_filling():
return {
"category": { # col to fill
"category_name": { # col to fill from
"Energy": "1", # from value: to value
"IPPU": "2",
}
}
}
@pytest.fixture
def filter_keep():
return {
"f1": {"category": ["IPC0", "IPC2"]},
"f2": {"classification": "TOTAL"},
}
@pytest.fixture
def filter_remove():
return {"f1": {"gas": "CH4"}, "f2": {"country": ["USA", "FRA"]}}
class TestReadWideCSVFile:
def test_output(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
meta_data = {"references": "Just ask around."}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
meta_data=meta_data,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"references": "Just ask around.",
"sec_cats": ["Class (class)", "Type (type)"],
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"Type (type)",
"unit",
"scenario (general)",
"Class (class)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_no_sec_cats(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_add_coords(
self,
tmp_path,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data_category_name.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats_cat_name.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
| pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False) | pandas.testing.assert_frame_equal |
import numpy as np
from numpy import array_equal
import pandas as pd
from scipy import io
from sklearn.model_selection import StratifiedKFold
from fisher_score import fisher_score
from reliefF import reliefF
from random_forest import apply_RF
from SVM import apply_SVM_RFE
from Simple_cont import simple_MI
from Iterative_cont import iterative_MI
from itertools import cycle
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.stats import binom
from classify import predict
from sklearn.metrics import accuracy_score
# Load Data
dataset = 'colon.mat'
in_file = io.loadmat(dataset)
X = | pd.DataFrame(in_file['X'], dtype=float) | pandas.DataFrame |
import numpy as np
import pandas as pd
# Auxiliary functions
def get_dummies(data):
data = data.copy()
if isinstance(data, pd.Series):
data = pd.factorize(data)[0]
return data
for col in data.columns:
data.loc[:, col] = pd.factorize(data[col])[0]
return data
def learncats(data, classcol=None, continuous_ids=[]):
"""
Learns the number of categories in each variable and standardizes the data.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
classcol: int
The column index of the class variables (if any).
continuous_ids: list of ints
List containing the indices of known continuous variables. Useful for
discrete data like age, which is better modeled as continuous.
Returns
-------
ncat: numpy m
The number of categories of each variable. One if the variable is
continuous.
"""
data = data.copy()
ncat = np.ones(data.shape[1])
    if classcol is None:
classcol = data.shape[1]-1
for i in range(data.shape[1]):
if i != classcol and (i in continuous_ids or is_continuous(data[:, i])):
continue
else:
data[:, i] = data[:, i].astype(int)
ncat[i] = max(data[:, i]) + 1
return ncat
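# Hedged usage sketch for learncats (toy data invented for illustration; the last
# column is the class). Column 0 is detected as continuous, column 1 has three
# categories and the class has two:
#   toy = np.array([[0.5, 0, 0], [1.3, 1, 1], [2.7, 0, 1],
#                   [0.1, 2, 0], [3.9, 1, 1], [2.2, 0, 0],
#                   [1.8, 2, 1], [0.7, 1, 0], [2.9, 0, 1]])
#   learncats(toy, classcol=2)   # -> array([1., 3., 2.])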
def get_stats(data, ncat=None):
"""
Compute univariate statistics for continuous variables.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
Returns
-------
data: numpy n x m
The normalized data.
maxv, minv: numpy m
The maximum and minimum values of each variable. One and zero, resp.
if the variable is categorical.
mean, std: numpy m
The mean and standard deviation of the variable. Zero and one, resp.
if the variable is categorical.
"""
data = data.copy()
maxv = np.ones(data.shape[1])
minv = np.zeros(data.shape[1])
mean = np.zeros(data.shape[1])
std = np.zeros(data.shape[1])
if ncat is not None:
for i in range(data.shape[1]):
if ncat[i] == 1:
maxv[i] = np.max(data[:, i])
minv[i] = np.min(data[:, i])
mean[i] = np.mean(data[:, i])
std[i] = np.std(data[:, i])
assert maxv[i] != minv[i], 'Cannot have constant continuous variable in the data'
data[:, i] = (data[:, i] - minv[i])/(maxv[i] - minv[i])
else:
for i in range(data.shape[1]):
if is_continuous(data[:, i]):
maxv[i] = np.max(data[:, i])
minv[i] = np.min(data[:, i])
mean[i] = np.mean(data[:, i])
std[i] = np.std(data[:, i])
assert maxv[i] != minv[i], 'Cannot have constant continuous variable in the data'
data[:, i] = (data[:, i] - minv[i])/(maxv[i] - minv[i])
return data, maxv, minv, mean, std
def normalize_data(data, maxv, minv):
"""
Normalizes the data given the maximum and minimum values of each variable.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
maxv, minv: numpy m
The maximum and minimum values of each variable. One and zero, resp.
if the variable is categorical.
Returns
-------
data: numpy n x m
The normalized data.
"""
data = data.copy()
for v in range(data.shape[1]):
if maxv[v] != minv[v]:
data[:, v] = (data[:, v] - minv[v])/(maxv[v] - minv[v])
return data
def standardize_data(data, mean, std):
"""
Standardizes the data given the mean and standard deviations values of
each variable.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
mean, std: numpy m
The mean and standard deviation of the variable. Zero and one, resp.
if the variable is categorical.
Returns
-------
data: numpy n x m
The standardized data.
"""
data = data.copy()
for v in range(data.shape[1]):
if std[v] > 0:
data[:, v] = (data[:, v] - mean[v])/(std[v])
# Clip values more than 6 standard deviations from the mean
data[:, v] = np.clip(data[:, v], -6, 6)
return data
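# Hedged usage sketch mirroring train_test_split below: fit the statistics on the
# training split only, then standardize both splits with them:
#   _, maxv, minv, mean, std = get_stats(data_train, ncat)
#   data_train = standardize_data(data_train, mean, std)
#   data_test = standardize_data(data_test, mean, std)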
def is_continuous(data):
"""
Returns true if data was sampled from a continuous variables, and false
Otherwise.
Parameters
----------
data: numpy
One dimensional array containing the values of one variable.
"""
    observed = data[~np.isnan(data)]  # do not consider missing values here.
rules = [np.min(observed) < 0,
np.sum((observed) != np.round(observed)) > 0,
len(np.unique(observed)) > min(30, len(observed)/3)]
if any(rules):
return True
else:
return False
def train_test_split(data, ncat, train_ratio=0.7, prep='std'):
assert train_ratio >= 0
assert train_ratio <= 1
shuffle = np.random.choice(range(data.shape[0]), data.shape[0], replace=False)
data_train = data[shuffle[:int(train_ratio*data.shape[0])], :]
data_test = data[shuffle[int(train_ratio*data.shape[0]):], :]
if prep=='norm':
data_train, maxv, minv, _, _, = get_stats(data_train, ncat)
data_test = normalize_data(data_test, maxv, minv)
elif prep=='std':
_, maxv, minv, mean, std = get_stats(data_train, ncat)
data_train = standardize_data(data_train, mean, std)
data_test = standardize_data(data_test, mean, std)
X_train, y_train = data_train[:, :-1], data_train[:, -1]
X_test, y_test = data_test[:, :-1], data_test[:, -1]
return X_train, X_test, y_train, y_test, data_train, data_test
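# Hedged usage sketch (data is a numpy array with the class in the last column and
# ncat the vector returned by learncats):
#   X_train, X_test, y_train, y_test, _, _ = train_test_split(data, ncat,
#                                                             train_ratio=0.7,
#                                                             prep='std')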
# Preprocessing functions
def adult(data):
cat_cols = ['workclass', 'education', 'education-num', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'native-country', 'y']
cont_cols = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'capital-gain',
'capital-loss', 'hours-per-week']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def australia(data):
cat_cols = ['A1', 'A4', 'A5', 'A6', 'A7', 'A9', 'A10', 'A12', 'A13', 'class']
cont_cols = ['A2', 'A3', 'A8', 'A11', 'A14', 'A15']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
data = data.replace('?', np.nan)
ncat = learncats(data.values.astype(float), classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def bank(data):
cat_cols = ['job', 'marital', 'education', 'default', 'housing', 'loan',
'contact', 'month', 'day_of_week', 'poutcome', 'y']
cont_cols = ['age', 'duration', 'campaign', 'previous', 'emp.var.rate',
'cons.price.idx','cons.conf.idx', 'euribor3m', 'nr.employed']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
data.loc[:, 'pdays'] = np.where(data['pdays']==999, 0, 1)
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def credit(data):
cat_cols = ['SEX', 'EDUCATION', 'MARRIAGE', 'default payment next month']
cont_cols = ['LIMIT_BAL', 'AGE', 'PAY_0', 'PAY_2',
'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',
'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def electricity(data):
cat_cols = ['day', 'class']
cont_cols = ['date', 'period', 'nswprice', 'nswdemand', 'vicprice',
'vicdemand', 'transfer']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def segment(data):
data = data.drop(columns=['region.centroid.col', 'region.pixel.count'])
cat_cols = ['short.line.density.5', 'short.line.density.2', 'class']
cont_cols = ['region.centroid.row', 'vedge.mean', 'vegde.sd', 'hedge.mean', 'hedge.sd',
'intensity.mean', 'rawred.mean', 'rawblue.mean', 'rawgreen.mean', 'exred.mean', 'exblue.mean' ,
'exgreen.mean', 'value.mean', 'saturation.mean', 'hue.mean']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def german(data):
cat_cols = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 18, 19, 20]
cont_cols = [1, 4, 7, 10, 12, 15, 17]
data.iloc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=cont_cols)
return data.values.astype(float), ncat
def vowel(data):
cat_cols = ['Speaker_Number', 'Sex', 'Class']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=data.shape[1]-1)
return data.values.astype(float), ncat
def cmc(data):
cat_cols = ['Wifes_education', 'Husbands_education', 'Wifes_religion', 'Wifes_now_working%3F',
'Husbands_occupation', 'Standard-of-living_index', 'Media_exposure', 'Contraceptive_method_used']
cont_cols = ['Wifes_age', 'Number_of_children_ever_born']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=data.shape[1]-1)
return data.values.astype(float), ncat
def get_data(name):
if 'wine' in name:
data_red = | pd.read_csv('../data/winequality_red.csv') | pandas.read_csv |
#############################################################################
# PF EFFICIENCY CHECKER #
# #
# This code reads CSV files and checks the efficiencies of the CMSSW ID     #
# in different pT bins.                                                      #
# It should be run as follows :                                              #
# #
# python PFPhoton-ID-Efficiency_PF <modelname> <barrel/endcap> #
# #
#############################################################################
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve,auc
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
import os
import warnings
warnings.filterwarnings('ignore')
import sys
import math
###################################################################################################################################################################
os.system("")
modelname = sys.argv[1]
os.system(f"mkdir -p PFefficiency/" + modelname)
txt = open(f'PFefficiency/' + modelname + f'/Info_{modelname}.txt', "w+")
#Cut on the Neural Network plot :
#NN_cut = float(nn_cut)
##########################################################
# Settings: #
##########################################################
isNorm = True
#Do you want to debug?
isDebug = False #True -> nrows=1000
#Do you want barrel or endcap?
if sys.argv[2] == 'barrel':
isBarrel = True #True -> Barrel, False -> Endcap
elif sys.argv[2] == 'endcap':
isBarrel = False
else:
print('Please mention "barrel" or "endcap"')
train_var = ['phoHoverE', 'photrkSumPtHollow', 'phoecalRecHit','phosigmaIetaIeta','phoSigmaIEtaIEtaFull5x5','phoSigmaIEtaIPhiFull5x5', 'phoEcalPFClusterIso','phoHcalPFClusterIso', 'phohasPixelSeed','phoR9Full5x5','phohcalTower']
#variables used in the training
varnames = ['hadTowOverEm', 'trkSumPtHollowConeDR03', 'ecalRecHitSumEtConeDR03','sigmaIetaIeta','SigmaIEtaIEtaFull5x5','SigmaIEtaIPhiFull5x5', 'phoEcalPFClusterIso','phoHcalPFClusterIso', 'hasPixelSeed','R9Full5x5','hcalTowerSumEtConeDR03']
#In the same order as they are fed into the training
#removed : 'phoEcalPFClusterIso','phoHcalPFClusterIso',
##############################################################################
#READING DATA FILES :
#Columns: phoPt, phoEta, phoPhi, phoHoverE, phohadTowOverEmValid, photrkSumPtHollow, photrkSumPtSolid, phoecalRecHit, phohcalTower, phosigmaIetaIeta, phoSigmaIEtaIEtaFull5x5, phoSigmaIEtaIPhiFull5x5, phoEcalPFClusterIso, phoHcalPFClusterIso, phohasPixelSeed, phoR9Full5x5, isPhotonMatching, isPionMother, isPromptFinalState, isHardProcess, isPFPhoton
print('Reading the input files')
mycols = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22]
if isDebug == True : #take only the first 1000 photons
#file1 = pd.read_csv('../TrainingSamples/df_GJet_20to40_20.csv.gzip',compression='gzip', usecols=mycols, nrows=1000)
file2 = pd.read_csv('../TrainingSamples/df_GJet.csv.gzip',compression='gzip', usecols=mycols, nrows=1000)
#file3 = pd.read_csv('../TrainingSamples/df_GJet_40toInf_20.csv.gzip',compression='gzip', usecols=mycols, nrows=1000)
file4 = pd.read_csv('../TrainingSamples/df_QCD.csv.gzip',compression='gzip', usecols=mycols, nrows=1000)
file5 = pd.read_csv('../TrainingSamples/df_TauGun.csv.gzip',compression='gzip', usecols=mycols, nrows=1000)
else : #take all the photons
#file1 = pd.read_csv('../TrainingSamples/df_GJet_20to40_20.csv.gzip',compression='gzip', usecols=mycols)
file2 = pd.read_csv('../TrainingSamples/df_GJet.csv.gzip',compression='gzip', usecols=mycols, nrows=1000000)
#file3 = pd.read_csv('../TrainingSamples/df_GJet_40toInf_20.csv.gzip',compression='gzip', usecols=mycols)
file4 = | pd.read_csv('../TrainingSamples/df_QCD.csv.gzip',compression='gzip', usecols=mycols, nrows=250000) | pandas.read_csv |
from datetime import timedelta
import pandas as pd
from estimate_start_times.config import ConcurrencyOracleType, Configuration, ReEstimationMethod, ResourceAvailabilityType, OutlierStatistic
from estimate_start_times.estimator import StartTimeEstimator
from estimate_start_times.utils import read_csv_log
def test_estimate_start_times_only_resource():
config = Configuration(
re_estimation_method=ReEstimationMethod.SET_INSTANT,
concurrency_oracle_type=ConcurrencyOracleType.DEACTIVATED,
resource_availability_type=ResourceAvailabilityType.SIMPLE
)
event_log = read_csv_log('./tests/assets/test_event_log_1.csv', config)
# Estimate start times
start_time_estimator = StartTimeEstimator(event_log, config)
extended_event_log = start_time_estimator.estimate()
# Traces
first_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-01']
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
third_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-03']
fourth_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-04']
# The start time of initial events is their end time (instant events)
assert first_trace.iloc[0][config.log_ids.estimated_start_time] == first_trace.iloc[0][config.log_ids.end_time]
assert fourth_trace.iloc[0][config.log_ids.estimated_start_time] == fourth_trace.iloc[0][config.log_ids.end_time]
# The start time of all other events is the availability of the resource (concurrency deactivated)
assert second_trace.iloc[3][config.log_ids.estimated_start_time] == first_trace.iloc[2][config.log_ids.end_time]
assert third_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[1][config.log_ids.end_time]
assert fourth_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[2][config.log_ids.end_time]
assert fourth_trace.iloc[4][config.log_ids.estimated_start_time] == second_trace.iloc[4][config.log_ids.end_time]
assert first_trace.iloc[2][config.log_ids.estimated_start_time] == fourth_trace.iloc[3][config.log_ids.end_time]
def test_estimate_start_times_instant():
config = Configuration(
re_estimation_method=ReEstimationMethod.SET_INSTANT,
concurrency_oracle_type=ConcurrencyOracleType.NONE,
resource_availability_type=ResourceAvailabilityType.SIMPLE,
reuse_current_start_times=True
)
event_log = read_csv_log('./tests/assets/test_event_log_1.csv', config)
# Set one start timestamp manually
event_log[config.log_ids.start_time] = pd.NaT
manually_added_timestamp = pd.to_datetime('2002-11-07 12:33:00+02:00', format='%Y-%m-%d %H:%M:%S%z', utc=True)
event_log.loc[
(event_log[config.log_ids.case] == 'trace-01') & (event_log[config.log_ids.activity] == 'C'),
config.log_ids.start_time
] = manually_added_timestamp
# Estimate start times
start_time_estimator = StartTimeEstimator(event_log, config)
extended_event_log = start_time_estimator.estimate()
# The start time of initial events is the end time (instant events)
first_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-01']
assert first_trace.iloc[0][config.log_ids.estimated_start_time] == first_trace.iloc[0][config.log_ids.end_time]
fourth_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-04']
assert fourth_trace.iloc[0][config.log_ids.estimated_start_time] == fourth_trace.iloc[0][config.log_ids.end_time]
# The start time of an event with its resource free but immediately
# following its previous one is the end time of the previous one.
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
assert second_trace.iloc[3][config.log_ids.estimated_start_time] == second_trace.iloc[2][config.log_ids.end_time]
third_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-03']
assert third_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[2][config.log_ids.end_time]
# The start time of an event enabled for a long time but with its resource
# busy in other activities is the end time of its resource's last activity.
assert fourth_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[2][config.log_ids.end_time]
assert fourth_trace.iloc[4][config.log_ids.estimated_start_time] == second_trace.iloc[4][config.log_ids.end_time]
# The event with predefined start time was not predicted
assert first_trace.iloc[2][config.log_ids.estimated_start_time] == manually_added_timestamp
def test_bot_resources_and_instant_activities():
config = Configuration(
re_estimation_method=ReEstimationMethod.SET_INSTANT,
concurrency_oracle_type=ConcurrencyOracleType.NONE,
resource_availability_type=ResourceAvailabilityType.SIMPLE,
bot_resources={'Marcus'},
instant_activities={'H', 'I'}
)
event_log = read_csv_log('./tests/assets/test_event_log_1.csv', config)
# Estimate start times
start_time_estimator = StartTimeEstimator(event_log, config)
extended_event_log = start_time_estimator.estimate()
# The events performed by bot resources, or being instant activities are instant
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
fourth_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-04']
assert second_trace.iloc[2][config.log_ids.estimated_start_time] == second_trace.iloc[2][config.log_ids.end_time]
assert fourth_trace.iloc[6][config.log_ids.estimated_start_time] == fourth_trace.iloc[6][config.log_ids.end_time]
assert fourth_trace.iloc[7][config.log_ids.estimated_start_time] == fourth_trace.iloc[7][config.log_ids.end_time]
# The start time of initial events (with no bot resources nor instant activities) is the end time (instant events)
assert second_trace.iloc[0][config.log_ids.estimated_start_time] == second_trace.iloc[0][config.log_ids.end_time]
assert fourth_trace.iloc[0][config.log_ids.estimated_start_time] == fourth_trace.iloc[0][config.log_ids.end_time]
# The start time of an event (no bot resource nor instant activity) with its resource
# free but immediately following its previous one is the end time of the previous one.
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
assert second_trace.iloc[3][config.log_ids.estimated_start_time] == second_trace.iloc[2][config.log_ids.end_time]
third_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-03']
assert third_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[2][config.log_ids.end_time]
# The start time of an event (no bot resource nor instant activity) enabled for a long time
# but with its resource busy in other activities is the end time of its resource's last activity.
assert fourth_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[2][config.log_ids.end_time]
assert fourth_trace.iloc[4][config.log_ids.estimated_start_time] == second_trace.iloc[4][config.log_ids.end_time]
def test_repair_activities_with_duration_over_threshold():
config = Configuration(
re_estimation_method=ReEstimationMethod.MEDIAN,
concurrency_oracle_type=ConcurrencyOracleType.NONE,
resource_availability_type=ResourceAvailabilityType.SIMPLE,
outlier_statistic=OutlierStatistic.MEDIAN,
outlier_threshold=1.6
)
event_log = read_csv_log('./tests/assets/test_event_log_1.csv', config)
# Estimate start times
start_time_estimator = StartTimeEstimator(event_log, config)
extended_event_log = start_time_estimator.estimate()
# The start time of an event (with duration under the threshold) with its resource
# free but immediately following its previous one is the end time of the previous one.
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
assert second_trace.iloc[3][config.log_ids.estimated_start_time] == second_trace.iloc[2][config.log_ids.end_time]
# The start time of an event (with duration under the threshold) enabled for a long time
# but with its resource busy in other activities is the end time of its resource's last activity.
third_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-03']
fourth_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-04']
assert fourth_trace.iloc[3][config.log_ids.estimated_start_time] == third_trace.iloc[2][config.log_ids.end_time]
assert fourth_trace.iloc[4][config.log_ids.estimated_start_time] == second_trace.iloc[4][config.log_ids.end_time]
# The events with estimated durations over the threshold where re-estimated
first_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-01']
assert first_trace.iloc[1][config.log_ids.estimated_start_time] == \
first_trace.iloc[1][config.log_ids.end_time] - timedelta(minutes=49.6)
assert third_trace.iloc[2][config.log_ids.estimated_start_time] == \
third_trace.iloc[2][config.log_ids.end_time] - timedelta(minutes=11.2)
assert first_trace.iloc[6][config.log_ids.estimated_start_time] == \
first_trace.iloc[6][config.log_ids.end_time] - timedelta(minutes=38.4)
def test_estimate_start_times_mode():
config = Configuration(
re_estimation_method=ReEstimationMethod.MODE,
concurrency_oracle_type=ConcurrencyOracleType.NONE,
resource_availability_type=ResourceAvailabilityType.SIMPLE
)
event_log = read_csv_log('./tests/assets/test_event_log_1.csv', config)
# Estimate start times
start_time_estimator = StartTimeEstimator(event_log, config)
extended_event_log = start_time_estimator.estimate()
# The start time of initial events is the most frequent duration
third_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-03']
first_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-01']
assert first_trace.iloc[0][config.log_ids.estimated_start_time] == first_trace.iloc[0][config.log_ids.end_time] - \
(third_trace.iloc[0][config.log_ids.end_time] - first_trace.iloc[0][config.log_ids.end_time])
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
assert second_trace.iloc[0][config.log_ids.estimated_start_time] == second_trace.iloc[0][config.log_ids.end_time] - \
(third_trace.iloc[0][config.log_ids.end_time] - first_trace.iloc[0][config.log_ids.end_time])
def test_replace_recorded_start_times_with_estimation():
config = Configuration(
re_estimation_method=ReEstimationMethod.MODE,
concurrency_oracle_type=ConcurrencyOracleType.NONE,
resource_availability_type=ResourceAvailabilityType.SIMPLE,
replace_recorded_start_times=True
)
event_log = read_csv_log('./tests/assets/test_event_log_1.csv', config)
# Estimate start times
start_time_estimator = StartTimeEstimator(event_log, config)
extended_event_log = start_time_estimator.estimate()
# The start time of initial events is the most frequent duration
third_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-03']
first_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-01']
assert first_trace.iloc[0][config.log_ids.start_time] == first_trace.iloc[0][config.log_ids.end_time] - \
(third_trace.iloc[0][config.log_ids.end_time] - first_trace.iloc[0][config.log_ids.end_time])
second_trace = extended_event_log[extended_event_log[config.log_ids.case] == 'trace-02']
assert second_trace.iloc[0][config.log_ids.start_time] == second_trace.iloc[0][config.log_ids.end_time] - \
(third_trace.iloc[0][config.log_ids.end_time] - first_trace.iloc[0][config.log_ids.end_time])
assert config.log_ids.estimated_start_time not in extended_event_log.columns
def test_set_instant_non_estimated_start_times():
config = Configuration(
re_estimation_method=ReEstimationMethod.SET_INSTANT,
concurrency_oracle_type=ConcurrencyOracleType.NONE,
resource_availability_type=ResourceAvailabilityType.SIMPLE,
non_estimated_time=pd.to_datetime('2000-01-01T10:00:00.000+02:00', format='%Y-%m-%dT%H:%M:%S.%f%z')
)
event_log = read_csv_log('./tests/assets/test_event_log_2.csv', config)
event_log[config.log_ids.estimated_start_time] = | pd.to_datetime(event_log[config.log_ids.estimated_start_time], utc=True) | pandas.to_datetime |
'''
Created on Sep 2, 2016
@author: Gully
'''
from __future__ import print_function, division
import argparse
import argparse_config
import codecs
import os
import numpy as np
import pandas as pd
import warnings
from sets import Set
import re
from bokeh.plotting import figure, show, save, output_notebook, output_file
from bokeh.models import ColumnDataSource, Range1d
#
# This function checks to see if there is a boundary condition between clause 1 and clause 2
# Returns a tuple: (True / False, Explanation)
#
def checkForStartBoundary(clause1, clause2, expt_codes, tsv, c_s_lookup, s_c_lookup):
row1 = tsv.loc[clause1]
row2 = tsv.loc[clause2]
# both clauses are in the same sentence => false
if( row1['SentenceId'] == row2['SentenceId'] ):
return (False,"Same sentence")
# clause 1 is a title paragraph => true
elif( "header" in row1['Codes'] ):
return (True, "?/header")
#
# clause 1 is in a sentence where
# (A) there are hypotheses/problems/facts
# (B) there are results/implications with exLinks present
# clause 2 is in a sentence where
# (A) there are goals/methods
# (B) there are results/implications with no exLinks
#
sentence1 = c_s_lookup[clause1]
sentence2 = c_s_lookup[clause2]
go_condition_2 = False
for cs2 in s_c_lookup[sentence2]:
disc2 = tsv.loc[cs2]['Discourse Type']
inExHead2 = tsv.loc[cs2]['Codes']
if( (disc2 == 'result' or disc2 == 'implication')
and "exLink" not in inExHead2):
go_condition_2 = True
elif( disc2 == 'goal' or disc2 == 'method'):
go_condition_2 = True
if( go_condition_2 ) :
for cs1 in s_c_lookup[sentence1]:
disc1 = tsv.loc[cs1]['Discourse Type']
inExHead1 = tsv.loc[cs1]['Codes']
if(disc1 == 'hypothesis' or disc1 == 'problem' or disc1 == 'fact'):
#print(tsv.loc[cs2])
return (True, "A:"+disc1+inExHead1+"/"+disc2+inExHead2)
            elif((disc1 == 'result' or disc1 == 'implication') and "exLink" in inExHead1):
#print(tsv.loc[cs2])
return (True, "B:"+disc1+inExHead1+"/"+disc2+inExHead2)
es1 = row1['ExperimentValues']
if( es1 == es1 and len(set(expt_codes).intersection(es1.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es1 + "(1)")
es2 = row2['ExperimentValues']
if( es2 == es2 and len(set(expt_codes).intersection(es2.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es2 + "(2)")
return (False,"end")
#
# This function checks to see if there is a boundary condition between clause 1 and clause 2
# Returns a tuple: (True / False, Explanation)
#
def checkForEndBoundary(clause1, clause2, expt_codes, tsv, c_s_lookup, s_c_lookup):
row1 = tsv.loc[clause1]
row2 = tsv.loc[clause2]
# both clauses are in the same sentence => false
if( row1['SentenceId'] == row2['SentenceId'] ):
return (False,"Same sentence")
# clause 2 is a title paragraph => true
elif( "header" in row2['Codes'] ):
return (True, "?/header")
#
# clause 1 is in a sentence where there are results/implications with no exLinks and
# clause 2 is in a sentence where
# (A) there are goals/methods/hypotheses/problems/facts
# (B) there are results/implications with exLinks present
#
sentence1 = c_s_lookup[clause1]
sentence2 = c_s_lookup[clause2]
go_condition_1 = False
for cs1 in s_c_lookup[sentence1]:
disc1 = tsv.loc[cs1]['Discourse Type']
inExHead1 = tsv.loc[cs1]['Codes']
if( (disc1 == 'result' or disc1 == 'implication')
and "exLink" not in inExHead1):
go_condition_1 = True
if( go_condition_1 ) :
for cs2 in s_c_lookup[sentence2]:
disc2 = tsv.loc[cs2]['Discourse Type']
inExHead2 = tsv.loc[cs2]['Codes']
            if(disc2 != 'result' and disc2 != 'implication'):
                #print(tsv.loc[cs2])
                return (True, "C"+disc1+inExHead1+"/"+disc2+inExHead2)
            elif((disc2 == 'result' or disc2 == 'implication') and "exLink" in inExHead2):
#print(tsv.loc[cs2])
return (True, "D"+disc1+inExHead1+"/"+disc2+inExHead2)
es1 = row1['ExperimentValues']
if( es1 == es1 and len(set(expt_codes).intersection(es1.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es1 + "(1)")
es2 = row2['ExperimentValues']
if( es2 == es2 and len(set(expt_codes).intersection(es2.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es2 + "(2)")
return (False,"end")
def add_spans(tsv):
c_s_lookup = {}
c_p_lookup = {}
s_c_lookup = {}
p_c_lookup = {}
fig_ref_set = Set()
expt_code_set = Set()
clause_max = -1
clause_min = 1000
for i,row in tsv.iterrows():
es = row['ExperimentValues']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
#print("i: " + str(i))
#print("refs: " + str(es))
#print("~~~~~~~~~~~~~~~~~~")
s = int(sid[1:])
if(paragraph!=paragraph):
continue
p = 0
if( paragraph == '-'):
p = 0
elif( paragraph[0:1] == 'p'):
p = int(paragraph[1:])
elif( paragraph[0:5] == 'title'):
p = int(paragraph[5:])
c_s_lookup[i] = s
c_p_lookup[i] = p
if( s_c_lookup.get(s) is None ):
s_c_lookup[s] = [i]
else:
s_c_lookup.get(s).append(i)
if( p_c_lookup.get(p) is None ):
p_c_lookup[p] = [i]
else:
p_c_lookup.get(p).append(i)
if( heading != heading ):
heading = ""
if( re.match('^Result', heading) is None or floatingBox):
continue
if( i > clause_max):
clause_max = i
if( i < clause_min):
clause_min = i
if(es!=es):
continue
try:
codes = str(es).split('|')
except AttributeError:
print(str(es) + " is not a string. Skipping...")
continue
fig_ref_set.add(i)
for c in codes:
expt_code_set.add(c)
fig_refs = sorted(fig_ref_set)
fig_spans = {}
for i_fig in fig_refs:
row = tsv.loc[i_fig]
es = row['ExperimentValues']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
try:
expt_codes = str(es).split('|')
except AttributeError:
print(str(es) + " is not a string. Skipping...")
continue
# search backwards for a boundary condition between sentences
c1 = i_fig - 1
c2 = i_fig
while( checkForStartBoundary(c1, c2, expt_codes, tsv, c_s_lookup, s_c_lookup)[0] is False ):
c1 = c1-1
c2 = c2-1
expt_start = c2
# search forwards for a boundary condition between sentences
c1 = i_fig
c2 = i_fig + 1
while( checkForEndBoundary(c1, c2, expt_codes, tsv, c_s_lookup, s_c_lookup)[0] is False ):
c1 = c1+1
c2 = c2+1
expt_end = c1
for c in range(expt_start, expt_end+1):
if( fig_spans.get(c) is None ):
fig_spans[c] = set(expt_codes)
else:
fig_spans.get(c).update(set(expt_codes))
#print("Figure Location: " + str(i_fig) )
#print("Experiment Label: " + es )
#print("Expt Start: " + str(expt_start) )
#print("Expt Start Expl: " + str(checkForStartBoundary(expt_start-1, expt_start, expt_codes, tsv, c_s_lookup, s_c_lookup)) )
#print("Expt End: " + str(expt_end) )
#print("Expt End Expl: " + str(checkForEndBoundary(expt_end, expt_end+1, expt_codes, tsv, c_s_lookup, s_c_lookup)) )
#print( "~~~~~~~~~~~~~~~~~~~~" )
for i in fig_spans:
fig_spans[i] = "|".join(fig_spans.get(i))
#print(fig_spans[i])
tsv['fig_spans'] = pd.Series(fig_spans, index=fig_spans)
return tsv
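# Hedged usage sketch (the file name is a placeholder; the TSV is expected to carry
# the columns referenced above, e.g. 'ExperimentValues', 'Discourse Type', 'Codes',
# 'SentenceId', 'Paragraph', 'Headings', 'FloatingBox?'):
#   tsv = pd.read_csv('annotated_clauses.tsv', sep='\t')
#   tsv = add_spans(tsv)   # adds the 'fig_spans' column used for plotting below
#   prepare_and_draw_gannt('expt_spans', 'Experiment spans', tsv)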
def prepare_and_draw_gannt(filename, title, tsv):
gantt_rows = []
gantt_rows2 = []
gantt_rows3 = []
dtypes = ["fact","hypothesis","problem","goal" ,"method","result","implication"]
colors = ["Snow" ,"Snow" ,"Snow" ,"LightGray","Gray" ,"LightBlue" ,"LightGreen"]
colors_s = pd.Series(colors, index=dtypes)
all_codes = Set()
clause_max = -1
clause_min = 1000
for i,row in tsv.iterrows():
fig_refs = row['ExperimentValues']
fig_spans = row['fig_spans']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
#print("i: " + str(i))
#print("refs: " + str(fig_refs))
#print("~~~~~~~~~~~~~~~~~~")
if( heading != heading ):
heading = ""
#if(not floatingBox):
# clause_max = i
if( re.match('^Result', heading) is None or floatingBox):
continue
if( i > clause_max):
clause_max = i
if( i < clause_min):
clause_min = i
if(fig_spans!=fig_spans):
continue
if(fig_refs!=fig_refs):
fig_refs = ""
fig_span_list = fig_spans.split('|')
fig_ref_list = fig_refs.split('|')
#print("i: " + str(i))
#print("spans: " + fig_spans)
#print("refs: " + fig_refs)
#print("~~~~~~~~~~~~~~~~~~")
for fs in fig_span_list:
all_codes.add(fs)
if( fs in fig_ref_list ):
gantt_rows2.append([fs, i])
if('exLink' in inExHead):
gantt_rows3.append([fs, i])
gantt_rows.append([fs, i, dt, heading])
codes_s = pd.Series(range(len(all_codes)), index=sorted(list(all_codes)))
gantt_df = | pd.DataFrame.from_records(gantt_rows, columns=['fig_span', 'clause_id','discourse_type', 'heading']) | pandas.DataFrame.from_records |
############################################################################################
# FileName [ oncokb_annotation.py ]
# PackageName [ lib/analysis ]
# Synopsis [ Actionable mutation(drug) annotation ]
# Author [ <NAME> ]
# Copyright [ 2021 9 ]
############################################################################################
from ..maf_filter import fast_read_maf
from termcolor import colored
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
COLOR_MAP = ['#266199','#b7d5ea','#acc6aa','#E0CADB','#695D73','#B88655','#DDDDDD','#71a0a5','#841D22','#E08B69']
#################################################################################################################
# #
# python3 mafAnalysis.py \ #
# -f examples/test_data/maf/TCGA_test.maf \ #
# -oncokb ../oncokb-annotator/ [your_oncokb_token] 4 examples/test_data/oncokb/clinical_input.txt \ #
# -o examples/output \ #
# -p examples/pic/ #
# #
#################################################################################################################
class OncoKBAnnotator:
'''Actionable mutation(drug) annotation
Arguments:
maf_file {string} -- The input MAF file for all data.
output_folder {string} -- The path for output files.
path {string} -- The path for oncokb-annotator folder.
token {string} -- The personal token provided from OncoKB.
clinical {string} -- The path for clinical data.
pic {string} -- The path for storing plots.
cna {string} -- The path for cna data
level {string} -- The level the user chooses (default = 4)
Parameters:
self.head {string} -- The column names of MAF file.
self.df {pd.DataFrame} -- The data for the MAF file.
self.file {string} -- The input file for plotting which is also the output file from analysis.
Outputs
maf_oncokb_output.txt
clinical_oncokb_output.txt
Pictures:
oncokb_total_pie.pdf
oncokb_freq_actionable_genes.pdf
'''
def __init__(self, maf_file):
print(colored(("\nStart OncoKB annotator(drug)...."), 'yellow'))
self.head, self.df = fast_read_maf(maf_file)
def data_analysis(self, output_folder, path, token, clinical, cna = ''):
selected_df = (self.df[['NCBI_Build','Hugo_Symbol', 'Variant_Classification', 'Tumor_Sample_Barcode', 'HGVSp_Short', 'HGVSp', 'Chromosome', 'Start_Position', 'End_Position', 'Reference_Allele', 'Tumor_Seq_Allele1', 'Tumor_Seq_Allele2']]).set_index("Hugo_Symbol")
selected_df.to_csv(output_folder + "maf_oncokb_input.txt", sep="\t")
if not os.path.isdir('oncokb-annotator'):
os.system("git clone https://github.com/oncokb/oncokb-annotator.git\n")
os.system('cp lib/auxiliary/autoChange.py oncokb-annotator\n')
os.chdir("oncokb-annotator")
os.system('python3 autoChange.py\n')
os.system('pip3 install requests\n')
p = os.popen("python3 MafAnnotator.py -i ../"+output_folder + "maf_oncokb_input.txt -o ../" + output_folder + "maf_oncokb_output.txt -c ../"+clinical+" -b " + token + "\n")
print(p.read())
p.close()
p = os.popen("python3 ClinicalDataAnnotator.py -i ../"+clinical+" -o ../"+ output_folder +"clinical_oncokb_output.txt -a ../"+output_folder+"maf_oncokb_output.txt\n")
print(p.read())
p.close()
if cna !='':
p = os.popen("python3 CnaAnnotator.py -i ../"+cna+" -o ../"+output_folder+"cna_oncokb_output.txt -c ../"+clinical+" -b "+ token + "\n")
print(p.read())
p.close()
os.chdir("..")
# os.system("rm -rf oncokb-annotator\n")
print(colored("=> Generate analysis files: ", 'green'))
print(colored((" " + output_folder + "maf_oncokb_output.txt"), 'green'))
print(colored((" " + output_folder + "clinical_oncokb_output.txt"), 'green'))
def plotting(self, output_folder, pic, level='4'):
LABEL_SIZE, TITLE_SIZE = 24,30
self.file = output_folder + "clinical_oncokb_output.txt"
df = pd.read_csv(self.file, sep="\t")
df_level = df[['HIGHEST_LEVEL']]
level_list = ['LEVEL_1', 'LEVEL_2', 'LEVEL_3A', 'LEVEL_3B', 'LEVEL_4']
level_dict = dict.fromkeys(level_list,0)
sample_size = df.shape[0]
for i in range(sample_size):
if df_level.iloc[i]['HIGHEST_LEVEL'] in level_list:
level_dict[df_level.iloc[i]['HIGHEST_LEVEL']] += 1
true_num = 0
if level == '4':
true_num = sum(level_dict.values())
elif level == '3':
true_num = sum(level_dict.values()) - level_dict['LEVEL_4']
level_list = ['LEVEL_1', 'LEVEL_2', 'LEVEL_3A', 'LEVEL_3B']
elif level == '2':
true_num = level_dict['LEVEL_1'] + level_dict['LEVEL_2']
level_list = ['LEVEL_1', 'LEVEL_2']
elif level == '1':
true_num = level_dict['LEVEL_1']
level_list = ['LEVEL_1']
# Pie Plot( Total pie plot )
size = [true_num, sample_size - true_num]
labels = "Actionable\nbiomarkers"," Current absence\n of new actionable\n biomarkers"
fig1, ax1 = plt.subplots()
_, _, autotexts = ax1.pie(size, labels=labels, autopct='%1.1f%%', startangle=90, colors=[COLOR_MAP[3],COLOR_MAP[4]] ,textprops={'fontsize': LABEL_SIZE})
autotexts[1].set_color('white')
ax1.axis('equal')
plt.savefig(pic+"oncokb_total_pie.pdf", dpi=300, bbox_inches='tight')
print(colored(("=> Generate Pie Plot: " + pic + "oncokb_total_pie.pdf"), 'green'))
# Bar Plot( Frequency of Actionable Genes )
df_drug_count = df[level_list]
drug_total_dict = {}
for i in range(sample_size):
drug_list = [[], [], [], [], [], []] # [total, level1, level2, level3A, level3B, level4]
for idx, item in enumerate(level_list):
data = df_drug_count.iloc[i][item]
if not | pd.isna(data) | pandas.isna |
# add a comment for testing Github
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
def clean_data(df):
'''
INPUT
df - pandas dataframe
OUTPUT
X - A matrix holding all of the variables you want to consider when predicting the response
y - the corresponding response vector
This function cleans df using the following steps to produce X and y:
1. Drop all the rows with no salaries
2. Create X as all the columns that are not the Salary column
3. Create y as the Salary column
4. Drop the Salary, Respondent, and the ExpectedSalary columns
5. For each numeric variable, fill the column with the mean value.
6. Create dummy columns for all the categorical variables, drop the original columns
'''
# Drop rows with missing salary values
df = df.dropna(subset=['Salary'], axis=0)
y = df['Salary']
#Drop respondent and expected salary columns
df = df.drop(['Respondent', 'ExpectedSalary', 'Salary'], axis=1)
# Fill numeric columns with the mean
num_vars = df.select_dtypes(include=['float', 'int']).columns
for col in num_vars:
df[col].fillna((df[col].mean()), inplace=True)
# Dummy the categorical variables
cat_vars = df.select_dtypes(include=['object']).copy().columns
for var in cat_vars:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(var, axis=1), pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)], axis=1)
X = df
return X, y
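# Hedged usage sketch (illustrative only, never called): the toy survey columns
# below are invented to mirror the layout clean_data expects.
def _example_clean_data():
    toy = pd.DataFrame({
        'Respondent': [1, 2, 3],
        'ExpectedSalary': [np.nan, np.nan, np.nan],
        'Salary': [50000.0, np.nan, 65000.0],
        'YearsCoding': [3.0, 5.0, np.nan],
        'Country': ['DE', 'US', 'US'],
    })
    X, y = clean_data(toy)
    # The row with a missing Salary is dropped, the remaining NaN in YearsCoding
    # is mean-filled, and Country becomes dummy columns.
    return X, y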
def find_optimal_lm_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
'''
INPUT
X - pandas dataframe, X matrix
y - pandas dataframe, response variable
cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result
OUTPUT
r2_scores_test - list of floats of r2 scores on the test data
r2_scores_train - list of floats of r2 scores on the train data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
'''
r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
for cutoff in cutoffs:
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model and obtain pred response
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#append the r2 value from the test set
r2_scores_test.append(r2_score(y_test, y_test_preds))
r2_scores_train.append(r2_score(y_train, y_train_preds))
results[str(cutoff)] = r2_score(y_test, y_test_preds)
if plot:
plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
plt.xlabel('Number of Features')
plt.ylabel('Rsquared')
plt.title('Rsquared by Number of Features')
plt.legend(loc=1)
plt.show()
best_cutoff = max(results, key=results.get)
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > int(best_cutoff)) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
def main():
    df = pd.read_csv('../Part1/stackoverflow/survey_results_public.csv')
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from hdrh.histogram import HdrHistogram
import seaborn as sns
import pandas
from matplotlib import pyplot as plt
import os.path
from enum import Enum
import matplotlib as mpl
from typing import *
import argparse
parser = argparse.ArgumentParser(description='Generate latency curve.')
parser.add_argument('runid', type=str)
parser.add_argument('--invocations', type=int, default=40)
args = parser.parse_args()
INVOCATIONS = args.invocations
RUNID = args.runid
assert os.path.isdir(
f'/root/bench/results/{RUNID}'), f'Incorrect runid: {RUNID}'
HFAC = 1319
HEAP = {
'lusearch': 70,
'cassandra': 347,
'h2': 1572,
'tomcat': 94,
}
DACAPO = 'dacapochopin-b00bfa9'
DATA = {
'G1': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.g1.common.hs.latency.{dacapo}',
'Shen.': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.shenandoah.common.hs.latency.{dacapo}',
'LXR': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.ix.common.tph.trace2-5.srv-128.srvw.lfb-32.latency.{dacapo}',
'ZGC': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.z.common.hs.latency.{dacapo}',
}
MAX_INVOCATIONS = max(INVOCATIONS, 40)
MIN_LATENCY_USEC = 1
MAX_LATENCY_USEC = 1000 * 1000 # 1 sec
LATENCY_SIGNIFICANT_DIGITS = 5
LABEL_FONT_SIZE = 60
LEGEND_FONT_SIZE = 60
TICK_FONT_SIZE = 50
SAVE_FILE = 'jpg'
# SAVE_FILE = 'pdf'
def load_data(invocation: int, folder: str):
path = os.path.realpath(os.path.expanduser(
'{}.{}/dacapo-latency-usec-metered.csv'.format(folder, invocation)))
if not os.path.isfile(path):
return None
df = pandas.read_csv(path, names=["start", "end"])
try:
df["latency"] = df["end"] - df["start"]
except Exception as e:
print(path)
raise e
return df
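# Hedged usage sketch (never called): the benchmark folder below is a placeholder
# that follows the "<folder>.<invocation>/dacapo-latency-usec-metered.csv" layout.
def _example_load_data():
    folder = f'/root/bench/results/{RUNID}/lusearch.{HFAC}.{HEAP["lusearch"]}.jdk-lxr.g1.common.hs.latency.{DACAPO}'
    df = load_data(0, folder)
    if df is not None:
        print(df['latency'].describe())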
def load_data_and_plot(bench, data: Optional[Dict[str, Union[str, List[str]]]] = None, invocations=MAX_INVOCATIONS, save=SAVE_FILE, legend: Union[str, bool] = True, max_percentile='99.999'):
assert bench in HEAP
print(f'[{bench}] Loading...')
histograms = {}
# Clean up inputs
if data is None:
data = {k: v for k, v in DATA.items()}
for gc in data.keys():
if isinstance(data[gc], str):
data[gc] = [data[gc]]
data[gc] = [
f'/root/bench/results/{x}'.format(
runid=RUNID, bench=bench, hfac=HFAC, heap=HEAP[bench], dacapo=DACAPO)
for x in data[gc]
]
data: Dict[str, List[str]]
# Load data
for gc, logs in data.items():
histograms[gc] = []
for folder in logs:
for i in range(invocations):
loaded_data = load_data(i, folder)
if loaded_data is None:
continue
histogram = HdrHistogram(
MIN_LATENCY_USEC, MAX_LATENCY_USEC, LATENCY_SIGNIFICANT_DIGITS)
latencies = loaded_data["latency"]
for l in latencies:
histogram.record_value(l)
histograms[gc].append(histogram)
if len(histograms[gc]) == 0:
histogram = HdrHistogram(
MIN_LATENCY_USEC, MAX_LATENCY_USEC, LATENCY_SIGNIFICANT_DIGITS)
histogram.record_value(0)
histograms[gc].append(histogram)
# Process data
print(f'[{bench}] Processing...')
percentile_list = []
for gc, hists in histograms.items():
for j, histogram in enumerate(hists):
for i in histogram.get_percentile_iterator(5):
percentile_list.append({"GC": gc, "inv": j, "value": i.value_iterated_to,
"percentile": i.percentile_level_iterated_to / 100})
    percentile_df = pandas.DataFrame(percentile_list)
"""
This script evaluates the performance of the
AKKEffProofOfRetrievability Proof of
retrievability implemented in
koppercoin.crypto.AKKEffProofOfRetrievability.py
"""
from koppercoin.crypto.AKKEffProofOfRetrievability import *
from koppercoin.crypto.AKKProofOfRetrievability import GQProofOfRetrievability as SlowGQProofOfRetrievability
import time
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import os
import jsonpickle
from pympler import asizeof
import gc
# which timings should we compute?
# set to 1 if we should compute it, otherwise 0
(gq, schnorr, okamoto, shoup, sw) = (1,0,0,1,1)
# The messagesizes which will be tested
messagesizes = range(250, 4001, 250)
# messagesizes = range(300, 501, 50)
# The number of runs per messagesize
runs = 10
if gq:
gqtimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if schnorr:
schnorrtimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if okamoto:
okamototimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if shoup:
shouptimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if sw:
swtimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
for i in range(len(messagesizes)):
print("Running PoR on messagesize " + str(messagesizes[i]) + " from " + str(list(messagesizes)))
if gq:
gqtimings['keygen'].append([])
gqtimings['encode'].append([])
gqtimings['genchallenge'].append([])
gqtimings['genproof'].append([])
gqtimings['verify'].append([])
if schnorr:
schnorrtimings['keygen'].append([])
schnorrtimings['encode'].append([])
schnorrtimings['genchallenge'].append([])
schnorrtimings['genproof'].append([])
schnorrtimings['verify'].append([])
if okamoto:
okamototimings['keygen'].append([])
okamototimings['encode'].append([])
okamototimings['genchallenge'].append([])
okamototimings['genproof'].append([])
okamototimings['verify'].append([])
if shoup:
shouptimings['keygen'].append([])
shouptimings['encode'].append([])
shouptimings['genchallenge'].append([])
shouptimings['genproof'].append([])
shouptimings['verify'].append([])
if sw:
swtimings['keygen'].append([])
swtimings['encode'].append([])
swtimings['genchallenge'].append([])
swtimings['genproof'].append([])
swtimings['verify'].append([])
for run in range(runs):
data = os.urandom(messagesizes[i]*1024)
# GQProofOfRetrievability
if gq:
print("Computing GQProofOfRetrievability")
time_pre = time.time()
(pk, sk) = GQProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
gqtimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = GQProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
gqtimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
gqtimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = GQProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
gqtimings['genproof'][i].append(duration)
time_pre = time.time()
a = GQProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
gqtimings['verify'][i].append(duration)
# SchnorrProofOfRetrievability
if schnorr:
print("Computing SchnorrProofOfRetrievability")
time_pre = time.time()
(pk, sk) = SchnorrProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = SchnorrProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = SchnorrProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['genproof'][i].append(duration)
time_pre = time.time()
a = SchnorrProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['verify'][i].append(duration)
# OkamotoProofOfRetrievability
if okamoto:
print("Computing OkamotoProofOfRetrievability")
time_pre = time.time()
(pk, sk) = OkamotoProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
okamototimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = OkamotoProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
okamototimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
okamototimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = OkamotoProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
okamototimings['genproof'][i].append(duration)
time_pre = time.time()
a = OkamotoProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
okamototimings['verify'][i].append(duration)
# ShoupProofOfRetrievability
if shoup:
print("Computing ShoupProofOfRetrievability")
time_pre = time.time()
(pk, sk) = ShoupProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
shouptimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = ShoupProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
shouptimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
shouptimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = ShoupProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
shouptimings['genproof'][i].append(duration)
time_pre = time.time()
a = ShoupProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
shouptimings['verify'][i].append(duration)
# SWProofOfRetrievability
if sw:
print("Computing SWProofOfRetrievability")
time_pre = time.time()
(pk, sk) = SWProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
swtimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = SWProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
swtimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
swtimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = SWProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
swtimings['genproof'][i].append(duration)
time_pre = time.time()
a = SWProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
swtimings['verify'][i].append(duration)
# Save the data in complicated JSON-format
if gq:
with open('timings_GQPoR.json', 'w') as f:
json_obj = jsonpickle.encode(gqtimings)
f.write(json_obj)
if schnorr:
with open('timings_SchnorrPoR.json', 'w') as f:
json_obj = jsonpickle.encode(schnorrtimings)
f.write(json_obj)
if okamoto:
with open('timings_OkamotoPoR.json', 'w') as f:
json_obj = jsonpickle.encode(okamototimings)
f.write(json_obj)
if shoup:
with open('timings_ShoupPoR.json', 'w') as f:
json_obj = jsonpickle.encode(shouptimings)
f.write(json_obj)
if sw:
with open('timings_SWPoR.json', 'w') as f:
json_obj = jsonpickle.encode(swtimings)
f.write(json_obj)
print("Running postprocessing steps")
# Transform to handy Dataframes
for algo in ['keygen', 'encode', 'genchallenge', 'genproof', 'verify']:
if gq:
gqtimings[algo] = pd.DataFrame(gqtimings[algo]).T
gqtimings[algo].columns = messagesizes
if schnorr:
schnorrtimings[algo] = pd.DataFrame(schnorrtimings[algo]).T
schnorrtimings[algo].columns = messagesizes
if okamoto:
        okamototimings[algo] = pd.DataFrame(okamototimings[algo]).T
        okamototimings[algo].columns = messagesizes
# IMPORTS
import sys
sys.path.append("..")
from preprocessing.temporal_aggregation import TemporalAggregator
import numpy as np
import pandas as pd
from tools.processing import groupwise_normalise, groupwise_expansion
from misc.utils import matchfinder, fano_inequality
from tqdm import tqdm
from structures.trajectory import TrajectoriesFrame
tqdm.pandas()
import concurrent.futures as cf
from math import ceil
from random import sample
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
def num_of_distinct_locations(trajectories_frame):
"""
Returns a number of distinct location in the trajectory. First looks for 'labels' column.
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with the number of unique locations for each user
"""
if isinstance(trajectories_frame, pd.DataFrame):
return trajectories_frame.groupby(level=0).progress_apply(lambda x: len(pd.unique(x['labels'])))
else:
return trajectories_frame.groupby(level=0).progress_apply(lambda x: pd.unique(x).shape[0])
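# Minimal sketch of the expected input (illustrative only, never called): a plain
# DataFrame with a 'labels' column indexed by user ID stands in for a TrajectoriesFrame.
def _example_num_of_distinct_locations():
    toy = pd.DataFrame({'labels': [1, 2, 2, 3, 3, 3]},
                       index=[10, 10, 10, 20, 20, 20])
    counts = num_of_distinct_locations(toy)
    # -> user 10 visited 2 distinct labels, user 20 visited 1
    return counts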
def visitation_frequency(trajectories_frame):
"""
    Calculates visitation frequency for each user in the TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with the visitation frequency for each user
"""
lat_col = trajectories_frame._geom_cols[0]
lon_col = trajectories_frame._geom_cols[1]
frequencies = trajectories_frame.groupby(level=0).progress_apply(
lambda x: x.groupby([lat_col, lon_col]).count()).iloc[:, 0]
frequencies = frequencies.groupby(level=0).progress_apply(lambda x: x.sort_values(ascending=False))
frequencies = groupwise_normalise(frequencies)
return frequencies
def _filter_distinct_locations(trajectories_frame):
to_concat = []
for ind, vals in trajectories_frame.groupby(level=0):
if len(vals) == 1:
            to_concat.append(vals)
continue
else:
uniq = vals.loc[vals['geometry'].drop_duplicates().index]
to_concat.append(uniq)
return pd.concat(to_concat)
def distinct_locations_over_time(trajectories_frame, time_unit='30min', reaggregate=False):
"""
Calculates the number of distinct location visited in the movement trajectory over time.
:param trajectories_frame: TrajectoriesFrame class object
:param time_unit: determines time unit
:param reaggregate: if true, data are first reagregated to given time unit
:return: a Series with the number of unique locations visited up to each time step in the movement trajectory
"""
if reaggregate:
temp_agg = TemporalAggregator(time_unit)
trajectories_frame = temp_agg.aggregate(trajectories_frame)
trajectories_frame = _filter_distinct_locations(trajectories_frame)
distinct_locations = trajectories_frame.dropna().groupby(level=0).resample(time_unit, level=1).count()
distinct_locations = distinct_locations.groupby(level=0).cumsum().iloc[:, 0]
return distinct_locations
def jump_lengths(trajectories_frame):
"""
Calculates jump lengths between each step in the trajectory
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with jump lengths between consecutive records
"""
jumps = trajectories_frame.groupby(level=0).progress_apply(lambda x: x.distance(x.shift()))
return jumps
def nonzero_trips(trajectories_frame):
"""
Counts all trips that had distance larger than 0.
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with a count of nonzero trips for each user
"""
jumps = jump_lengths(trajectories_frame).dropna().droplevel([1, 2])
return jumps[jumps != 0].groupby(by="ID").count()
def self_transitions(trajectories_frame):
"""
Calculates the number of self transitions for each user
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with the number of self transitions for each user
"""
if isinstance(trajectories_frame, pd.Series):
self_transitions_mask = (trajectories_frame == trajectories_frame.shift())
else:
if not hasattr(trajectories_frame, '_geom_cols'):
trajectories_frame = TrajectoriesFrame(trajectories_frame)
coordinates_frame = trajectories_frame[[trajectories_frame._geom_cols[0], trajectories_frame._geom_cols[1]]]
self_transitions_mask = (coordinates_frame == coordinates_frame.shift()).all(axis=1)
empty_mask = (~self_transitions_mask).groupby(level=0).progress_apply(lambda x: x.all())
empty_mask = empty_mask[empty_mask == True].index
self_transitions_only = trajectories_frame[self_transitions_mask]
empty_self_transitions = pd.DataFrame([0 for x in range(len(empty_mask))], index=empty_mask)
if isinstance(trajectories_frame, pd.Series):
self_transitions_only = self_transitions_only.groupby(level=0).count()
else:
self_transitions_only = self_transitions_only.groupby(level=0).count()[self_transitions_only.columns[0]]
if len(empty_self_transitions) > 0:
        self_transitions_only = self_transitions_only.append(empty_self_transitions.iloc[:, 0]).sort_index()
return self_transitions_only
def waiting_times(trajectories_frame, time_unit='h'):
"""
Calculates waiting times for each transition in TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:param time_unit: time unit in which waiting times will be expressed
:return: A series with waiting times for each transition for each user
"""
transitions_only = trajectories_frame[
trajectories_frame.geometry.groupby(level=0).progress_apply(lambda x: x.shift(-1) != x)]
transitions_only['dt'] = transitions_only.index.get_level_values(1)
times = transitions_only.groupby(level=0).progress_apply(
lambda x: (x['dt'] - x['dt'].shift(1)).astype('timedelta64[%s]' % time_unit))
return times
def center_of_mass(trajectories_frame):
"""
Calculates a center of mass for each user's trajectory
:param trajectories_frame: TrajectoriesFrame class object
:return: a GeoSeries with centers of mass of each user's trajectory
"""
return trajectories_frame.dissolve(by=trajectories_frame.index.get_level_values(0)).centroid
def radius_of_gyration(trajectories_frame, time_evolution=True):
"""
Calculates radii of gyration for each user. Optionally uses time steps to express their growth.
:param trajectories_frame: TrajectoriesFrame class object
:param time_evolution: If true, radii of gyration are calculated over time
:return: a Series with radii of gyration for each user
"""
mean_locs = center_of_mass(trajectories_frame)
to_concat_dict = {}
to_concat_list = []
for ind, vals in tqdm(trajectories_frame.groupby(level=0), total=len(trajectories_frame)):
vals = vals.dropna()
rog_ind = vals.distance(mean_locs.loc[ind]) ** 2
if time_evolution:
rog_ind = groupwise_expansion(np.sqrt(rog_ind))
to_concat_list.append(rog_ind)
else:
rog_ind = np.sqrt(rog_ind.mean())
to_concat_dict[ind] = rog_ind
if time_evolution:
radius = pd.concat(to_concat_list)
else:
radius = pd.DataFrame.from_dict(to_concat_dict, orient='index')
return radius
def mean_square_displacement(trajectories_frame, from_center=False, time_evolution=True, reference_locs=None):
"""
Calculates mean square displacements for each user. Optionally uses time steps to express their growth.
:param trajectories_frame: TrajectoriesFrame class object
    :param from_center: If true, displacement is calculated from the trajectory center; if false, from the first point
:param time_evolution: If true, mean square displacements are calculated over time
:param reference_locs: allows to give reference locations for each trajectory explicitly
:return: a Series with mean square displacements for each user
"""
to_concat_dict = {}
to_concat_list = []
    if reference_locs is None:
if from_center:
reference_locs = center_of_mass(trajectories_frame)
else:
reference_locs = trajectories_frame.groupby(level=0).head(1).droplevel(1).geometry
for ind, vals in tqdm(trajectories_frame.groupby(level=0), total=len(trajectories_frame)):
vals = vals.dropna()
msd_ind = (vals.distance(reference_locs.loc[ind]) ** 2)
if time_evolution:
msd_ind = groupwise_expansion(msd_ind)
to_concat_list.append(msd_ind)
else:
msd_ind = msd_ind.mean()
to_concat_dict[ind] = msd_ind
if time_evolution:
msd = pd.concat(to_concat_list)
else:
msd = pd.DataFrame.from_dict(to_concat_dict, orient='index')
return msd
def return_time(trajectories_frame, time_unit='h', by_place=False):
"""
Calculates return times for each unique location in each user's trajectory.
:param trajectories_frame: TrajectoriesFrame class object
:param time_unit: time unit in which return times will be expressed
:param by_place: If true, return times are expressed for each place globally
:return: a Series with return times
"""
if not hasattr(trajectories_frame, '_geom_cols'):
trajectories_frame = TrajectoriesFrame(trajectories_frame)
lat_col = trajectories_frame[trajectories_frame._geom_cols[0]]
lon_col = trajectories_frame[trajectories_frame._geom_cols[1]]
trajectories_frame['datetime_temp'] = trajectories_frame.index.get_level_values(1)
to_concat = []
for ind, vals in tqdm(trajectories_frame.groupby(level=0), total=len(trajectories_frame)):
concat_level = {}
for place, vals2 in vals.groupby([lat_col, lon_col]):
shifts = (vals2.datetime_temp - vals2.datetime_temp.shift()).astype('timedelta64[%s]' % time_unit)
concat_level[place] = shifts
to_concat.append(pd.concat(concat_level))
return_times = pd.concat(to_concat)
if by_place:
return_times = return_times.groupby(level=2).progress_apply(
lambda x: x.groupby(level=[0, 1]).agg(['count', 'mean']).dropna())
return_times = return_times.groupby(level=0).progress_apply(lambda x: x.sort_values('count', ascending=False))
else:
return_times = return_times.groupby(level=2).progress_apply(lambda x: x.sort_values(ascending=False)).droplevel(
[1, 2])
return return_times
def random_entropy(trajectories_frame):
"""
Calculates random entropy for each user in TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with random entropies for each user
"""
return trajectories_frame.groupby(level=0).progress_apply(lambda x: np.log2(len(pd.unique(x.geometry))))
def unc_entropy(trajectories_frame):
"""
Calculates uncorrelated entropy for each user in TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with uncorrelated entropies for each user
"""
frequencies = visitation_frequency(trajectories_frame)
return frequencies.groupby(level=0).progress_apply(lambda x: -np.sum([pk * np.log2(pk) for pk in x if pk != 0]))
def _fit_func(x, a, b, c):
return a * np.exp(b * x) + c
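# Illustrative only (never called): fitting _fit_func to synthetic scaling features,
# mirroring how the estimation below extrapolates back to q = 0.
def _example_fit_func():
    qs = np.linspace(0.2, 0.8, 10)
    ys = _fit_func(qs, 0.1, 2.0, 0.05)
    popt, _ = curve_fit(_fit_func, qs, ys, p0=[0.1, 2, 0.1])
    return _fit_func(0, *popt)  # estimated scaling feature at q = 0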
def _real_entropy(indi, gs):
"""
Calculates actual entropy for series of symbols
:param indi: unique identifier
:param gs: series of symbols
    :return: a unique identifier and entropy value
"""
return indi, np.power(np.mean(matchfinder(gs)), -1) * np.log2(len(gs))
def _real_scalling_entropy(indi, trace, estimation_method='unc'):
"""
    Calculates actual entropy for trajectories. If a trajectory has missing data, estimation is used
    (uncorrelated entropy-based by default).
:param indi: unique identifier
:param trace: movement trajectory
:return: unique identifier and actual entropy
"""
empty_fraction = trace.isnull().sum() / trace.shape[0]
if empty_fraction < .15:
return _real_entropy(indi, trace)
estimation_step = ceil((.9 - empty_fraction) / .05)
range_to_empty = [empty_fraction + .05 * x for x in range(estimation_step)]
scaling_features = []
uncs = []
real_qs = []
if estimation_method == 'unc':
visit_freq = trace.value_counts() / trace.shape[0] # FOR UNCORRELATED ENTROPY-BASED ESTIMATION
Sunc_baseline = -np.sum(visit_freq * np.log2(visit_freq)) # FOR UNCORRELATED ENTROPY-BASED ESTIMATION
elif estimation_method == 'shuff':
Sunc_baseline = np.power(np.mean(matchfinder(trace.sample(frac=1).reset_index(drop=True))), -1) * \
np.log2(len(
trace.sample(frac=1).reset_index(drop=True))) # FOR SHUFFLED TRAJECTORY-BASED ESTIMATION
for q in range_to_empty[1:]:
trace_copy2 = trace.copy()
points_to_remove = sample(set(trace_copy2[~trace_copy2.isnull()].index),
int(round((q - empty_fraction) * len(trace_copy2))))
trace_copy2.loc[points_to_remove] = None
Strue = np.power(np.mean(matchfinder(trace_copy2)), -1) * np.log2(len(trace_copy2))
trace_shuffled = trace_copy2.sample(frac=1).reset_index(drop=True)
if estimation_method == 'unc':
visit_freq = trace_copy2.value_counts() / trace_copy2.shape[0] # FOR UNCORRELATED ENTROPY-BASED ESTIMATION
Sunc = -np.sum(visit_freq * np.log2(visit_freq)) # FOR UNCORRELATED ENTROPY-BASED ESTIMATION
elif estimation_method == 'shuff':
Sunc = np.power(np.mean(matchfinder(trace_shuffled)), -1) * np.log2(len(trace_shuffled)) # FOR SHUFFLED TRAJECTORY-BASED ESTIMATION
scaling_features.append(np.log2(Strue / Sunc))
uncs.append(Sunc)
real_qs.append(sum(trace_copy2.isnull()) / len(trace_copy2))
try:
popt, pcov = curve_fit(_fit_func, real_qs, scaling_features, maxfev=12000, p0=[0.1, 2, 0.1])
        if sum(scaling_features) == 0 and r2_score(scaling_features, [_fit_func(x, *popt) for x in real_qs]) < .9:
a, b = np.polyfit(real_qs, scaling_features, 1)
return indi, np.power(2, b) * Sunc_baseline
else:
return indi, np.power(2, _fit_func(0, *popt)) * Sunc_baseline
except:
a, b = np.polyfit(real_qs, scaling_features, 1)
return indi, np.power(2, b) * Sunc_baseline
def real_entropy(trajectories_frame):
"""
Calculates actual entropy for each user in TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with actual entropies for each user
"""
result_dic = {}
with cf.ThreadPoolExecutor() as executor:
try:
args = [val.labels for indi, val in trajectories_frame.groupby(level=0)]
except KeyError:
args = [val for indi, val in trajectories_frame.groupby(level=0)]
ids = [indi for indi, val in trajectories_frame.groupby(level=0)]
results = list(tqdm(executor.map(_real_scalling_entropy, ids, args), total=len(ids)))
for result in results:
result_dic[result[0]] = result[1]
return pd.Series(np.fromiter(result_dic.values(), dtype=float), index=np.fromiter(result_dic.keys(), dtype=int))
def random_predictability(trajectories_frame):
"""
Calculates random entropy and predictability.
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with random entropy and predictability for each user
"""
distinct_locations = num_of_distinct_locations(trajectories_frame)
rand_ent = random_entropy(trajectories_frame)
merged = pd.DataFrame([distinct_locations, rand_ent], index=['locations', 'entropy'])
return merged.progress_apply(lambda x: fano_inequality(x['locations'], x['entropy'])), rand_ent
def unc_predictability(trajectories_frame):
"""
Calculates uncorrelated entropy and predictability.
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with uncorrelated entropy and predictability for each user
"""
distinct_locations = num_of_distinct_locations(trajectories_frame)
unc_ent = unc_entropy(trajectories_frame)
    merged = pd.DataFrame([distinct_locations, unc_ent], index=['locations', 'entropy'])
    return merged.progress_apply(lambda x: fano_inequality(x['locations'], x['entropy'])), unc_ent
import pandas as pd
import gdal
import numpy as np
import os
import rasterio
import tqdm
class TrainingData:
"""Prepares training datasets using a raster stack, species occurrences and a set of band means and standard
deviations.
:param self: a class instance of TrainingData
:param oh: an Occurrence object: holds occurrence files and tables
:param gh: a GIS object: holds path and file names required for computation of gis data
:param verbose: a boolean: prints a progress bar if True, silent if False
:return: Object. Used to create a series of .csv files (one for each species detected by the Occurrences object)
containing the input data to the trainer, executed by calling class method create_training_df on TrainingData
object.
"""
def __init__(self, oh, gh, verbose):
self.oh = oh
self.gh = gh
self.verbose = verbose
def prep_training_df(self, src, inras, spec):
"""Loads array from raster stack, locations from species occurrences and band statistics.
:param self: a class instance of TrainingData
:param src: rasterio source object for raster stack.
:param inras: gdal source object for raster stack.
:param spec: string containing the species name for which the data will be loaded.
:return: Tuple. Containing:
string 'spec' that contains the species name for which the files are loaded and returned;
list 'ppa' contains the status for each loaded occurrence (0 for absence, 1 for presence) for the specified
species;
list 'long' and 'lati' contain the longitude and latitude for each occurrence from a specified species;
list 'row' and 'col' contain the values from the previous 'long' and 'lati' columns converted from WGS84 to
image coordinates;
matrix 'myarray' is an multi-dimensional representation of the raster stack;
table 'mean_std' is an table containing the mean and standard deviation for each of the scaled raster layers
"""
        data = pd.read_csv(self.gh.spec_ppa + '/%s_ppa_dataframe.csv' % spec)
import sys
import pandas
FILENAME = sys.argv[1] #"20220121 Overview CG plates and compounds _consolidated RTG.xlsx"
dfs_per_sheetname = pandas.read_excel(FILENAME, sheet_name=None)
assert "experiments" in dfs_per_sheetname
df = dfs_per_sheetname["experiments"]
assert "experiment ID" in df.columns
assert "compound map see corresponding excel table" in df.columns
assert df["experiment ID"].is_unique
## non-limited list
assert "imaging campaigns" in dfs_per_sheetname
df = dfs_per_sheetname["imaging campaigns"]
assert "imaging campaign ID" in df.columns
assert "experiment ID" in df.columns
assert "timepoint in hours" in df.columns
assert "raw data available in zip file" in df.columns
assert "processed images available in folder" in df.columns
assert "cq1 analysis available in folder" in df.columns
assert "incucyte analyzed data available in csv file" in df.columns
##
assert df["imaging campaign ID"].is_unique
#assert "incucyte timestamp" in df.columns
assert "compounds" in dfs_per_sheetname
df = dfs_per_sheetname["compounds"]
assert "compound ID" in df.columns
assert "SMILES" in df.columns
df2 = df[df.duplicated(subset=["SMILES"], keep=False)]
if len(df2) > 0:
print("Sheet 'compounds': The following groups of entries have the same SMILES but different compound IDs:")
for g, s in df2.groupby("SMILES"):
print(f"{g} : ")
print(s)
print("---")
df2 = df[df.duplicated(subset=["compound ID"], keep=False)]
if len(df2) > 0:
print("Sheet 'compounds': The following groups of entries have the same compound ID but different SMILES:")
for g, s in df2.groupby("compound ID"):
print(f"{g} : ")
print(s)
print("---")
assert df["compound ID"].is_unique
assert df["SMILES"].is_unique
assert not df["SMILES"].str.contains("\n").any()
assert "compound batches" in dfs_per_sheetname
df = dfs_per_sheetname["compound batches"]
assert "compound batch ID" in df.columns
assert "compound ID" in df.columns
df2 = df[df.duplicated(subset=["compound batch ID"], keep=False)]
if len(df2) > 0:
print("Sheet 'compound batches': The following groups of entries have the same compound batch ID:")
for g, s in df2.groupby("compound batch ID"):
print(f"{g} : ")
print(s)
print("---")
assert df["compound batch ID"].is_unique
mapping_tables_to_check = list( [s for s in dfs_per_sheetname if "compound map" in s] )
for mapping_table_name in mapping_tables_to_check:
assert mapping_table_name in dfs_per_sheetname
df = dfs_per_sheetname[mapping_table_name]
assert "well ID" in df.columns
assert "well name" in df.columns
assert "compound batch ID" in df.columns
assert "concentration uM" in df.columns
assert "experimental type" in df.columns
## complex tests follow...
acceptable_experimental_types = ["chemogenomic candidate", "unrelated to this experiment", "blank", "control", "cells only"]
for mapping_table_name in mapping_tables_to_check:
df = dfs_per_sheetname[mapping_table_name]
## check that all rows contain one of the allowed values above
assert df["experimental type"].isin(acceptable_experimental_types).all()
# concentration should be only nan if experimental type is one of the below
cond1 = df["experimental type"] == "blank"
cond2 = df["concentration uM"].isna()
cond3 = df["experimental type"] == "cells only"
cond3b = df["experimental type"] == "unrelated to this experiment"
assert df[cond1].equals(df[(cond1) & (cond2)])
assert df[cond3].equals(df[(cond3) & (cond2)])
assert df[cond3b].equals(df[(cond3b) & (cond2)])
assert df[cond2].equals(df[(cond1) | (cond3) | (cond3b)])
# concentration should be >0 if experimental type is different than the ones above
df_out = df[~((cond1)|(cond3)|(cond3b))].query("not `concentration uM` > 0")
if len( df_out ) > 0:
print(f"Concentrations in table '{mapping_table_name}' are not in the expected range:")
print(df_out)
print("---")
# compound batch should be only nan if experimental type is one of the above
cond4 = df["compound batch ID"].isna()
assert df[cond1].equals(df[(cond4) & (cond1)])
assert df[cond3].equals(df[(cond4) & (cond3)])
assert df[cond3b].equals(df[(cond4) & (cond3b)])
assert df[cond4].equals(df[(cond1) | (cond3) | (cond3b)])
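## Illustration of the mask idiom used above (not part of the checks, never called):
## df[cond_a].equals(df[cond_a & cond_b]) asserts that every row matching cond_a
## also matches cond_b. The toy values below are invented.
def _example_mask_idiom():
    toy = pandas.DataFrame({
        "experimental type": ["blank", "control", "cells only"],
        "concentration uM": [float("nan"), 10.0, float("nan")],
    })
    cond_a = toy["experimental type"] == "blank"
    cond_b = toy["concentration uM"].isna()
    assert toy[cond_a].equals(toy[(cond_a) & (cond_b)])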
## ID reference tests
foo = dfs_per_sheetname["experiments"]["experiment ID"]
bar = dfs_per_sheetname["imaging campaigns"]["experiment ID"]
assert foo.isin(bar.values).all()
assert bar.isin(foo.values).all()
foo = dfs_per_sheetname["compound batches"]["compound ID"]
bar = dfs_per_sheetname["compounds"]["compound ID"]
bar_foo = set(bar) - set(foo)
if len(bar_foo) > 0:
print("INFO: There are compound IDs in table 'compounds', which are not referenced in table 'compound batches':")
print(bar_foo)
print("---")
foo_bar = set(foo) - set(bar)
if len(foo_bar) > 0:
print("There are compound IDs in table 'compound batches', which cannot be resolved from table 'compounds':")
print(foo_bar)
print("---")
assert foo.isin(bar.values).all()
assert bar.isin(foo.values).all()
for mapping_table_name in mapping_tables_to_check:
foo = dfs_per_sheetname["compound batches"]["compound batch ID"].unique()
bar = dfs_per_sheetname[mapping_table_name]
bar = bar[ bar["experimental type"] != "cells only" ]
bar = bar[ bar["experimental type"] != "blank"]
bar = bar[ bar["experimental type"] != "unrelated to this experiment"]
bar = bar["compound batch ID"].unique()
bar_foo = set(bar) - set(foo)
if len(bar_foo) > 0:
print(f"There are compound batches in table '{mapping_table_name}', which cannot be resolved from table 'compound batches':")
print(bar_foo)
print("---")
print("Done.")
## BLOCK to replace dummy values in the whole excel file
if True:
did_i_change_anything = False
mapping = {
# "old" : "new",
"dummy1" : "dummy1",
"dummy2" : "EUB0001080a",
"dummy3" : "DP000007a",
"dummy4" : "EUB0001108a",
"EUB0000500a" : "EUB0000871a",
"EUB0000528a" : "EUB0000841a",
"EUB0000543aCl" : "EUB0000213bCl",
"EUB0000550aCl" : "EUB0000196bCl",
"EUB0000657aPO4" : "EUB0000140bPO4",
"EUB0000667aCit" : "EUB0000286bCit",
"EUB0000675aCl" : "EUB0000130bCl",
"EUB0000092a" : "EUB0000092b"
}
import openpyxl
wb = openpyxl.load_workbook(FILENAME)
for sheetname in wb.sheetnames:
ws = wb[sheetname]
dimension = ws.calculate_dimension()
for row in ws[dimension]:
for cell in row:
if cell.value in mapping:
print(f"Changing cell {cell} from value {cell.value} to {mapping[cell.value]}")
cell.value = mapping[cell.value]
did_i_change_anything = True
if did_i_change_anything:
wb.save(FILENAME + ".changed.xlsx")
## ... end of BLOCK.
## BLOCK to check the whole excel file for trailing spaces in the fields
if True:
import openpyxl
wb = openpyxl.load_workbook(FILENAME)
for sheetname in wb.sheetnames:
ws = wb[sheetname]
dimension = ws.calculate_dimension()
for row in ws[dimension]:
for cell in row:
if type(cell.value) == str and cell.value.strip() != cell.value:
print(f"Sheet '{sheetname}', cell {cell.coordinate} contains undesired whitespace: '{cell.value}'")
## ... end of BLOCK.
## BLOCK to condense a list of superfluous entries in table 'compounds' vs correct table 'compound batches'
if False:
foo = dfs_per_sheetname["compound batches"]["compound ID"]
bar = dfs_per_sheetname["compounds"]["compound ID"]
bar_foo = set(bar) - set(foo)
dfs_per_sheetname["compounds"][~bar.isin(bar_foo)].to_excel("2022-02-03-new-compounds-sheet.xlsx")
## ... end of BLOCK.
## BLOCK to check for expected pattern in compound concentrations in one plate...
if False:
for mapping_table_name in mapping_tables_to_check:
foo = dfs_per_sheetname[mapping_table_name]
foo = foo[foo["experimental type"]=="chemogenomic candidate"]
print(mapping_table_name)
print("total len:",len(foo))
counter=0
for groupname, series in foo.groupby("eubopen ID"):
if len(series)!=2:
if len(series)==1:
if series["concentration uM"].item() == 10.0:
counter+=1
continue
print("potential ERROR:")
print(series)
else:
if sorted(series["concentration uM"].values) == [1.0, 10.0]:
counter+=2
else:
print("potential ERROR:")
print(series)
print("rather unsuspicious:", counter)
## ... end of BLOCK.
### BLOCK to check for consistency in data and produce condensed output, if EUbOPEN, SGC IDs, and compound names are given in the compound maps ...
if False:
collect_mappings_between_sgc_and_eubopen_id = {}
collect_mappings_between_compound_names_and_eubopen_id = {}
for mapping_table_name in mapping_tables_to_check:
spam = dfs_per_sheetname[mapping_table_name][["SGC ID", "eubopen ID"]].dropna().drop_duplicates()
spam = dfs_per_sheetname[mapping_table_name][["SGC ID", "eubopen ID"]].drop_duplicates()
same_sgc_different_eubopen = spam[spam.duplicated(subset="SGC ID", keep=False)]
same_eubopen_different_sgc = spam[spam.duplicated(subset="eubopen ID", keep=False)]
if len(same_eubopen_different_sgc)>0:
print(f"There are compound batches in table '{mapping_table_name}', which have different SGC IDs, but the same EUbOPEN ID:")
print(same_eubopen_different_sgc)
print("---")
if len(same_sgc_different_eubopen)>0:
print(f"There are compound batches in table '{mapping_table_name}', which have the same SGC ID, but different EUbOPEN IDs:")
print(same_sgc_different_eubopen)
print("---")
#assert len(same_sgc_different_eubopen) == 0
#assert len(same_eubopen_different_sgc) == 0
for sgc_id, s in spam.groupby("SGC ID"):
if sgc_id in collect_mappings_between_sgc_and_eubopen_id:
value = s["eubopen ID"].item()
                if value != collect_mappings_between_sgc_and_eubopen_id[sgc_id] and not (pandas.isna(value)):
#!/usr/bin/env python3
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
parser = argparse.ArgumentParser(description='Visualize data as plot')
parser.add_argument('--resource',
choices=['cpu_s', 'rss_s', 'vms_s', 'cpu_w', 'rss_w', 'vms_w',
'read_bytes', 'write_bytes',
'recv_bytes', 'send_bytes'],
default='cpu')
args = parser.parse_args()
if args.resource == 'cpu_s':
resource_key = "CPU Usage(%)[TD-Agent#0]"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'CPU Usage (%)'
ylimit = 100
fig_title = 'CPU Usage (Supervisor)'
fig_name = 'CPU_usage_on_supervisor.png'
divide_base = -1
elif args.resource == 'rss_s':
resource_key = "RSS(MB)[TD-Agent#0]"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'RSS Usage (MB) '
ylimit = 100
fig_title = 'RSS Usage (Supervisor)'
fig_name = 'RSS_usage_on_supervisor.png'
divide_base = -1
elif args.resource == 'vms_s':
resource_key = "VMS(MB)[TD-Agent#0]"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'VMS Usage (MB)'
ylimit = 1200
fig_title = 'VMS Usage (Supervisor)'
fig_name = 'VMS_usage_on_supervisor.png'
divide_base = -1
elif args.resource == 'cpu_w':
resource_key = "CPU Usage(%)[Ruby#0]"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'CPU Usage (%)'
ylimit = 100
fig_title = 'CPU Usage (Worker)'
fig_name = 'CPU_usage_on_worker.png'
divide_base = -1
elif args.resource == 'rss_w':
resource_key = "RSS(MB)[Ruby#0]"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'RSS Usage (MB) '
ylimit = 100
fig_title = 'RSS Usage (Worker)'
fig_name = 'RSS_usage_on_worker.png'
divide_base = -1
elif args.resource == 'vms_w':
resource_key = "VMS(MB)[Ruby#0]"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'VMS Usage (MB)'
ylimit = 1200
fig_title = 'VMS Usage (Worker)'
fig_name = 'VMS_usage_on_worker.png'
divide_base = -1
elif args.resource == 'read_bytes':
resource_key = "read bytes(KiB/sec)"
xlabel_message = 'flow rate (lines/second)'
    ylabel_message = 'Disk Read Usage (KiB)'
ylimit = 2500
fig_title = 'Disk Read Usage'
fig_name = 'Disk_Read_usage.png'
divide_base = -1
elif args.resource == 'write_bytes':
resource_key = "write bytes(KiB/sec)"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'Disk Write Usage (KiB)'
ylimit = 3500
fig_title = 'Disk Write Usage'
fig_name = 'Disk_Write_usage.png'
divide_base = -1
elif args.resource == 'recv_bytes':
resource_key = "recv bytes(/sec)"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'Receive Usage (Bytes)'
ylimit = 50000
fig_title = 'Receive Bytes Usage'
fig_name = 'Receive_Bytes_usage.png'
divide_base = -1
elif args.resource == 'send_bytes':
resource_key = "send bytes(/sec)"
xlabel_message = 'flow rate (lines/second)'
ylabel_message = 'Send Usage (Bytes)'
ylimit = 1500000
fig_title = 'Send Bytes Usage'
fig_name = 'Send_Bytes_usage.png'
divide_base = -1
pwd = os.path.dirname(os.path.realpath(__file__))
inventory_file_name = os.path.join(pwd, '..', 'ansible/hosts')
data_loader = DataLoader()
inventory = InventoryManager(loader=data_loader,
sources=[inventory_file_name])
collector = inventory.get_groups_dict()['collector'][0]
tfvars = {}
with open("terraform.tfvars") as tfvarfile:
for line in tfvarfile:
name, var = line.partition("=")[::2]
tfvars[name.strip()] = var
print(tfvars)
username = tfvars["collector-username"].strip(" \"\n")
print(collector)
sns.set()
sns.set_style('whitegrid')
sns.set_palette('Set3')
base_path = os.path.join(pwd, '..', "ansible", "output", collector, "home", username)
print(base_path)
rate_0 = pd.read_csv(os.path.join(base_path, 'usage-0.tsv'), sep='\t', na_values='.')
rate_500 = pd.read_csv(os.path.join(base_path, 'usage-500.tsv'), sep='\t', na_values='.')
rate_1000 = pd.read_csv(os.path.join(base_path, 'usage-1000.tsv'), sep='\t', na_values='.')
rate_2000 = pd.read_csv(os.path.join(base_path, 'usage-2000.tsv'), sep='\t', na_values='.')
rate_5000 = pd.read_csv(os.path.join(base_path, 'usage-5000.tsv'), sep='\t', na_values='.')
df = pd.DataFrame({
0: rate_0[resource_key],
500: rate_500[resource_key],
1000: rate_1000[resource_key],
2000: rate_2000[resource_key],
5000: rate_5000[resource_key],
})
if divide_base > 1:
df = df.divide(divide_base)
medians = {0: np.round(df[0].median(), 2),
500: np.round(df[500].median(), 2),
1000: np.round(df[1000].median(), 2),
2000: np.round(df[2000].median(), 2),
5000: np.round(df[5000].median(), 2)}
median_labels = [str(np.round(s, 2)) for s in medians.values()]
print(medians)
df_melt = pd.melt(df)
# vim: set fdm=indent:
'''
___
/ | ____ ___ ____ _____ ____ ____
/ /| | / __ `__ \/ __ `/_ / / __ \/ __ \
/ ___ |/ / / / / / /_/ / / /_/ /_/ / / / /
/_/ |_/_/ /_/ /_/\__,_/ /___/\____/_/ /_/
______ __
/ ____/___ ________ _________ ______/ /_
/ /_ / __ \/ ___/ _ \/ ___/ __ `/ ___/ __/
/ __/ / /_/ / / / __/ /__/ /_/ (__ ) /_
/_/ \____/_/ \___/\___/\__,_/____/\__/
___ __ __
/ | _____________ / /__ _________ _/ /_____ _____
/ /| |/ ___/ ___/ _ \/ / _ \/ ___/ __ `/ __/ __ \/ ___/
/ ___ / /__/ /__/ __/ / __/ / / /_/ / /_/ /_/ / /
/_/ |_\___/\___/\___/_/\___/_/ \__,_/\__/\____/_/
GITHUB:
https://github.com/aws-samples/simple-forecat-solution/
USAGE:
streamlit run -- ./app.py --local-dir LOCAL_DIR [--landing-page-url URL]
OPTIONS:
--local-dir LOCAL_DIR /path/to/ a local directory from which the UI
will look for files.
--landing-page-url URL URL of the AFA landing page
'''
import os
import sys
import io
import glob
import time
import datetime
import base64
import pathlib
import textwrap
import argparse
import re
import json
import logging
import gzip
import gc
import boto3
import numpy as np
import pandas as pd
import awswrangler as wr
import streamlit as st
import plotly.express as pex
import plotly.graph_objects as go
import cloudpickle
import gzip
from collections import OrderedDict, deque, namedtuple
from concurrent import futures
from urllib.parse import urlparse
from toolz.itertoolz import partition_all
from botocore.exceptions import ClientError
from sspipe import p, px
from streamlit import session_state as state
from textwrap import dedent
from stqdm import stqdm
from afa import (load_data, resample, run_pipeline, run_cv_select,
calc_smape, calc_wape,
make_demand_classification, process_forecasts, make_perf_summary,
make_health_summary, GROUP_COLS, EXP_COLS)
from lambdamap import LambdaExecutor, LambdaFunction
from awswrangler.exceptions import NoFilesFound
from streamlit import caching
from streamlit.uploaded_file_manager import UploadedFile
from streamlit.script_runner import RerunException
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
from joblib import Parallel, delayed
from humanfriendly import format_timespan
ST_STATIC_PATH = pathlib.Path(st.__path__[0]).joinpath("static")
ST_DOWNLOADS_PATH = ST_STATIC_PATH.joinpath("downloads")
LAMBDAMAP_FUNC = "AfaLambdaMapFunction"
LOCAL_DIR = "/home/ec2-user/SageMaker"
if not os.path.exists(ST_DOWNLOADS_PATH):
ST_DOWNLOADS_PATH.mkdir()
FREQ_MAP = OrderedDict(Daily="D", Weekly="W-MON", Monthly="MS")
FREQ_MAP_AFC = OrderedDict(Daily="D", Weekly="W", Monthly="M")
FREQ_MAP_LONG = {
"D": "Daily", "W-MON": "Weekly", "W": "Weekly", "M": "Monthly",
"MS": "Monthly"
}
FREQ_MAP_PD = {
"D": "D",
"W": "W-MON",
"W-SUN": "W-MON",
"W-MON": "W-MON",
"M": "MS",
"MS": "MS"
}
METRIC = "smape"
MAX_LAMBDAS = 1000
def validate(df):
"""Validate a dataset.
"""
err_msgs = []
warn_msgs = []
# check column names
for col in EXP_COLS:
if col not in df:
err_msgs.append(f"missing **{col}** column")
msgs = {
"errors": err_msgs,
"warnings": warn_msgs
}
is_valid_file = len(err_msgs) == 0
return df, msgs, is_valid_file
@st.cache
def load_file(path):
"""
"""
if path.endswith(".csv.gz"):
compression = "gzip"
elif path.endswith(".csv"):
compression = None
else:
raise NotImplementedError
return pd.read_csv(path, dtype={"timestamp": str}, compression=compression)
def _sum(y):
if np.all(pd.isnull(y)):
return np.nan
return np.nansum(y)
def _resample(df2, freq):
df2 = df2.groupby(["channel", "family", "item_id"]) \
.resample(freq) \
.demand \
.sum(min_count=1)
return df2
def process_data(df, freq, chunksize=None):
"""
"""
df["timestamp"] = pd.DatetimeIndex(df["timestamp"])
df.set_index("timestamp", inplace=True)
groups = df.groupby(["channel", "family", "item_id"], sort=False)
if chunksize is None:
chunksize = min(groups.ngroups, 1000)
total = int(np.ceil(groups.ngroups / chunksize))
all_results = []
for chunk in stqdm(partition_all(chunksize, groups), total=total, desc="Progress"):
results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
all_results.extend(results)
df = pd.concat(all_results) \
.reset_index(["channel", "family", "item_id"])
df.index.name = None
return df
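# Hedged sketch of the expected input (never called): a long table with the EXP_COLS
# layout (timestamp, channel, family, item_id, demand); the values are invented.
def _example_process_data():
    toy = pd.DataFrame({
        "timestamp": ["2021-01-04", "2021-01-05", "2021-01-11"],
        "channel": ["web"] * 3,
        "family": ["fam-a"] * 3,
        "item_id": ["sku-1"] * 3,
        "demand": [5, 3, 7],
    })
    # resample the daily demand history to weekly (W-MON) totals
    return process_data(toy, "W-MON")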
class StreamlitExecutor(LambdaExecutor):
"""Custom LambdaExecutor to display a progress bar in the app.
"""
def map(self, func, payloads, local_mode=False):
"""
"""
if local_mode:
f = func
else:
f = LambdaFunction(func, self._client, self._lambda_arn)
ex = self._executor
wait_for = [ex.submit(f, *p["args"], **p["kwargs"]) for p in payloads]
return wait_for
def display_progress(wait_for, desc=None):
"""
"""
# display progress of the futures
pbar = stqdm(desc=desc, total=len(wait_for))
prev_n_done = 0
n_done = sum(f.done() for f in wait_for)
while n_done != len(wait_for):
diff = n_done - prev_n_done
pbar.update(diff)
prev_n_done = n_done
n_done = sum(f.done() for f in wait_for)
time.sleep(0.25)
diff = n_done - prev_n_done
pbar.update(diff)
return
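# Illustrative only (never called): display_progress accepts any list of
# concurrent.futures futures; the sleeps below are placeholders for real work.
def _example_display_progress():
    with futures.ThreadPoolExecutor(max_workers=2) as ex:
        wait_for = [ex.submit(time.sleep, 0.1) for _ in range(4)]
        display_progress(wait_for, desc="toy jobs")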
def run_lambdamap(df, horiz, freq):
"""
"""
payloads = []
freq = FREQ_MAP_PD[freq]
if freq[0] == "W":
cv_periods = None
cv_stride = 2
elif freq[0] == "M":
cv_periods = None
cv_stride = 1
else:
raise NotImplementedError
from toolz.itertoolz import partition
from tqdm.auto import tqdm
#with st.spinner(f":rocket: Launching forecasts via AWS Lambda (λ)..."):
# resample the dataset to the forecast frequency before running
# lambdamap
start = time.time()
df2 = get_df_resampled(df, freq)
print(f"completed in {format_timespan(time.time()-start)}")
groups = df2.groupby(GROUP_COLS, as_index=False, sort=False)
# generate payload
for _, dd in groups:
payloads.append(
{"args": (dd, horiz, freq),
"kwargs": {"metric": "smape",
"cv_periods": cv_periods, "cv_stride": cv_stride}})
# launch jobs in chunks of 1000
executor = StreamlitExecutor(max_workers=min(MAX_LAMBDAS, len(payloads)),
lambda_arn=LAMBDAMAP_FUNC)
wait_for = executor.map(run_cv_select, payloads)
display_progress(wait_for, "🔥 Generating forecasts")
return wait_for
def get_df_resampled(df, freq):
groups = df.groupby(["channel", "family", "item_id"], sort=False)
chunksize = min(1000, groups.ngroups)
total = int(np.ceil(float(groups.ngroups) / chunksize))
all_results = []
for chunk in stqdm(partition_all(chunksize, groups), total=total,
desc="Batch Preparation Progress"):
results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
all_results.extend(results)
df2 = pd.concat(all_results) \
.reset_index(["channel", "family", "item_id"])
df2 = _resample(df, freq).reset_index(["channel", "family", "item_id"])
df2.index.name = None
state["report"]["data"]["df2"] = df2
return df2
def display_ag_grid(df, auto_height=False, paginate=False,
comma_cols=None, selection_mode=None, use_checkbox=False):
"""
Parameters
----------
df : pd.DataFrame
auto_height : bool
    paginate : bool
comma_cols : tuple or list
Numeric columns to apply comma thousands separator.
"""
gb = GridOptionsBuilder.from_dataframe(df)
#gb.configure_selection("single")
gb.configure_auto_height(auto_height)
gb.configure_pagination(enabled=paginate)
if selection_mode is not None:
gb.configure_selection(selection_mode=selection_mode,
use_checkbox=use_checkbox)
comma_renderer = JsCode(textwrap.dedent("""
function(params) {
return params.value
.toString()
.split( /(?=(?:\d{3})+(?:\.|$))/g ).join( "," )
}
"""))
    for col in (comma_cols or ()):
gb.configure_column(col, cellRenderer=comma_renderer)
response = AgGrid(df, gridOptions=gb.build(), allow_unsafe_jscode=True)
return response
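# Hedged example call (never called here; only meaningful inside a running
# Streamlit script). The column names are placeholders.
def _example_display_ag_grid():
    toy = pd.DataFrame({"item_id": ["sku-1", "sku-2"], "demand": [12000, 3400]})
    return display_ag_grid(toy, paginate=True, comma_cols=("demand",))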
def valid_launch_freqs():
data_freq = state.report["data"]["freq"]
valid_freqs = ["D", "W", "M"]
if data_freq in ("D",):
# don't allow daily forecasting yet
valid_freqs = valid_freqs[1:]
elif data_freq in ("W","W-MON",):
valid_freqs = valid_freqs[1:]
elif data_freq in ("M","MS",):
valid_freqs = valid_freqs[2:]
else:
raise NotImplementedError
return valid_freqs
def create_presigned_url(s3_path, expiration=3600):
"""Generate a presigned URL to share an S3 object
:param bucket_name: string
:param object_name: string
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
parsed_url = urlparse(s3_path, allow_fragments=False)
bucket_name = parsed_url.netloc
object_name = parsed_url.path.strip("/")
# Generate a presigned URL for the S3 object
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
# The response contains the presigned URL
return response
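# Hedged example (never called): the bucket and key below are placeholders; the call
# returns a time-limited HTTPS URL, or None if signing fails.
def _example_create_presigned_url():
    return create_presigned_url("s3://my-example-bucket/reports/report.csv.gz",
                                expiration=900)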
def make_df_backtests(df_results, parallel=False):
"""Expand df_results to a "long" dataframe with the columns:
channel, family, item_id, timestamp, actual, backtest.
"""
def _expand(dd):
ts = np.hstack(dd["ts_cv"].apply(np.hstack))
ys = np.hstack(dd["y_cv"].apply(np.hstack))
yp = np.hstack(dd["yp_cv"].apply(np.hstack))
df = pd.DataFrame({"timestamp": ts, "demand": ys, "backtest": yp})
return df
groups = df_results.query("rank == 1") \
.groupby(["channel", "family", "item_id"],
as_index=True, sort=False)
if parallel:
df_backtests = groups.parallel_apply(_expand)
else:
df_backtests = groups.apply(_expand)
df_backtests["timestamp"] = pd.DatetimeIndex(df_backtests["timestamp"])
return df_backtests.reset_index(["channel", "family", "item_id"])
def save_report(report_fn):
"""
"""
if "report" not in state or "name" not in state["report"]:
return
if "path" not in state["report"]["data"]:
st.warning(textwrap.dedent(f"""
Warning: unable to save report, no input data was loaded.
"""))
return
start = time.time()
with st.spinner(":hourglass_flowing_sand: Saving Report ..."):
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
local_path = f'/tmp/{report_fn}'
# save the report locally
cloudpickle.dump(state["report"], gzip.open(local_path, "wb"))
# upload the report to s3
s3_path = \
f'{state["report"]["afa"]["s3_afa_reports_path"]}/{report_fn}'
parsed_url = urlparse(s3_path, allow_fragments=False)
bucket = parsed_url.netloc
key = parsed_url.path.strip("/")
s3_client = boto3.client("s3")
try:
response = s3_client.upload_file(local_path, bucket, key)
signed_url = create_presigned_url(s3_path)
st.info(textwrap.dedent(f"""
The report can be downloaded [here]({signed_url}).
"""))
except ClientError as e:
logging.error(e)
st.text(f"(completed in {format_timespan(time.time() - start)})")
return
def make_df_reports(bucket, prefix):
s3 = boto3.client("s3")
df = pd.DataFrame()
df["filename"] = \
[e['Key'] for p in s3.get_paginator("list_objects_v2")
.paginate(Bucket=bucket, Prefix=prefix) for e in p['Contents']]
#df["s3_path"] = "s3://" + bucket + "/" + df["filename"]
df["filename"] = df["filename"].apply(os.path.basename)
return df
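# Hedged example (never called): bucket and prefix are placeholders; the prefix must
# already contain at least one object, otherwise the paginator yields no 'Contents'.
def _example_make_df_reports():
    return make_df_reports("my-example-bucket", "afa/reports")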
#
# Panels
#
def make_mask(df, channel, family, item_id):
mask = np.ones(len(df)).astype(bool)
# only mask when all three keys are non-empty
if channel == "" or family == "" or item_id == "":
return ~mask
mask &= df["channel"].str.upper() == channel.upper()
mask &= df["family"].str.upper() == family.upper()
mask &= df["item_id"].str.upper() == item_id.upper()
return mask
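# Illustrative only (never called): the key values below are invented.
def _example_make_mask():
    toy = pd.DataFrame({"channel": ["web", "store"],
                        "family": ["fam-a", "fam-a"],
                        "item_id": ["sku-1", "sku-2"]})
    return toy[make_mask(toy, "web", "fam-a", "sku-1")]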
@st.cache
def make_downloads(df_pred, df_results):
"""
"""
pred_fn = os.path.join(ST_DOWNLOADS_PATH,
f"{state.uploaded_file.name}_fcast.csv.gz")
results_fn = os.path.join(ST_DOWNLOADS_PATH,
f"{state.uploaded_file.name}_results.csv.gz")
state.df_pred.to_csv(pred_fn, index=False, compression="gzip")
state.df_results.to_csv(results_fn, index=False, compression="gzip")
return pred_fn, results_fn
def _info(s):
st.info(textwrap.dedent(s))
def _success(s):
st.success(textwrap.dedent(s))
def _write(s):
st.write(textwrap.dedent(s))
def panel_create_report(expanded=True):
"""Display the 'Load Data' panel.
"""
def _load_data(path):
if path.endswith(".csv"):
compression = None
elif path.endswith(".csv.gz"):
compression = "gzip"
else:
raise NotImplementedError
df = pd.read_csv(path,
dtype={"timestamp": str, "channel": str, "family": str,
"item_id": str}, compression=compression)
return df
default_name = state["report"].get("name", None)
file_path = state["report"]["data"].get("path", None)
freq = state["report"]["data"].get("freq", None)
st.markdown("## Create Report")
with st.beta_expander("⬆️ Load + Validate Data", expanded=expanded):
st.write(f"""Step 1 – Create a new forecast report by selecting an uploaded
file containing the demand history for your use-case. You must also specify
the frequency of the demand (e.g. _Daily_, _Weekly_, or _Monthly_). Demand
history files are uploaded using the [SageMaker Notebook interface]({state["landing_page_url"]})""")
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
btn_refresh_files = st.button("Refresh Files", help="Refresh the _File_ selector with recently uploaded files.")
with st.form("create_report_form"):
report_name = st.text_input("Report Name (optional)",
help="You may optionally give this report a name, otherwise one will be automatically generated.")
_cols = st.beta_columns([3,1])
with _cols[0]:
fn = file_selectbox(
"File (.csv or .csv.gz files)", args.local_dir,
help="This file contains the demand history as either a `.csv` or `.csv.gz` file.")
with _cols[1]:
freq = st.selectbox("Frequency", list(s for s in FREQ_MAP.values() if s != 'D'),
format_func=lambda s: FREQ_MAP_LONG[s],
help="This input file must contain demand history at a _daily_, _weekly_, or _monthly_ frequency.")
btn_validate = st.form_submit_button("Load & Validate")
if btn_validate:
start = time.time()
if fn is None:
st.error(textwrap.dedent("""
**Error**
No files were selected.
1. Upload your file(s).
2. Click the **Refresh Files** button.
3. Select the file from the dropdown box.
4. Select the **Frequency**.
5. Click the **Validate** button.
####
"""))
st.stop()
if report_name == "":
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
report_name = f"AfaReport_{now_str}"
if report_name != "" and re.match(r"^[A-Za-z0-9-_]*$", report_name) is None:
                st.error(textwrap.dedent("""
The report name may only contain:
- uppercase letters
- lowercase letters
- numbers
- dashes ('-')
- underscores ('_')
####
"""))
else:
# temporarily load the file for validation and store it in state
# iff the data is valid
with st.spinner(":hourglass_flowing_sand: Validating file ..."):
df, msgs, is_valid_file = validate(_load_data(fn))#.drop(["timestamp", "channel"], axis=1))
if is_valid_file:
with st.spinner(":hourglass_flowing_sand: Processing file ..."):
state.report["name"] = report_name
state.report["data"]["path"] = fn
state.report["data"]["sz_bytes"] = os.path.getsize(fn)
state.report["data"]["freq"] = freq
# impute missing dates from the validated dataframe, this
# will fill in the missing timestamps with null demand values
# state.report["data"]["df"] = \
# load_data(df, impute_freq=state.report["data"]["freq"])
state.report["data"]["df"] = \
process_data(df,state.report["data"]["freq"])
state.report["data"]["is_valid"] = True
# clear any existing data health check results, this forces
# a rechecking of data health
state.report["data"]["df_health"] = None
st.text(f"(completed in {format_timespan(time.time() - start)})")
else:
err_bullets = "\n".join("- " + s for s in msgs["errors"])
st.error(f"**Validation failed**\n\n{err_bullets}")
if state.report["data"].get("is_valid", False):
_success(f"""
`{os.path.basename(state.report["data"]["path"])}` is **valid**
""")
return
def panel_load_report(expanded=True):
"""
"""
def format_func(s):
if s == "local":
return "Local Filesystem"
elif s == "s3":
return "☁️ S3"
s3 = boto3.client("s3")
st.markdown("## Load Report")
with st.beta_expander("📂 Load Report", expanded=expanded):
st.write(f"""Optional – Alternatively, you can load a previously-generated
report. Report files must have the `.pkl.gz` file extension and can be uploaded
using the [SageMaker Notebook interface]({state["landing_page_url"]}).""")
report_source = st.radio("Source", ["local"], format_func=format_func)
_cols = st.beta_columns([3,1])
with _cols[0]:
if report_source == "local":
fn = file_selectbox("File", os.path.join(args.local_dir),
globs=("*.pkl.gz",))
elif report_source == "s3":
pass
else:
raise NotImplementedError
load_report_btn = st.button("Load", key="load_report_btn")
with _cols[1]:
st.write("##")
st.button("Refresh Files", key="refresh_report_files_btn")
if load_report_btn:
start = time.time()
with st.spinner(":hourglass_flowing_sand: Loading Report ..."):
state["report"] = cloudpickle.load(gzip.open(fn, "rb"))
st.text(f"(completed in {format_timespan(time.time() - start)})")
state["prev_state"] = "report_loaded"
return
def panel_data_health():
"""
"""
df = state.report["data"].get("df", None)
df_health = state.report["data"].get("df_health", None)
freq = state.report["data"].get("freq", None)
if df is None:
return
st.header("Data Health")
with st.beta_expander("❤️ Data Health", expanded=True):
st.write(f"""Step 2 – Inspect the characteristics of the dataset
for irregularities prior to generating any forecasts. For example,
missing channels, families, item IDs; or unusually short/long
timeseries lengths.""")
with st.spinner("Performing data health check ..."):
start = time.time()
# check iff required
if df_health is None:
df_health = make_health_summary(df, state.report["data"]["freq"])
# save the health check results
state.report["data"]["df_health"] = df_health
# calc. ranked series by demand
state.report["data"]["df_ranks"] = \
df.groupby(["channel", "family", "item_id"]) \
.agg({"demand": sum}) \
.sort_values(by="demand", ascending=False)
num_series = df_health.shape[0]
num_channels = df_health["channel"].nunique()
num_families = df_health["family"].nunique()
num_item_ids = df_health["item_id"].nunique()
first_date = df_health['timestamp_min'].dt.strftime('%Y-%m-%d').min()
last_date = df_health['timestamp_max'].dt.strftime('%Y-%m-%d').max()
if freq == 'D':
duration_unit = 'D'
duration_str = 'days'
elif freq in ("W", "W-MON",):
duration_unit = 'W'
duration_str = 'weeks'
elif freq in ("M", "MS",):
duration_unit = 'M'
duration_str = 'months'
else:
raise NotImplementedError
duration = pd.Timestamp(last_date).to_period(duration_unit) - \
pd.Timestamp(first_date).to_period(duration_unit)
pc_missing = \
df_health["demand_missing_dates"].sum() / df_health["demand_len"].sum()
with st.beta_container():
_cols = st.beta_columns(3)
with _cols[0]:
st.markdown("#### Summary")
st.text(textwrap.dedent(f"""
No. series:\t{num_series}
No. channels:\t{num_channels}
No. families:\t{num_families}
No. item IDs:\t{num_item_ids}
"""))
with _cols[1]:
st.markdown("#### Timespan")
st.text(f"Frequency:\t{FREQ_MAP_LONG[freq]}\n"
f"Duration:\t{duration.n} {duration_str}\n"
f"First date:\t{first_date}\n"
f"Last date:\t{last_date}\n")
#f"% missing:\t{int(np.round(pc_missing*100,0))}")
with _cols[2]:
st.markdown("#### Timeseries Lengths")
fig = pex.box(df_health, x="demand_nonnull_count", height=160)
fig.update_layout(
margin={"t": 5, "b": 0, "r": 0, "l": 0},
xaxis_title=duration_str,
height=100
)
st.plotly_chart(fig, use_container_width=True)
st.text(f"(completed in {format_timespan(time.time() - start)})")
return
def panel_launch():
"""
"""
    def _format_func(short):
        if short == "local":
            s = " Local"
        elif short == "lambdamap":
            s = "AWS Lambda"
        else:
            raise NotImplementedError
        return s
df = state.report["data"].get("df", None)
df_health = state.report["data"].get("df_health", None)
horiz = state.report["afa"].get("horiz", None)
freq = state.report["afa"].get("freq", None)
if df is None or df_health is None:
return
st.header("Statistical Forecasts")
with st.beta_expander("🚀 Launch", expanded=True):
st.write(f"""Step 3 – Generate forecasts by training and evaluating 75+
configurations of [statistical forecasting
models](https://otexts.com/fpp3/) for each timeseries in
parallel using AWS Lambda. A forecast at the desired _horizon length_ and
        _frequency_ is then generated using each individual timeseries' best model.
This process typically completes at a rate of 500–1,000 timeseries/min.
""")
with st.form("afa_form"):
with st.beta_container():
_cols = st.beta_columns(3)
with _cols[0]:
horiz = st.number_input("Horizon Length", value=1, min_value=1)
with _cols[1]:
freq = st.selectbox("Forecast Frequency", valid_launch_freqs(), 0,
format_func=lambda s: FREQ_MAP_LONG[s])
with _cols[2]:
backend = st.selectbox("Compute Backend",
["lambdamap"], 0, _format_func)
btn_launch = st.form_submit_button("Launch")
if btn_launch:
start = time.time()
# save form data
state.report["afa"]["freq"] = freq
state.report["afa"]["horiz"] = horiz
state.report["afa"]["backend"] = backend
df = state.report["data"]["df"]
freq_in = state.report["data"]["freq"]
freq_out = state.report["afa"]["freq"]
if backend == "local":
wait_for = \
run_pipeline(df, freq_in, freq_out, metric=METRIC,
cv_stride=2, backend="futures", horiz=horiz)
display_progress(wait_for, "🔥 Generating forecasts")
raw_results = [f.result() for f in futures.as_completed(wait_for)]
elif backend == "lambdamap":
with st.spinner(f":rocket: Launching forecasts via AWS Lambda (λ)..."):
all_raw_results = []
groups = df.groupby(["channel", "family", "item_id"], sort=False)
chunksize = min(5000, groups.ngroups)
# divide the dataset into chunks
df["grp"] = groups.ngroup() % int(np.ceil(groups.ngroups / chunksize))
groups = df.groupby("grp", sort=False)
total = df["grp"].nunique()
for _, dd in stqdm(groups, total=total, desc="Overall Progress"):
wait_for = run_lambdamap(dd, horiz, freq_out)
raw_results = [f.result() for f in futures.as_completed(wait_for)]
all_raw_results.extend(raw_results)
raw_results = all_raw_results
else:
raise NotImplementedError
with st.spinner("⏳ Calculating results ..."):
# generate the results and predictions as dataframes
df_results, df_preds, df_model_dist, best_err, naive_err = \
process_forecasts(wait_for, METRIC)
                # generate the demand classification info
df_demand_cln = make_demand_classification(df, freq_in)
# save results and forecast data
state.report["afa"]["df_results"] = df_results
state.report["afa"]["df_preds"] = df_preds
state.report["afa"]["df_demand_cln"] = df_demand_cln
state.report["afa"]["df_model_dist"] = df_model_dist
state.report["afa"]["best_err"] = best_err
state.report["afa"]["naive_err"] = naive_err
state.report["afa"]["job_duration"] = time.time() - start
job_duration = state.report["afa"].get("job_duration", None)
if job_duration:
st.text(f"(completed in {format_timespan(job_duration)})")
return
def panel_accuracy():
"""
"""
df = state.report["data"].get("df", None)
df_demand_cln = state.report["afa"].get("df_demand_cln", None)
df_results = state.report["afa"].get("df_results", None)
df_model_dist = state["report"]["afa"].get("df_model_dist", None)
best_err = state["report"]["afa"].get("best_err", None)
naive_err = state["report"]["afa"].get("naive_err", None)
horiz = state.report["afa"].get("horiz", None)
freq_out = state.report["afa"].get("freq", None)
if df is None or df_results is None or df_model_dist is None:
return
def _calc_metrics(dd, metric="smape"):
if metric == "smape":
metric_func = calc_smape
elif metric == "wape":
metric_func = calc_wape
else:
raise NotImplementedError
ys = np.hstack(dd["y_cv"].apply(np.hstack))
yp = np.hstack(dd["yp_cv"].apply(np.hstack))
return metric_func(ys, yp)
df_acc = df_results.groupby(["channel", "family", "item_id"], as_index=False, sort=True) \
.apply(lambda dd: _calc_metrics(dd, METRIC)) \
.rename({None: METRIC}, axis=1)
with st.beta_expander("🎯 Forecast Summary", expanded=True):
_write(f"""
Step 4 – The forecast error is calculated as the [symmetric
mean absolute percentage error
(SMAPE)](https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error)
via sliding window backtesting. Forecast _accuracy_ is calculated as
`100-SMAPE` and is averaged across all timeseries to give the _overall
accuracy_. The overall accuracy of the best naive models is used as a baseline.
        The _classification_ distribution indicates the percentage of timeseries
        that have a _short_, _medium_, or _continuous_ lifecycle. The _Best Models_ chart
        shows the distribution of model types that were selected as the best model
        across the dataset.
""")
df_cln = pd.DataFrame({"category": ["short", "medium", "continuous"]})
df_cln = df_cln.merge(
df_demand_cln["category"]
.value_counts(normalize=True)
.reset_index()
.rename({"index": "category", "category": "frac"}, axis=1),
on="category", how="left"
)
df_cln = df_cln.fillna(0.0)
df_cln["frac"] *= 100
df_cln["frac"] = df_cln["frac"].astype(int)
_cols = st.beta_columns(3)
with _cols[0]:
st.markdown("#### Parameters")
st.text(f"Horiz. Length:\t{horiz}\n"
f"Frequency:\t{FREQ_MAP_LONG[freq_out]}")
st.markdown("#### Classification")
st.text(f"Short:\t\t{df_cln.iloc[0]['frac']} %\n"
f"Medium:\t\t{df_cln.iloc[1]['frac']} %\n"
f"Continuous:\t{df_cln.iloc[2]['frac']} %")
with _cols[1]:
st.markdown("#### Best Models")
df_model_dist = df_model_dist.query("perc > 0")
labels = df_model_dist["model_type"].values
values = df_model_dist["perc"].values
fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=0.40)])
fig.update(layout_showlegend=False)
fig.update_layout(
margin={"t": 0, "b": 0, "r": 20, "l": 20},
width=200,
height=150,
)
#fig.update_traces(textinfo="percent+label", texttemplate="%{label} – %{percent:.1%f}")
fig.update_traces(textinfo="percent+label")
st.plotly_chart(fig)
acc_val = (1 - np.nanmean(df_acc[METRIC])) * 100.
acc_naive = (1 - naive_err.err_mean) * 100.
with _cols[2]:
st.markdown("#### Overall Accuracy")
st.markdown(
f"<div style='font-size:36pt;font-weight:bold'>{acc_val:.0f}%</div>"
f"({np.clip(acc_val - acc_naive, 0, None):.0f}% increase vs. naive)",
unsafe_allow_html=True)
return
@st.cache()
def make_df_top(df, df_results, groupby_cols, dt_start, dt_stop, cperc_thresh,
metric="smape"):
"""
"""
def calc_period_metrics(dd, dt_start, dt_stop):
"""
"""
dt_start = pd.Timestamp(dt_start)
dt_stop = pd.Timestamp(dt_stop)
ts = np.hstack(dd["ts_cv"].apply(np.hstack))
ix = (ts >= dt_start) & (ts <= dt_stop)
ys = np.hstack(dd["y_cv"].apply(np.hstack))[ix]
yp = np.hstack(dd["yp_cv"].apply(np.hstack))[ix]
if metric == "smape":
error = calc_smape(ys, yp)
elif metric == "wape":
error = calc_wape(ys, yp)
else:
raise NotImplementedError
return error
metric_name = f"{metric}_mean"
df.index.name = "timestamp"
dt_start = pd.Timestamp(dt_start).strftime("%Y-%m-%d")
dt_stop = pd.Timestamp(dt_stop).strftime("%Y-%m-%d")
df2 = df.query(f"timestamp >= '{dt_start}' and timestamp <= '{dt_stop}'")
total_demand = df2["demand"].sum()
# calculate per-group demand %
df_grp_demand = \
df2.groupby(groupby_cols, as_index=False, sort=False) \
.agg({"demand": sum})
df_grp_demand["perc"] = df_grp_demand["demand"] / total_demand * 100
# get the best models for each group
df_grp_metrics = \
df_results.query("rank == 1") \
.groupby(groupby_cols, as_index=False, sort=False) \
.apply(lambda dd: calc_period_metrics(dd, dt_start, dt_stop)) \
.pipe(pd.DataFrame) \
.rename({None: metric_name}, axis=1) \
.reset_index()
df_grp_metrics["accuracy"] = 100 * (1-df_grp_metrics[metric_name])
df_grp_metrics.drop(["index", metric_name], axis=1, inplace=True)
# combine, sort, and display
df_grp = df_grp_demand \
.merge(df_grp_metrics, on=groupby_cols, how="left") \
.sort_values(by="demand", ascending=False)
df_grp["cperc"] = df_grp["perc"].cumsum()
df_grp = df_grp.query(f"cperc <= {cperc_thresh}")
df_grp.rename({"perc": "% total demand", "accuracy": "% accuracy"}, axis=1, inplace=True)
df_grp.drop("cperc", axis=1, inplace=True)
# calc. summary row
df_grp_summary = df_grp.agg({"demand": sum, "% accuracy": np.nanmean})
df_grp_summary["% total demand"] = np.round(100 * df_grp_summary["demand"] / total_demand, 1)
df_grp_summary = pd.DataFrame(df_grp_summary).T[["demand", "% total demand", "% accuracy"]]
df_grp_summary.insert(0, "group by", ", ".join(groupby_cols))
df_grp_summary["% accuracy"] = df_grp_summary["% accuracy"].round(0)
df_grp["demand"] = df_grp["demand"].round(0)
df_grp["% total demand"] = df_grp["% total demand"].round(1)
df_grp["% accuracy"] = df_grp["% accuracy"].round(0)
df_grp.insert(0, "rank", np.arange(df_grp.shape[0]) + 1)
df_grp_summary["demand"] = df_grp_summary["demand"].round(0)
df_grp_summary["% total demand"] = df_grp_summary["% total demand"].round(1)
return df_grp, df_grp_summary
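# Example usage of make_df_top (hedged sketch; assumes df has a DatetimeIndex and
# df_results carries the cross-validation columns used above):
#   df_top, df_top_summary = make_df_top(df, df_results, ["channel", "family"],
#                                        "2021-01-01", "2021-06-30", cperc_thresh=80)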
@st.cache()
def make_ml_df_top(df, df_backtests, groupby_cols, dt_start, dt_stop, cperc_thresh, metric):
"""
"""
def calc_period_metrics(dd, dt_start, dt_stop):
"""
"""
dt_start = pd.Timestamp(dt_start)
dt_stop = pd.Timestamp(dt_stop)
ts = dd["timestamp"]
ix = (ts >= dt_start) & (ts <= dt_stop)
ys = dd["target_value"][ix]
yp = dd["demand"][ix]
if metric == "smape":
error = calc_smape(ys, yp)
elif metric == "wape":
error = calc_wape(ys, yp)
else:
raise NotImplementedError
return error
df.index.name = "timestamp"
dt_start = pd.Timestamp(dt_start).strftime("%Y-%m-%d")
dt_stop = pd.Timestamp(dt_stop).strftime("%Y-%m-%d")
df2 = df.query(f"timestamp >= '{dt_start}' and timestamp <= '{dt_stop}'")
total_demand = df2["demand"].sum()
# calculate per-group demand %
df_grp_demand = \
df2.groupby(groupby_cols, as_index=False, sort=False) \
.agg({"demand": sum})
df_grp_demand["perc"] = df_grp_demand["demand"] / total_demand * 100
# get the best models for each group
df_grp_metrics = \
df_backtests.groupby(groupby_cols, as_index=False, sort=False) \
.apply(lambda dd: calc_period_metrics(dd, dt_start, dt_stop)) \
.rename({None: metric}, axis=1)
df_grp_metrics["accuracy"] = 100 * (1-df_grp_metrics[metric])
df_grp_metrics.drop(metric, axis=1, inplace=True)
# combine, sort, and display
df_grp = df_grp_demand \
.merge(df_grp_metrics, on=groupby_cols, how="left") \
.sort_values(by="demand", ascending=False)
df_grp["cperc"] = df_grp["perc"].cumsum()
df_grp = df_grp.query(f"cperc <= {cperc_thresh}")
df_grp.rename({"perc": "% total demand", "accuracy": "% accuracy"}, axis=1, inplace=True)
df_grp.drop("cperc", axis=1, inplace=True)
# calc. summary row
df_grp_summary = df_grp.agg({"demand": sum, "% accuracy": np.nanmean})
df_grp_summary["% total demand"] = np.round(100 * df_grp_summary["demand"] / total_demand, 1)
df_grp_summary = | pd.DataFrame(df_grp_summary) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.axes as ax
# creates the data for the HOAc/Ni(110) IR
colnames = ['Wavenumber', 'Intensity']
# 15 s
f1 = pd.read_csv("Ni(110) 1e-9Torr15s 210K.0.dpt", '\t', header=None, names=colnames)
f1.set_index(colnames[0], inplace=True)
f2 = pd.read_csv("Ni(110) 1e-9Torr15s 352K.0.dpt", '\t', header=None, names=colnames)
f2.set_index(colnames[0], inplace=True)
f3 = pd.read_csv("Ni(110) 1e-9Torr15s 452K.0.dpt", '\t', header=None, names=colnames)
f3.set_index(colnames[0], inplace=True)
k90_15s = -1*f3 # background was taken at 90 K
k210_15s = -1*(f3-f1)
k352_15s = -1*(f3-f2)
cat_15s = pd.concat([k90_15s, k210_15s, k352_15s], axis=1, keys=['90K', '210K', '352K'])
# 60 s
f4 = pd.read_csv("Ni(110) 1e-9Torr60s -200K.0.dpt", '\t', header=None, names=colnames)
f4.set_index(colnames[0], inplace=True)
f5 = pd.read_csv("Ni(110) 1e-9Torr60s -350K.0.dpt", '\t', header=None, names=colnames)
f5.set_index(colnames[0], inplace=True)
f6 = pd.read_csv("Ni(110) 1e-9Torr60s -450K.1.dpt", '\t', header=None, names=colnames)
f6.set_index(colnames[0], inplace=True)
f7 = | pd.read_csv("Ni(110) 1e-9Torr60s -550K.1.dpt", '\t', header=None, names=colnames) | pandas.read_csv |
import pandas as pd
from npi.npi import NPI, convert_practitioner_data_to_long, provider_taxonomies
from npi.pecos import PECOS
from npi.samhsa import SAMHSA
from npi.utils.utils import isid
from utils.loaders import pickle_read
def getcol(df, src, idvar, col, newname):
return (df.merge(src[[idvar, col]].drop_duplicates())
.rename(columns={col: newname}))
def conform_data_sources(source, cols, **kwargs):
    '''Return a conformed name dataframe for a data source; by default it includes
    names, then adds the other requested variables in a systematic fashion.
    Use npi_source="ploc2" for secondary practice locations.
    The practitioner types must be passed via the practypes keyword argument.
    '''
if isinstance(source, NPI):
return conform_NPI(source, cols, **kwargs)
elif isinstance(source, SAMHSA):
return conform_SAMHSA(source, cols, **kwargs)
elif isinstance(source, PECOS):
return conform_PECOS(source, cols, **kwargs)
def conform_NPI(source, cols, **kwargs):
df = source.expanded_fullnames.copy()
idvar = 'npi'
if 'practitioner_type' in cols:
src = (source.practitioner_type
.pipe(convert_practitioner_data_to_long,
types=kwargs['practypes']))
df = df.pipe(
getcol, src, idvar, 'PractitionerType', 'practitioner_type')
if 'state' in cols:
if not kwargs or 'npi_source' not in kwargs.keys():
src = (source
.plocstatename.drop(columns='month')
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'plocstatename', 'state')
elif 'npi_source' in kwargs.keys() and kwargs['npi_source'] == 'ploc2':
src = (source
.secondary_practice_locations[[idvar, 'ploc2statename']]
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'ploc2statename', 'state')
if 'zip5' in cols:
if not kwargs or 'npi_source' not in kwargs.keys():
src = (source.ploczip
.assign(zip5=lambda x: x['ploczip'].str[:5])
.drop(columns=['month', 'ploczip'])
.drop_duplicates())
elif 'npi_source' in kwargs.keys() and kwargs['npi_source'] == 'ploc2':
src = (source
.secondary_practice_locations
.assign(
zip5=lambda x: x['ploc2zip'].str[:5])[[idvar, 'zip5']]
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'zip5', 'zip5')
if 'tel' in cols:
if not kwargs or 'npi_source' not in kwargs.keys():
src = (source
.ploctel.drop(columns='month')
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'ploctel', 'tel')
elif 'npi_source' in kwargs.keys() and kwargs['npi_source'] == 'ploc2':
src = (source
.secondary_practice_locations[[idvar, 'ploc2tel']]
.drop_duplicates())
src['tel'] = (src.ploc2tel
.astype('str')
.str.split('.', expand=True)[0])
src['tel'] = (src.tel.str.replace('-', '')
.str.replace('(', '')
.str.replace(')', '')
.str.replace(' ', ''))
df = df.pipe(getcol, src, idvar, 'tel', 'tel')
return df.drop_duplicates()
def conform_SAMHSA(source, cols, **kwargs):
df = source.names.copy()
idvar = 'samhsa_id'
if 'practitioner_type' in cols:
df = df.pipe(getcol, source.samhsa, idvar, 'PractitionerType',
'practitioner_type')
if 'state' in cols:
df = df.pipe(getcol, source.samhsa, idvar, 'State', 'state')
if 'zip5' in cols:
src = (source
.samhsa
.assign(zip5=lambda df: df['Zip'].str[:5])[[idvar, 'zip5']]
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'zip5', 'zip5')
if 'tel' in cols:
src = source.samhsa['samhsa_id']
src2 = (pd.DataFrame(source.samhsa['Phone']
.str.replace('-', '')
.str.replace('(', '')
.str.replace(')', '')
.str.replace(' ', '')))
src = pd.concat([src, src2], axis=1)
df = df.pipe(getcol, src, idvar, 'Phone', 'tel')
return df.drop_duplicates()
def conform_PECOS(source, cols, **kwargs):
df = source.names.copy()
idvar = 'NPI'
if 'practitioner_type' in cols:
df = df.pipe(getcol, source.practitioner_type, idvar, 'Credential',
'practitioner_type')
df.loc[df.practitioner_type.isin(['MD', 'DO']),
'practitioner_type'] = 'MD/DO'
df.loc[df.practitioner_type.isin(['CNA']),
'practitioner_type'] = 'CRNA'
df = df[df.practitioner_type.isin(kwargs['practypes'])]
if 'state' in cols:
df = df.pipe(
getcol, source.physician_compare, idvar, 'State', 'state')
if 'zip5' in cols:
src = (source
.physician_compare
.assign(zip5=lambda df: df['Zip Code'].astype(str).str[:5]))
src = src[[idvar, 'zip5']].drop_duplicates()
df = df.pipe(getcol, src, idvar, 'zip5', 'zip5')
if 'tel' in cols:
src = source.physician_compare['NPI']
src2 = (source.physician_compare['Phone Number']
.astype('string')
.apply(lambda x: str(x).replace('.0', '')))
src = pd.concat([src, src2], axis=1)
df = df.pipe(getcol, pd.DataFrame(src), idvar, 'Phone Number', 'tel')
return df.drop_duplicates()
def make_clean_matches(df1, df2, id_use, id_target,
blocklist=pd.DataFrame()):
    '''Merge df1 and df2 on their shared columns, keeping only unambiguous one-to-one
    (id_use, id_target) matches and excluding any pairs already in blocklist.'''
# DELETE IF NAME CONFLICTS IN MATCHES
if not blocklist.empty:
df1 = (df1.merge(blocklist, how='left', indicator=True)
.query('_merge=="left_only"'))[df1.columns]
df2 = (df2.merge(blocklist, how='left', indicator=True)
.query('_merge=="left_only"'))[df2.columns]
m = df1.merge(df2)[[id_use, id_target]].drop_duplicates()
m = m[~m[id_use].duplicated(keep=False)]
m = m[~m[id_target].duplicated(keep=False)]
assert m[id_use].is_unique
assert m[id_target].is_unique
return m
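# Example usage of make_clean_matches (hedged sketch; df1/df2 are conformed name
# frames built by conform_data_sources and share columns such as name/state/zip5):
#   m = make_clean_matches(df1, df2, id_use='samhsa_id', id_target='npi',
#                          blocklist=pd.DataFrame())
#   # m holds only unambiguous one-to-one samhsa_id <-> npi pairs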
def make_clean_matches_iterate(df1, idvar1, ordervar, df2, idvar2, blocklist):
orders = sorted((df1[ordervar].value_counts().index.tolist()))
for o in orders:
m = make_clean_matches(
df1.query(f'order=={o}'),
df2,
id_use=idvar1, id_target=idvar2,
blocklist=blocklist[[x for x in blocklist.columns
if x != 'order']])
blocklist = blocklist.append(m.assign(order=o))
return blocklist
def reconcat_names(df, firstname, middlename, lastname):
n = (df.assign(
n=lambda x: x[firstname] + ' ' + x[middlename] + ' ' + x[lastname])
.n)
df[f'{firstname}_r'] = n.apply(lambda y: y.split()[0])
df[f'{middlename}_r'] = n.apply(lambda y: ' '.join(y.split()[1:-1]))
df[f'{lastname}_r'] = n.apply(lambda y: y.split()[-1])
return df
def generate_matches(s, npi, pecos, varlist, practypes, final_crosswalk):
from npi.utils.globalcache import c
df1 = conform_data_sources(s, varlist)
df2 = conform_data_sources(npi, varlist, practypes=practypes)
final_crosswalk = c.make_clean_matches_iterate(df1, 'samhsa_id', 'order',
df2, 'npi',
final_crosswalk)
print('(1) Found %s matches' % final_crosswalk.shape[0])
df3 = conform_data_sources(pecos, varlist, practypes=practypes)
df3 = df3.rename(columns={'NPI': 'npi'})
final_crosswalk = c.make_clean_matches_iterate(df1, 'samhsa_id', 'order',
df3, 'npi',
final_crosswalk)
print('(2) Found %s matches' % final_crosswalk.shape[0])
df4 = conform_data_sources(npi, varlist,
practypes=practypes, npi_source="ploc2")
final_crosswalk = c.make_clean_matches_iterate(df1, 'samhsa_id', 'order',
df4, 'npi',
final_crosswalk)
print('(3) Found %s matches' % final_crosswalk.shape[0])
return final_crosswalk
# out = make_clean_matches_iterate(df1, 'samhsa_id', 'order', df2, 'npi', pd.DataFrame())
#
# priority_names = out[['samhsa_id']].assign(order=1).merge(df1)
# priority_names['new_firstname'] = priority_names.assign(n=lambda df: df['firstname'] + ' ' + df['middlename'] + ' ' + df['lastname']).n.apply(lambda x: x.split()[0])
# priority_names['new_middlename'] = priority_names.assign(n=lambda df: df['firstname'] + ' ' + df['middlename'] + ' ' + df['lastname']).n.apply(lambda x: ' '.join(x.split()[1:-1]))
# priority_names['new_lastname'] = priority_names.assign(n=lambda df: df['firstname'] + ' ' + df['middlename'] + ' ' + df['lastname']).n.apply(lambda x: x.split()[-1])
# priority_names = priority_names.assign(new_suffix=lambda df: df.Suffix)
# priority_names = priority_names[['samhsa_id','new_firstname','new_middlename','new_lastname','new_suffix','practitioner_type','state','zip5']].drop_duplicates()
#
# # USE RECONCAT NAMES
# priority_names2 = out[['npi']].merge(df2)
# priority_names2['new_firstname'] = priority_names2.assign(n=lambda df: df['pfname'] + ' ' + df['pmname'] + ' ' + df['plname']).n.apply(lambda x: x.split()[0])
# priority_names2['new_middlename'] = priority_names2.assign(n=lambda df: df['pfname'] + ' ' + df['pmname'] + ' ' + df['plname']).n.apply(lambda x: ' '.join(x.split()[1:-1]))
# priority_names2['new_lastname'] = priority_names2.assign(n=lambda df: df['pfname'] + ' ' + df['pmname'] + ' ' + df['plname']).n.apply(lambda x: x.split()[-1])
# priority_names2 = priority_names2.assign(new_suffix=lambda df: df.pnamesuffix)
# priority_names2 = priority_names2[['npi','new_firstname','new_middlename','new_lastname','new_suffix','practitioner_type','state','zip5']].drop_duplicates()
#
# expand_matches = out[['samhsa_id','npi']].merge(priority_names).merge(out[['samhsa_id','npi']].merge(priority_names2), how='outer', indicator=True)
# all_good = expand_matches.query('_merge=="both"')[['samhsa_id','npi']].drop_duplicates()
# expand_matches = expand_matches.merge(all_good, how='left', indicator='_merge2').query('_merge2!="both"').drop(columns='_merge2')
#
# o1 = out.merge(df1[['samhsa_id', 'middlename','Suffix']].dropna().query('middlename!="" or Suffix!=""').drop_duplicates())
# o2 = out.merge(df2[['npi', 'pmname', 'pnamesuffix']].dropna().query('pmname!="" or pnamesuffix!=""').drop_duplicates())
# lo1 = o1.merge(o2, left_on=o1.columns.tolist(), right_on=o2.columns.tolist(), how='outer', indicator=True).query('_merge=="left_only"')[['samhsa_id','npi']].drop_duplicates()
# ro1 = o1.merge(o2, left_on=o1.columns.tolist(), right_on=o2.columns.tolist(), how='outer', indicator=True).query('_merge=="right_only"')[['samhsa_id','npi']].drop_duplicates()
# lo1.merge(ro1)
def match_samhsa_npi():
# I don't exploit timing here
s = SAMHSA()
s.retrieve('names')
npi = NPI(entities=1)
npi.retrieve('fullnames')
npi.retrieve('expanded_fullnames')
npi.retrieve('credentials')
npi.retrieve('ptaxcode')
npi.retrieve('practitioner_type')
npi.retrieve('plocstatename')
npi.retrieve('ploczip')
npi.retrieve('ploctel')
npi.retrieve('secondary_practice_locations')
pecos = PECOS(['NPI', 'Last Name', 'First Name', 'Middle Name',
'Suffix', 'State', 'Zip Code', 'Phone Number'])
pecos.retrieve('names')
pecos.retrieve('practitioner_type')
# matching data to generate a crosswalk
final_crosswalk = pd.DataFrame()
practypes = ['MD/DO', 'NP', 'PA', 'CRNA', 'CNM', 'CNS']
# 0. TELEPHONE
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state', 'zip5', 'tel'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state', 'tel'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state', 'zip5'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['state', 'zip5', 'tel'],
practypes, final_crosswalk)
final_crosswalk1 = generate_matches(
s, npi, pecos,
['state', 'tel'],
practypes, final_crosswalk)
final_crosswalk2 = generate_matches(
s, npi, pecos,
['state', 'zip5'],
practypes, final_crosswalk)
final_crosswalk3 = generate_matches(
s, npi, pecos,
['state'],
practypes, final_crosswalk)
final_crosswalk4 = generate_matches(
s, npi, pecos,
['practitioner_type'],
practypes, final_crosswalk)
final_crosswalk5 = generate_matches(
s, npi, pecos,
[],
practypes, final_crosswalk)
fin = (final_crosswalk1.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = (fin.append(
final_crosswalk2
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"')))
fin = fin.append(
final_crosswalk3.query('order==1')
.merge(s.names).query('middlename!=""')[['samhsa_id', 'npi']]
.drop_duplicates()
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = fin.append(
final_crosswalk4.query('order==1')
.merge(s.names).query('middlename!=""')[['samhsa_id', 'npi']]
.drop_duplicates()
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = fin.append(
final_crosswalk5.query('order==1')
.merge(s.names).query('middlename!=""')[['samhsa_id', 'npi']]
.drop_duplicates()
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = fin[['samhsa_id', 'npi']].drop_duplicates()
fin = fin[~fin['samhsa_id'].duplicated(keep=False)]
fin = fin[~fin['npi'].duplicated(keep=False)]
fin = final_crosswalk.append(fin).drop(columns='order').drop_duplicates()
fin = fin.append(pd.DataFrame(dict(samhsa_id=[42325, 34010, 80, 62, 42387,
42333, 42339],
npi=[1558332031, 1154652295,
1871718890, 1275599524, 1457360588,
1609002799, 1346518842]
)))
nopunct1 = (npi
.expanded_fullnames
.assign(nopunct=npi.expanded_fullnames['name']
.str.replace("'", "")
.str.replace('-', '')
.str.replace(' ', ''))[['npi', 'nopunct']])
remainders = (fin.merge(s.samhsa.drop_duplicates(),
how='right', on='samhsa_id', indicator=True)
.query('_merge=="right_only"'))
nopunct2 = (remainders[['samhsa_id']]
.merge(s.names)
.assign(nopunct=lambda df: (df['name']
.str.replace("'", "")
.str.replace('-', '')
.str.replace(' ', ''))))
nopunct2 = nopunct2[['samhsa_id', 'nopunct']]
matches = nopunct2.merge(nopunct1)
matches2 = matches[['npi', 'samhsa_id']].drop_duplicates()
matches2 = matches2[~matches2['samhsa_id'].duplicated(keep=False)]
matches2 = matches2[~matches2['npi'].duplicated(keep=False)]
newmatches = (matches2.merge(nopunct1)
.merge(nopunct2)[
matches2.merge(nopunct1)
.merge(nopunct2)
.nopunct.str.len() >= 10][['npi', 'samhsa_id']]
.drop_duplicates())
newmatches = newmatches[~newmatches.samhsa_id.isin(fin.samhsa_id)]
newmatches = newmatches[~newmatches.npi.isin(fin.npi)]
fin = fin.append(newmatches)
assert fin['samhsa_id'].is_unique
assert fin['npi'].is_unique
fin.reset_index(inplace=True, drop=True)
return fin
def analysis_dataset():
# some of this should get added to the PECOS class
# including also the name match
# Get matches of NPI to SAMHSA
# matches = (pd.read_csv('/work/akilby/npi/final_matches.csv')
# .drop(columns='Unnamed: 0'))
# from npi.utils.globalcache import c
# matches = c.match_samhsa_npi()
npi = NPI(entities=1)
npi.retrieve('practitioner_type')
npi_practype = (npi.practitioner_type
.pipe(convert_practitioner_data_to_long,
types=['MD/DO', 'NP', 'PA',
'CRNA', 'CNM', 'CNS']))
npi.retrieve('pgender')
pecos = PECOS(['NPI', 'Last Name', 'First Name', 'Middle Name',
'Suffix', 'State', 'Zip Code', 'Phone Number'])
pecos.retrieve('practitioner_type')
# 1. Select MD/DO and NPs from either NPI or PECOS
practitioners = (pecos.practitioner_type.merge(npi_practype,
how='left',
left_on="NPI",
right_on='npi'))
mddo = (practitioners
.query('Credential=="MD/DO" or Credential=="MD" or Credential=="DO'
'" or PractitionerType=="MD/DO"')
.NPI.drop_duplicates())
nps = practitioners.loc[(practitioners['Primary specialty']
== "NURSE PRACTITIONER")
| (practitioners['Credential'] == 'NP')
| (practitioners['PractitionerType'] == "NP")]
nps = nps.NPI.drop_duplicates()
# pecos_groups = PECOS(['NPI', 'Organization legal name',
# 'Group Practice PAC ID',
# 'Number of Group Practice members',
# 'Hospital affiliation CCN 1',
# 'Hospital affiliation LBN 1',
# 'Hospital affiliation CCN 2',
# 'Hospital affiliation LBN 2',
# 'Hospital affiliation CCN 3',
# 'Hospital affiliation LBN 3',
# 'Hospital affiliation CCN 4',
# 'Hospital affiliation LBN 4',
# 'Hospital affiliation CCN 5',
# 'Hospital affiliation LBN 5'],
# drop_duplicates=False, date_var=True)
# 2. Get group practice information. most sole practitioners
# are missing a group practice ID
pecos_groups_loc = PECOS(['NPI', 'Organization legal name',
'Group Practice PAC ID',
'Number of Group Practice members',
'State', 'Zip Code', 'Phone Number'],
drop_duplicates=False, date_var=True)
groups = pecos_groups_loc.physician_compare.drop_duplicates()
groups = groups.reset_index(drop=True).reset_index()
# A bunch of sole practitioners (groupsize =1 ) are missing
# give them a single-period group practice ID (not constant over
# time even though other IDs are)
groups.loc[
groups['Group Practice PAC ID'].isnull(),
'Group Practice PAC ID'] = (groups['index'] + 100000000000)
groups = groups.drop(columns='index')
groups = groups.merge(
groups[['NPI', 'Group Practice PAC ID', 'date']]
.drop_duplicates()
.groupby(['Group Practice PAC ID', 'date'])
.size()
.reset_index())
groups.loc[
groups['Number of Group Practice members'].isnull(),
'Number of Group Practice members'] = groups[0]
groups.drop(columns=[0], inplace=True)
coprac = (groups[['Group Practice PAC ID',
'Number of Group Practice members',
'State',
'Zip Code', 'date']]
.drop_duplicates())
coprac_ids = coprac.reset_index(drop=True).reset_index().rename(
columns={'index': 'group_prac_zip_date_id'})
coprac_np_counts = (groups
.merge(nps)
.merge(coprac_ids))
idvars = ['group_prac_zip_date_id', 'date', 'NPI']
coprac_np_counts = coprac_np_counts[idvars].drop_duplicates()
coprac_np_counts = (coprac_np_counts
.groupby(['group_prac_zip_date_id', 'date'])
.size()
.reset_index()
.rename(columns={0: 'np_count'}))
coprac_mds = (groups
.merge(mddo)
.merge(coprac_ids))
coprac_mds = coprac_mds[idvars].drop_duplicates()
coprac_mds = coprac_mds.merge(coprac_np_counts, how='left')
coprac_mds['np_count'] = coprac_mds.np_count.fillna(0)
preproc = (coprac_mds
.sort_values(['NPI', 'date', 'np_count',
'group_prac_zip_date_id'])
.groupby(['NPI', 'date']))
mins = preproc.first()
maxes = preproc.last()
mins = (mins
.reset_index()
.merge(coprac_ids)
.sort_values(['NPI', 'date'])
.reset_index(drop=True))
maxes = (maxes
.reset_index()
.merge(coprac_ids)
.sort_values(['NPI', 'date'])
.reset_index(drop=True))
copracs = mins.merge(maxes, on=['NPI', 'date'], suffixes=['_min', '_max'])
# mins = (coprac_mds
# .drop(columns='group_prac_zip_date_id')
# .groupby(['NPI', 'date'], as_index=False).min())
# maxes = (coprac_mds.drop(columns='group_prac_zip_date_id')
# .groupby(['NPI', 'date'], as_index=False).max())
# copracs = mins.merge(maxes.rename(columns={'np_count': 'np_count_max'}))
assert (copracs[['NPI', 'date']].drop_duplicates().shape[0]
== copracs.shape[0])
# Specialties. time varying?
pecos_specs = PECOS(['NPI', 'Primary specialty',
'Secondary specialty 1',
'Secondary specialty 2',
'Secondary specialty 3',
'Secondary specialty 4'],
drop_duplicates=False, date_var=True)
mddo = pecos_specs.physician_compare.merge(mddo)
prim_spec = mddo[['NPI', 'date', 'Primary specialty']].drop_duplicates()
prim_spec = prim_spec.groupby(['NPI', 'date']).first().reset_index()
# prim_spec = pd.concat([m[['NPI', 'date']],
# pd.get_dummies(
# m['Primary specialty'])],
# axis=1).groupby(['NPI', 'date']).sum()
# prim_spec = 1*(prim_spec > 0)
sec_spec = (mddo.drop(columns=['Primary specialty'])
.drop_duplicates()[mddo.drop(columns=['Primary specialty'])
.drop_duplicates()
.isnull().sum(1) < 4]
.set_index(['NPI', 'date'])
.stack()
.reset_index()
.drop(columns='level_2')
.dropna()
.drop_duplicates()
.rename(columns={0: 'secondary_spec'})
.query('secondary_spec!=" "'))
sec_spec = pd.concat([sec_spec[['NPI', 'date']],
pd.get_dummies(
sec_spec['secondary_spec'])],
axis=1).groupby(['NPI', 'date']).sum()
sec_spec = 1*(sec_spec > 0)
copracs = copracs.merge(prim_spec)
# copracs = copracs.merge(sec_spec, how='left')
copracs = copracs.merge(sec_spec.reset_index(), how='left')
copracs = copracs.fillna({x: 0 for x in sec_spec.columns})
# copracs = copracs.merge(mddo[['NPI', 'Primary specialty']])
pecos_education = PECOS(['NPI', 'Medical school name', 'Graduation year'])
copracs = (copracs
.merge(pecos_education
.physician_compare[['NPI', 'Graduation year']]
.groupby('NPI', as_index=False)
.first()))
copracs['gradyear'] = pd.qcut(copracs['Graduation year'], 20)
copracs = copracs.merge(npi.pgender, left_on='NPI', right_on='npi')
# waiver dates from new file
matches = pickle_read(
'/work/akilby/npi/Cache/Caches/output_1588990540883395.pkl')
s = SAMHSA()
samhsa_match = (s.samhsa[['WaiverType', 'samhsa_id', 'Date']]
.drop_duplicates())
samhsa_match = samhsa_match.merge(matches)
sam = (samhsa_match[['npi', 'Date', 'WaiverType']]
.groupby(['npi', 'WaiverType'])
.min()
.unstack(1)
.reset_index())
sam.columns = ['npi', 'Date30', 'Date100', 'Date275']
copracs = copracs.merge(sam, how='left')
copracs = copracs.drop(columns=['NPI', 'Graduation year'])
for variable in ['Group Practice PAC ID_min',
'Group Practice PAC ID_max',
'Number of Group Practice members_min',
'Number of Group Practice members_max']:
copracs[variable] = copracs[variable].astype(int)
copracs['State_min'] = copracs['State_min'].astype(str)
copracs['State_max'] = copracs['State_max'].astype(str)
copracs['Zip Code_min'] = copracs['Zip Code_min'].astype(str)
copracs['Zip Code_max'] = copracs['Zip Code_max'].astype(str)
copracs['Primary specialty'] = copracs['Primary specialty'].astype(str)
isid(copracs, ['npi', 'date'])
return copracs
def final_analysis_dataset(final):
npi = NPI(entities=1)
# gender
npi.retrieve('pgender')
# education
educ = (PECOS(['NPI', 'Medical school name', 'Graduation year'])
.physician_compare)
educ = educ.groupby('NPI', as_index=False).first()
educ['gradyear'] = pd.qcut(educ['Graduation year'], 20)
# Specialties. time varying?
pecos_specs = PECOS(['NPI', 'Primary specialty',
'Secondary specialty 1',
'Secondary specialty 2',
'Secondary specialty 3',
'Secondary specialty 4'],
drop_duplicates=False, date_var=True)
specs = (pecos_specs
.physician_compare
.drop(columns='date')
.drop_duplicates()
.set_index(['NPI', 'Primary specialty'])
.stack()
.reset_index()
.drop(columns='level_2')
.rename(columns={0: 'secondary_spec'}))
npi.retrieve('ptaxcode')
taxcodes = npi.ptaxcode[['npi', 'ptaxcode']]
# waiver dates from new file
matches = pickle_read(
'/work/akilby/npi/Cache/Caches/output_1588990540883395.pkl')
s = SAMHSA()
samhsa_match = (s.samhsa[['WaiverType', 'samhsa_id', 'Date', 'State']]
.drop_duplicates())
samhsa_match = samhsa_match.merge(matches)
sam2 = (samhsa_match[['npi', 'State', 'Date', 'WaiverType']]
.groupby(['npi', 'State', 'WaiverType'])
.min()
.unstack(2)
.reset_index())
sam2.columns = ['npi', 'State', 'Date30', 'Date100', 'Date275']
# ####
final = final.reset_index().merge(educ, how='left')
final = final.rename(columns={'NPI': 'npi'}).merge(npi.pgender)
# because I added in the state column, I now lose 884 Mds
final = final.merge(sam2, how='left')
specs = (specs
.set_index('NPI')
.stack()
.reset_index()
.drop(columns='level_1')
.drop_duplicates()
.rename(columns={0: 'spec', 'NPI': 'npi'})
.query('spec!=" "'))
specs = specs.merge(final.npi.drop_duplicates())
taxcodes = taxcodes.merge(final.npi.drop_duplicates())
tax_desc = (taxcodes
.merge(provider_taxonomies(),
left_on='ptaxcode', right_on='TaxonomyCode')
[['npi', 'ptaxcode', 'Classification', 'Specialization']]
.set_index(['npi', 'ptaxcode'])
.stack()
.reset_index()
.drop(columns='level_2')
.rename(columns={0: 'spec'})
.assign(spec=lambda df: df.spec.str.upper()))
tax_desc.loc[lambda df: df.spec.str.endswith(' -'),
'spec'] = (tax_desc
.loc[lambda df: df.spec.str.endswith(' -')]
.spec.str.replace(' -', ''))
tax_desc = (tax_desc[~((tax_desc
.ptaxcode
.isin(provider_taxonomies()
.query('Specialization=="General Practice"')
.TaxonomyCode) &
(tax_desc.spec == 'GENERAL PRACTICE')))])
allspec = specs.append(tax_desc.drop(columns='ptaxcode')).drop_duplicates()
allspec.loc[lambda df: df.spec == 'PREVENTATIVE MEDICINE',
'spec']='PREVENTIVE MEDICINE'
allspec = allspec.assign(spec=lambda df: df.spec.str.strip())
# t = (taxcodes
# .merge(final[~final.npi.isin(specs.npi)].npi.drop_duplicates())
# .rename(columns={'ptaxcode': 'spec'}))
# t = specs.append(t)
# new
# taxes = (t.merge(provider_taxonomies(),
# left_on='spec', right_on='TaxonomyCode')
# [['npi', 'spec', 'Classification']]
# .drop_duplicates()
# .dropna()
# .append(t.merge(provider_taxonomies(),
# left_on='spec', right_on='TaxonomyCode')
# .query('Specialization!="General Practice"')
# [['npi', 'spec', 'Specialization']]
# .drop_duplicates()
# .dropna()
# .rename(columns={'Specialization': 'Classification'}))
# .sort_values(['npi', 'spec', 'Classification'])
# .assign(spec=lambda df: df.Classification.str.upper().str.strip())
# )
# taxes.loc[lambda df: df.spec.str.endswith(' -'),
# 'spec'] = (taxes
# .loc[lambda df: df.spec.str.endswith(' -')]
# .spec.str.replace(' -', ''))
# taxes = taxes.drop(columns='Classification')
# taxes = (t
# .merge(provider_taxonomies(),
# how='left', left_on='spec',
# right_on='TaxonomyCode', indicator=True)
# .query('_merge=="left_only"')[['npi', 'spec']]
# .append(taxes)
# .drop_duplicates())
# t = taxes.copy()
t2 = allspec[allspec.spec.isin(allspec.spec.value_counts()[
allspec.spec.value_counts() > 500].index)]
t2 = (pd.concat([t2, | pd.get_dummies(t2.spec) | pandas.get_dummies |
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import plotly.express as px
from pathlib import Path
from functools import lru_cache
import statsmodels.formula.api as smf
from datetime import datetime
import pandasdmx as pdmx
plt.style.use(
"https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt"
)
@st.cache
def prep_gdp_output_codes():
hdf = pd.read_excel(Path("data", "uk_gdp_output_hierarchy.xlsx"), header=None)
hdf = hdf.dropna(how="all", axis=1)
for i in range(3):
hdf.iloc[i, :] = hdf.iloc[i, :].fillna(method="ffill")
hdf = hdf.T
hdf["total"] = hdf[3].str.contains("Total")
hdf = hdf.query("total==False")
hdf = hdf.drop("total", axis=1)
for col in range(5):
hdf[col] = hdf[col].str.lstrip().str.rstrip()
hdf = hdf.rename(columns={4: "section", 5: "code"})
return hdf
def get_uk_regional_gdp():
# current year
latest_year = datetime.now().year - 1
# Tell pdmx we want OECD data
oecd = pdmx.Request("OECD")
# Set out everything about the request in the format specified by the OECD API
data = oecd.data(
resource_id="REGION_ECONOM",
key="1+2.UKC.SNA_2008.GDP.REG+CURR_PR.ALL.2017+2018+2019+2020/all?",
).to_pandas()
    # example query that works:
    # "https://stats.oecd.org/restsdmx/sdmx.ashx/GetData/REGION_ECONOM/1+2.GBR+UKC+UKC11+UKC12.SNA_2008.GDP.REG+CURR_PR+USD_PPP+REAL_PR+REAL_PPP+PC+PC_CURR_PR+PC_USD_PPP+PC_REAL_PR+PC_REAL_PPP.ALL.2001+2002+2003+2004+2005+2006+2007+2008+2009+2010+2011+2012+2013+2014+2015+2016+2017+2018+2019+2020/all?"
    df = pd.DataFrame(data).reset_index()
    return df
@st.cache
def ons_blue_book_data(code):
data = grab_ONS_time_series_data("BB", code)
xf = pd.DataFrame(pd.json_normalize(data["years"]))
xf = xf[["year", "value"]]
xf["year"] = xf["year"].astype(int)
xf["value"] = xf["value"].astype(float)
xf["title"] = data["description"]["title"]
xf["code"] = code
xf = pd.DataFrame(xf.loc[xf["year"].argmax(), :]).T
return xf
@st.cache
@lru_cache(maxsize=32)
def ons_get_gdp_output_with_breakdown():
df = prep_gdp_output_codes()
xf = pd.DataFrame()
for code in df["code"].unique():
xf = pd.concat([xf, ons_blue_book_data(code)], axis=0)
df = pd.merge(df, xf, on=["code"], how="inner")
    # for later treemap use, only use highest level name if hierarchy has
# missing levels
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
# now, any nones with non-none children must be swapped
df.loc[(df[2].isnull()) & (~df[3].isnull()), [2, 3]] = df.loc[
(df[2].isnull()) & (~df[3].isnull()), [3, 2]
].values
df.loc[(df[0] == df[1]), [1]] = df.loc[(df[0] == df[1]), [2]].values
df.loc[(df[1] == df[2]), [2]] = df.loc[(df[1] == df[2]), [3]].values
# another round of this
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
df.loc[(df[3] == df[2]), [3]] = None
return df
@st.cache
def grab_ONS_time_series_data(dataset_id, timeseries_id):
"""
This function grabs specified time series from the ONS API.
"""
api_endpoint = "https://api.ons.gov.uk/"
api_params = {"dataset": dataset_id, "timeseries": timeseries_id}
url = (
api_endpoint
+ "/".join(
[x + "/" + y for x, y in zip(api_params.keys(), api_params.values())][::-1]
)
+ "/data"
)
return requests.get(url).json()
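# Example usage of grab_ONS_time_series_data (illustrative; the dataset/series IDs
# below, "QNA" and "ABMI" for chained-volume GDP, are an assumption about the ONS API):
#   data = grab_ONS_time_series_data("QNA", "ABMI")
#   df_gdp = ons_clean_qna_data(data)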
def ons_clean_qna_data(data):
if data["quarters"] != []:
df = pd.DataFrame(pd.json_normalize(data["quarters"]))
df["date"] = (
pd.to_datetime(
df["date"].apply(lambda x: x[:4] + "-" + str(int(x[-1]) * 3)),
format="%Y-%m",
)
+ pd.tseries.offsets.MonthEnd()
)
else:
df = pd.DataFrame(pd.json_normalize(data["months"]))
df["date"] = (
| pd.to_datetime(df["date"], format="%Y %b") | pandas.to_datetime |
import os
import mlflow.keras
import numpy as np
import pandas as pd
# Working directory must be the higher .../app folder
if str(os.getcwd())[-3:] != 'app': raise Exception(f'Working dir must be .../app folder and not "{os.getcwd()}"')
from app.z_helpers import helpers as my_helpers
from app.d_prediction.a_tf_base import median_scaling, _get_prep_data, _reformat_DF
from app.d_prediction.NN_tensorflow_models import TF_ERROR_METRICS
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from xgboost import XGBRegressor
from hyperopt.early_stop import no_progress_loss
import mlflow
def _optimize_obj(params):
#mlflow.autolog()
def _eval(model, X, y):
y_pred = model.predict(X)
print(y_pred)
scores = {}
for metric in TF_ERROR_METRICS:
            scores[metric.name] = float(metric(y, y_pred))
return scores
dataset = _get_prep_data(params['train'], params['val'], params['test'], flatten=True, keep_last_n_periods=params['backlooking_period'])
model = XGBRegressor(n_estimators=int(params['n_estimators']),
booster=params['booster'],
gamma=params['gamma'],
max_depth=params['max_depth'],
eta=params['eta'],
min_child_weight=params['min_child_weight'],
nthread=params['nthread'],
random_state=params['seed'],
verbosity=params['silent'],
subsample=params['subsample'],
colsample_bytree=params['colsample_bytree'],
tree_method=params['tree_method'])
results = model.fit(dataset['train_X'].numpy(), dataset['train_y'].numpy(),
eval_set=[(dataset['train_X'].numpy(), dataset['train_y'].numpy()),
(dataset['val_X'].numpy(), dataset['val_y'].numpy())],
eval_metric=params['eval_metric'],
verbose=params['silent'])
final_results = {}
final_results['train'] = _eval(results, dataset['train_X'].numpy(), dataset['train_y'].numpy())
final_results['val'] = _eval(results, dataset['val_X'].numpy(), dataset['val_y'].numpy())
final_results['test'] = _eval(results, dataset['test_X'].numpy(), dataset['test_y'].numpy())
out = {'loss': final_results['val']['mean_absolute_error'], 'status': STATUS_OK, 'results': final_results, 'params': params}
with mlflow.start_run() as run:
mlflow.xgboost.log_model(results, '')
mlflow_params = {'kwargs': params, 'model_name': 'XGBoost'}
for set in ['train', 'val', 'test']:
mlflow_params[f'metrics_{set}'] = final_results[set]
mlflow_saved = my_helpers.mlflow_last_run_add_param(param_dict=mlflow_params)
return out
def _find_optimal_model(train_ds, val_ds, test_ds, data_props, examples):
search_space = {
'backlooking_period': hp.choice('backlooking_period', [1, 2, 3, 4]),
'n_estimators': hp.quniform('n_estimators', 100, 1000, 1),
'eta': hp.quniform('eta', 0.025, 0.5, 0.025),
        # hp.quniform casts max_depth to float instead of int, so use
        # hp.choice over an integer range instead.
'max_depth': hp.choice('max_depth', np.arange(1, 14, dtype=int)),
'min_child_weight': hp.quniform('min_child_weight', 1, 6, 1),
'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
'gamma': hp.quniform('gamma', 0.5, 1, 0.05),
'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
'eval_metric': 'mae',
# Increase this number if you have more cores. Otherwise, remove it and it will default
        # to the maximum number.
'nthread': None,
'booster': 'gbtree',
'tree_method': 'exact',
'silent': 0,
'seed': 42
}
search_space['train'] = train_ds
search_space['val'] = val_ds
search_space['test'] = test_ds
search_space['iter_step'] = data_props['iter_step']
trials = Trials()
best = fmin(_optimize_obj,
search_space,
algo=tpe.suggest,
trials=trials,
early_stop_fn=no_progress_loss(iteration_stop_count=25, percent_increase=0.025),
max_evals=100)
best_result = trials.best_trial['result']['results']
best_params = trials.best_trial['result']['params']
best_result = pd.DataFrame(best_result)
best_result = _reformat_DF(best_result, data_props['iter_step'])
best_params = pd.Series(best_params, name=data_props['iter_step'])
return best_result, best_params
def run_model_across_time(data_obj, model_name, y_col, max_serach_iterations=200, redo_serach_best_model=False, max_backlooking=None, example_len=5, example_list=[], export_results=False):
results_storage = {}
def _reformat_DF(df, head):
df = df.append(pd.Series(df.columns.tolist(), name='columns', index=df.columns.tolist()))
df = df.append(pd.Series([head] * len(df.columns.tolist()), name='time_step', index=df.columns.tolist()))
df.columns = [f'{head}_{col}' for col in df.columns.tolist()]
return df
results_storage = {'error': pd.DataFrame(), 'model': | pd.DataFrame() | pandas.DataFrame |
"""
This module contains simple functions for pandas library.
"""
import sys
import pandas as pd
from unidecode import unidecode
import multiprocessing
from os import listdir
from os.path import isfile, join
import numpy as np
def format_columns_name(df_data):
"""
Function to format a DataFrame columns name, removing special characters, making lower case and replace whitespaces for _
Parameters
| ----------
df_data : pandas DataFrame
The DataFrame object to normalize columns
Returns
----------
df_data: pandas DataFrame
The DataFrame object with normalized columns names
"""
if not isinstance(df_data, pd.DataFrame):
raise TypeError('df_data should be instance of {}'.format(pd.DataFrame))
df_data = df_data.copy()
df_data.columns = [unidecode(str(x)).lower().strip().replace(' ', '_') for x in df_data.columns]
return df_data
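# Example usage of format_columns_name (illustrative only):
#   df = pd.DataFrame({'Número de Pedido': [1], ' Total Value ': [9.9]})
#   format_columns_name(df).columns.tolist()
#   # -> ['numero_de_pedido', 'total_value']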
def print_value_counts(df_data, field, msg='{index} : {count} ({percentage:.2f}%)', limit=None):
"""
    Print the value counts of a DataFrame column using a format message
    Parameters
    ----------
    df_data (DataFrame): Original data whose values are counted
    field (String): the label of the column whose values are counted
    msg (String): the message to print, with placeholders {index} {count} {percentage}
    limit (int): Max number of unique values to print (if the list is too long)
"""
if not isinstance(df_data, pd.DataFrame):
raise TypeError('df_data should be instance of {}'.format(pd.DataFrame))
if not limit:
limit = df_data[field].unique().size
df_data[field].value_counts().reset_index(name='count').assign(
percentage=lambda x: 100 * x['count'] / x['count'].sum()).assign(total=lambda x: x['count'].sum()).head(
limit).apply(lambda x: print(msg.format(index=x['index'], count=x['count'], percentage=x['percentage'])), axis=1)
def get_field_from_df(value, value_field, return_field, df_data, return_first_value=True, null_return=None):
"""
Function to return a value from a DataFrame with a filter. Used on apply functions
Parameters
----------
value : Object
Data value to search on DataFrame
value_field : String
DataFrame field to compare with value
return_field : String
DataFrame column to return
df_data : pandas DataFrame
DataFrame to search value
return_first_value : boolean (default True):
If return the first value from DataFrame
null_return : Object (defaut None)
Value to return if did not found value on DataFrame
Returns
-------
data : Object
The DataFrame value of return_field or null_return
"""
try:
        if return_first_value:
            return df_data.loc[df_data[value_field] == value, return_field].values[0]
        else:
            return df_data.loc[df_data[value_field] == value, return_field].values
    except Exception:
        return null_return
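# Example usage of get_field_from_df (illustrative only; the lookup table is hypothetical):
#   df_lookup = pd.DataFrame({'id': [1, 2], 'name': ['ana', 'bob']})
#   get_field_from_df(2, 'id', 'name', df_lookup)                          # -> 'bob'
#   get_field_from_df(9, 'id', 'name', df_lookup, null_return='missing')   # -> 'missing'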
def _apply_function(args):
"""
Calls an apply function on a DataFrame with a set of parameters
Parameters
----------
args: dict
Parameters for functon
Returns
-------
data.apply : Function
The result of the apply function
"""
df_data, function, kwargs = args
return df_data.apply(function, **kwargs)
def multiprocessing_apply(df_data, function, **kwargs):
"""
Pandas apply function using multiprocessors
Parameters
----------
df_data : pandas DataFrame
DataFrame for the apply function
function : Function
Function to apply on DataFrame
**kwargs : dict
Function parameters
Returns
-------
res : list
The result of the apply function
"""
try:
num_cores = kwargs.pop('num_cores')
except Exception as e:
num_cores = 2
try:
verbose = kwargs.pop('verbose')
except Exception:
verbose = False
if verbose:
print('Creating multiprocessing with {} cores'.format(num_cores))
pool = multiprocessing.Pool(processes=num_cores)
df_list = []
step = int(df_data.shape[0] / num_cores)
for i in range(0, num_cores):
if i == num_cores - 1:
df_append = df_data[i * step:]
else:
df_append = df_data[i * step:(1 + i) * step]
if df_append.shape[0] > 0:
df_list.append(df_append)
if verbose:
print('Mapping process')
try:
res = pool.map(_apply_function, [(df, function, kwargs) for df in df_list])
pool.close()
return pd.concat(list(res))
except Exception as e:
if verbose:
            print('Error: {}'.format(str(e)))
pool.terminate()
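# Example usage of multiprocessing_apply (hedged sketch; the row function is
# hypothetical and must be defined at module level so it can be pickled):
#   def _row_total(row):
#       return row['price'] * row['quantity']
#   totals = multiprocessing_apply(df_sales, _row_total, axis=1, num_cores=4)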
def join_dataframe_from_folder(folder_path, set_file=True, subfolders=True, format_columns=True):
"""
    Join several DataFrames from a folder, optionally including DataFrames from subfolders
    Parameters
    ----------
    folder_path : String
        Path of the initial folder
    set_file : bool (default True)
        Whether to add a column identifying the source file name of each DataFrame
    subfolders : bool (default True)
        Whether to also join files found in subfolders
    format_columns : bool (default True)
        Whether to normalize the column names of the final DataFrame
Returns
-------
df_return : DataFrame
A DataFrame from all files in folder
"""
df_return = pd.DataFrame()
try:
for item in listdir(folder_path):
if isfile(join(folder_path, item)):
if set_file:
df_return = df_return.append(_infer_dataframe_filetype(join(folder_path, item)).assign(file=item), sort=False)
else:
df_return = df_return.append(_infer_dataframe_filetype(join(folder_path, item)), sort=False)
elif subfolders:
                df_return = df_return.append(join_dataframe_from_folder(join(folder_path, item), set_file, subfolders, format_columns), sort=False)
except Exception as e:
raise(e)
if format_columns:
return format_columns_name(df_return)
return df_return
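# Hedged usage sketch for join_dataframe_from_folder (the folder path is illustrative only):
#   df_all = join_dataframe_from_folder('data/exports', set_file=True, subfolders=True)
#   # df_all then stacks every file found under data/exports, with a 'file' column
#   # identifying the source file of each row.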
def _infer_dataframe_filetype(path, type=None, encoding=None, sep={',': 0, ';': 0, '\t': 0}):
"""
Infer DataFrame filetype using file extension
Parameters
----------
path : String
Path of file
type : String (default None)
Type to read file (None to infer file)
encoding : String (default None)
File encoding
    sep : dict (default {',': 0, ';': 0, '\t': 0})
Dict of characters to try to split csv files
Returns
-------
dataframe : pandas DataFrame
A DataFrame object with path data
"""
if not type:
if '.xls' in path:
type = 'excel'
else:
type = 'csv'
if type == 'excel':
try:
return pd.read_excel(path, encoding='latin')
except Exception as e_latin:
try:
                return pd.read_excel(path, encoding='utf8')
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import numpy as np
today = datetime.date.today()
# Pre-processing for now, will include in __main__ later. Configure data source
df = pd.read_csv('historicalPriceData.csv')
df['Date'] = pd.to_datetime(df['Date'])
import numpy as np
np.warnings.filterwarnings('ignore') #to not display numpy warnings... be careful
import pandas as pd
from mpi4py import MPI
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import call
from orca import *
from orca.data import *
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
# this whole script will run on all processors requested by the job script
with open('orca/data/scenario_list-NSGAIII-50000.txt') as f:
scenarios = f.read().splitlines()
result_ids = ['SHA_storage','SHA_out','SHA_target','SHA_out_to_delta','SHA_tocs','FOL_storage','FOL_out',
'FOL_target','FOL_out_to_delta','FOL_tocs','ORO_storage','ORO_out','ORO_target','ORO_out_to_delta',
'ORO_tocs','DEL_in','DEL_out','DEL_TRP_pump','DEL_HRO_pump','SHA_sodd','SHA_spill',
'ORO_sodd','ORO_spill','FOL_sodd','FOL_spill', 'DEL_X2','ORO_forecast','FOL_forecast','SHA_forecast','DEL_SODD_CVP','DEL_SODD_SWP']
input_ids = ['SHA_in_tr', 'ORO_in_tr','FOL_in_tr','ORO_fnf','BND_fnf', 'SHA_fnf','FOL_fnf','SR_WYI','SR_WYT_rolling','SR_WYT', 'WYT_sim','WYI_sim',
'SHA_fci', 'ORO_fci', 'FOL_fci', 'BND_swe', 'ORO_swe', 'YRS_swe', 'FOL_swe','SHA_remaining_flow','ORO_remaining_flow','FOL_remaining_flow',
'octmar_flow_to_date','octmar_mean','octmar_std','aprjul_flow_to_date','aprjul_mean','aprjul_std','aprjul_slope','aprjul_intercept']
comm = MPI.COMM_WORLD # communication object
rank = comm.rank # what number processor am I?
s = scenarios[rank]
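# Hedged usage note: with the rank-to-scenario mapping above, the script is expected to be
# launched with one MPI process per scenario, e.g. (command and script name are illustrative only):
#   mpirun -n <number_of_scenarios> python run_projection_scenarios.py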
#Define some parameters and adaptations
window_type = 'expanding'
window_length = 40
index_exceedence_sac = 8
shift = 0
SHA_shift = shift
ORO_shift = shift
FOL_shift = shift
SHA_exceedance = {"W": 2, "AN": 2, "BN": 2, "D": 2, "C": 2}
FOL_exceedance = {"W": 10, "AN": 10, "BN": 5, "D": 2, "C": 1}
ORO_exceedance = {"W": 2, "AN": 2, "BN": 2, "D": 2, "C": 2}
#####Which files should stay on cluster
remove_processed = True
remove_forecasted = True
remove_model_results = False
consolidate_outputs = True  # assumed flag: gates the result-consolidation blocks below
call(['mkdir', 'orca/data/scenario_runs/%s'%s])
input_df = pd.read_csv('input_climate_files/%s_input_data.csv'%s, index_col = 0, parse_dates = True)
gains_loop_df = pd.read_csv('orca/data/historical_runs_data/gains_loops.csv', index_col = 0, parse_dates = True)
OMR_loop_df = pd.read_csv('orca/data/historical_runs_data/OMR_loops.csv', index_col = 0, parse_dates = True)
proj_ind_df = process_projection(input_df,gains_loop_df,OMR_loop_df,'orca/data/json_files/gains_regression.json','orca/data/json_files/inf_regression.json',window = window_type)
proj_ind_df.to_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(s,s))
proj_ind_df = pd.read_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(s,s), index_col = 0, parse_dates = True)
WYI_stats_file = pd.read_csv('orca/data/forecast_regressions/WYI_forcasting_regression_stats.csv', index_col = 0, parse_dates = True)
carryover_stats_file = pd.read_csv('orca/data/forecast_regressions/carryover_regression_statistics.csv', index_col = 0, parse_dates = True)
forc_df= projection_forecast(proj_ind_df,WYI_stats_file,carryover_stats_file,window_type,window_length, index_exceedence_sac)
forc_df.to_csv('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(s,s))
model = Model('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(s,s),
'orca/data/historical_runs_data/results.csv',SHA_shift, ORO_shift, FOL_shift,sd='10-01-1999',projection = True, sim_gains = True) #climate scenario test
projection_results = model.simulate() # takes a while... save results
projection_results.to_csv('orca/data/scenario_runs/%s/%s-results.csv'%(s,s))
comm.barrier()
if consolidate_outputs:
if comm.rank <= 30:
obj = result_ids[comm.rank]
dfobj = pd.DataFrame()
for sc in scenarios:
projection_results = pd.read_csv('orca/data/scenario_runs/%s/%s-results.csv'%(sc,sc), index_col = 0, parse_dates = True)
dfobj[sc] = projection_results[obj]
dfobj.to_csv('orca/data/climate_results/%s.csv'%(obj))
if consolidate_outputs:
if comm.rank >= 31 and comm.rank <=59:
obj = input_ids[comm.rank-31]
        dfobj = pd.DataFrame()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
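    # Hedged usage sketch for applymap (data values are illustrative only):
    #   df = DataFrame({'a': [1, -2], 'b': [-3, 4]})
    #   df.applymap(abs)   # applies abs to every element, mirroring pandas semantics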
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
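    # Hedged usage sketch for groupby (column names are illustrative only):
    #   df = DataFrame({'team': ['a', 'a', 'b'], 'score': [1, 2, 3]})
    #   df.groupby('team').sum()   # -> per-team totals, matching pandas semantics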
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
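    # Hedged usage sketch for isin (values are illustrative only):
    #   df = DataFrame({'a': [1, 2, 3]})
    #   df.isin([1, 3])   # -> boolean DataFrame marking which cells appear in [1, 3]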
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
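    # Hedged usage sketch for dropna (values are illustrative only):
    #   df = DataFrame({'a': [1.0, None], 'b': [2.0, 3.0]})
    #   df.dropna()                    # drops the row containing the missing value
    #   df.dropna(axis=1, how='any')   # drops column 'a' instead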
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add this this DataFrame.
axis: The axis to apply addition over. Only applicaable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
                index = pandas.Index([other.name], name=self.index.name)
from __future__ import print_function
import os
import sys
###########################################################
# Change to your own library path
###########################################################
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
import pytz
# date_time format
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
ema_col = ['id', 'survey_type', 'delivered_ts', 'completed_ts', 'activity', 'location', 'atypical', 'stress',
'stressor_partner', 'stressor_fam', 'stressor_breakdown', 'stressor_money', 'stressor_selfcare', 'stressor_health',
'stressor_otherhealth', 'stressor_household', 'stressor_child', 'stressor_discrimination', 'stressor_none',
'moststressful', 'moststressful_time', 'work_location', 'attend_fidam', 'attend_fidpm', 'attend_hasp',
'attend_pgy1did', 'attend_pgy2did', 'attend_pgy3did', 'attend_none', 'work_start', 'work_end',
'jobperformance', 'jobperformance_best', 'jobsatisfaction', 'sleepquant', 'sleepqual', 'alcoholuse',
'alcohol_total', 'tobaccouse', 'tobacco_total', 'physactivity', 'physactivity_total',
'workstressor_computer', 'workstressor_patientint', 'workstressor_conflict', 'workstressor_census',
'workstressor_late', 'workstressor_paged', 'workstressor_supervise', 'workstressor_admin',
'workstressor_diffcases', 'workstressor_death', 'charting', 'charting_total', 'coworkertrust',
'work_inperson', 'work_digital', 'support_inperson', 'support_digital', 'socialevents', 'hangouts', 'wellness']
pt = pytz.timezone('US/Pacific')
def make_dir(data_path):
if os.path.exists(data_path) is False:
os.mkdir(data_path)
def check_micu_data_valid(data_time, start_date1, end_date1, start_date2, end_date2):
cond1 = (pd.to_datetime(data_time) - pd.to_datetime(start_date1)).total_seconds() >= 0
    cond2 = (pd.to_datetime(end_date1) + timedelta(days=1) - pd.to_datetime(data_time)).total_seconds() >= 0
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# It exports clustering results to an Excel file.
# %% [markdown] tags=[]
# # Modules loading
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
from pathlib import Path
from IPython.display import display
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import conf
from entity import Trait
from utils import get_git_repository_path
# %% [markdown]
# # Settings
# %% tags=[]
DELIVERABLES_BASE_DIR = get_git_repository_path() / "data"
display(DELIVERABLES_BASE_DIR)
# %% tags=[]
OUTPUT_DIR = DELIVERABLES_BASE_DIR / "clustering" / "partitions"
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
display(OUTPUT_DIR)
# %%
N_TOP_PARTITIONS = 5
# %% [markdown] tags=[]
# # Load data
# %% [markdown] tags=[]
# ## PhenomeXcan (S-MultiXcan)
# %% tags=[]
INPUT_SUBSET = "z_score_std"
# %% tags=[]
INPUT_STEM = "projection-smultixcan-efo_partial-mashr-zscores"
# %% tags=[]
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
f"{INPUT_SUBSET}-{INPUT_STEM}.pkl",
).resolve()
# %% tags=[]
data = pd.read_pickle(input_filepath)
# %% tags=[]
data.shape
# %% tags=[]
data.head()
# %% [markdown] tags=[]
# ## Best clustering partitions
# %%
CONSENSUS_CLUSTERING_DIR = Path(
conf.RESULTS["CLUSTERING_DIR"], "consensus_clustering"
).resolve()
display(CONSENSUS_CLUSTERING_DIR)
# %% tags=[]
input_file = Path(CONSENSUS_CLUSTERING_DIR, "best_partitions_by_k.pkl").resolve()
display(input_file)
# %% tags=[]
best_partitions = pd.read_pickle(input_file)
# %%
best_partitions = best_partitions[best_partitions["selected"]]
# %% tags=[]
best_partitions.shape
# %% tags=[]
best_partitions.head()
# %% [markdown] tags=[]
# # Export clustering results
# %%
def get_trait_objs(phenotype_full_code):
if Trait.is_efo_label(phenotype_full_code):
traits = Trait.get_traits_from_efo(phenotype_full_code)
else:
traits = [Trait.get_trait(full_code=phenotype_full_code)]
# sort by sample size
return sorted(traits, key=lambda x: x.n_cases / x.n, reverse=True)
def get_trait_description(phenotype_full_code):
traits = get_trait_objs(phenotype_full_code)
return traits[0].description
def get_trait_n(phenotype_full_code):
traits = get_trait_objs(phenotype_full_code)
return traits[0].n
def get_trait_n_cases(phenotype_full_code):
traits = get_trait_objs(phenotype_full_code)
return traits[0].n_cases
def num_to_int_str(num):
    if pd.isnull(num):
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 2 22:43:12 2021
@author: obnit
"""
import pandas as pd , matplotlib.pyplot as plt , numpy as np
df = pd.read_csv('Export/tested.csv')
df.Date = pd.to_datetime(df.Date)
df.set_index('Date', inplace=True)
df = df.resample('MS').sum()
df['Percent'] = ((df['Positive']/df['Negative'])*100)
df1 = pd.read_csv('Export/reported.csv')
df1.Date = pd.to_datetime(df1.Date)
df1.set_index('Date', inplace=True)
df1 = df1.resample('MS').sum()
df1['Cumulative cases'] = df1["New cases"].cumsum()
df2 = pd.read_csv('Export/export_price.csv')
df2.Date = pd.to_datetime(df2.Date)
"""
Entry points for compass
"""
from __future__ import absolute_import, print_function, division
import argparse
import os
import multiprocessing
import numpy as np
import pandas as pd
import sys
import subprocess as sp
import logging
import datetime
import json
import gzip
from functools import partial
from tqdm import tqdm
from six import string_types
from math import ceil
from .compass import cache
from ._version import __version__
from .compass.torque import submitCompassTorque
from .compass.algorithm import singleSampleCompass, maximize_reaction_range, maximize_metab_range, initialize_cplex_problem
from .compass.algorithm_t import runCompassParallelTransposed
from .compass.microclustering import microcluster, pool_matrix_cols, unpool_columns
from .models import init_model
from .compass.penalties import eval_reaction_penalties, compute_knn
from . import globals
from . import utils
def parseArgs():
"""Defines the command-line arguments and parses the Compass call
Returns
-------
argparse.Namespace
"""
parser = argparse.ArgumentParser(
prog="Compass",
description="Compass version "+str(__version__)+
". Metabolic Modeling for Single Cells. "
"For more details on usage refer to the documentation: https://yoseflab.github.io/Compass/")
parser.add_argument("--data", help="Gene expression matrix."
" Should be a tsv file with one row per gene and one column per sample",
metavar="FILE")
parser.add_argument("--data-mtx", help="Gene expression matrix."
" Should be a matrix market (mtx) formatted gene file. Must be followed by a tsv file with row names corresponding to genes and optionally that can be followed by a tsv file with sample names. ",
nargs="+",
metavar="FILE")
parser.add_argument("--model", help="Metabolic Model to Use."
" Currently supporting: RECON1_mat, RECON2_mat, or RECON2.2",
default="RECON2_mat",
choices=["RECON1_mat", "RECON2_mat", "RECON2.2"],
metavar="MODEL")
parser.add_argument("--species",
help="Species to use to match genes to model."
" Currently supporting: homo_sapiens or mus_musculus",
choices=["homo_sapiens", "mus_musculus"],
metavar="SPECIES"
#originally default, now required so users will not accidentally overlook it
)
parser.add_argument("--media", help="Which media to simulate",
#default="media1", #TODO:Brandon, where is media1 set?
metavar="MEDIA")
parser.add_argument("--output-dir", help="Where to store outputs",
default='.',
metavar="DIR")
parser.add_argument("--temp-dir", help="Where to store temporary files",
default='<output-dir>/_tmp',
metavar="DIR")
parser.add_argument("--torque-queue", help="Submit to a Torque queue",
metavar="QUEUE")
parser.add_argument("--num-processes",
help="Limit to <N> Processes. "
"Ignored when submitting job onto a queue",
type=int,
metavar="N")
parser.add_argument("--lambda",
help="Smoothing factor for single-cell data. Default is 0, should be"
" set between 0 and 1. For datasets where information sharing is appropriate, we often use 0.25.",
type=float,
default=0,
metavar="F")
parser.add_argument("--single-sample",
help=argparse.SUPPRESS,
type=int,
metavar="N")
#Arguments to help with schedueler scripts
parser.add_argument("--transposed",
help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("--sample-range",
help=argparse.SUPPRESS,
nargs=2)
parser.add_argument("--reaction-range",
help=argparse.SUPPRESS,
nargs=2)
parser.add_argument("--metabolite-range",
help=argparse.SUPPRESS,
nargs=2)
parser.add_argument("--generate-cache",
help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("--test-mode",
help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("--num-threads",
help="Number of threads to use per sample",
type=int, default=1,
metavar="N")
parser.add_argument(
"--and-function",
help="Which function used to aggregate AND associations",
choices=["min", "median", "mean"],
metavar="FXN",
default="mean")
parser.add_argument(
"--select-reactions",
help="Compute compass scores only for the reactions listed in the given file. FILE is expected to be textual, with one line per reaction (undirected, namely adding the suffix \"_pos\" or \"_neg\" to a line will create a valid directed reaction id). Unrecognized reactions in FILE are ignored.",
required=False,
metavar="FILE")
parser.add_argument(
"--select-subsystems",
help="Compute compass scores only for the subsystems listed in the given file. FILE is expected to be textual, with one line per subsystem. Unrecognized subsystems in FILE are ignored.",
required=False,
metavar="FILE")
parser.add_argument("--glucose", type=float,
required=False, help=argparse.SUPPRESS)
# Hidden argument. Used for batch jobs
parser.add_argument("--collect", action="store_true",
help=argparse.SUPPRESS)
# Also used for batch jobs
parser.add_argument("--config-file", help=argparse.SUPPRESS)
parser.add_argument("--num-neighbors",
help="Either effective number of neighbors for "
"gaussian penalty diffusion or exact number of "
"neighbors for KNN penalty diffusion",
default=30,
type=int,
metavar="N")
parser.add_argument("--symmetric-kernel", action="store_true",
help="Use symmetric TSNE kernel (slower)")
parser.add_argument("--input-weights",
help="File with input sample to sample weights",
required=False, metavar="FILE")
parser.add_argument("--penalty-diffusion",
help="Mode to use to share reaction penalty "
"values between single cells",
choices=["gaussian", "knn"],
metavar="MODE",
default="knn")
parser.add_argument("--no-reactions", action="store_true",
help="Skip computing scores for reactions")
parser.add_argument("--calc-metabolites", action="store_true",
help="Compute scores for metabolite "
"uptake/secretion")
parser.add_argument("--precache", action="store_true",
help="Preprocesses the model to find "
" maximum fluxes")
parser.add_argument("--input-knn", help="File with a precomputed knn graph for the samples. "
"File must be a tsv with one row per sample and (k+1) columns. The first column should be sample names, "
"and the next k columns should be indices of the k nearest neighbors (by their order in column 1)",
default=None, metavar="FILE")
parser.add_argument("--input-knn-distances", help="File with a precomputed knn graph for the samples. "
"File must be a tsv with one row per sample and (k+1) columns. The first column should be sample names, "
"and the next k columns should be distances to the k nearest neighbors of that sample",
default=None, metavar="FILE")
parser.add_argument("--output-knn", help="File to save kNN of data to. "
"File will be a tsv with one row per sample and (k+1) columns. The first column will be sample names, "
"and the next k columns will be indices of the k nearest neighbors (by their order in column 1)",
default=None, metavar="FILE")
parser.add_argument("--latent-space", help="File with latent space reprsentation of samples for knn clustering or microclustering. "
"File must a tsv with one row per sample and one column per dimension of the latent space.",
default=None, metavar="FILE")
parser.add_argument("--only-penalties", help="Flag for Compass to only compute the reaction penalties for the dataset.",
action="store_true", default=None)
parser.add_argument("--example-inputs", help="Flag for Compass to list the directory where example inputs can be found.",
action="store_true", default=None)
parser.add_argument("--microcluster-size",
type=int, metavar="C", default=None,
help="Target number of cells per microcluster")
parser.add_argument("--microcluster-file",
type=int, metavar="FILE", default=None,
help="File where a tsv of microclusters will be output. Defaults to micropools.tsv in the output directory.")
parser.add_argument("--microcluster-data-file",
type=int, metavar="FILE", default=None,
help="File where a tsv of average gene expression per microcluster will be output. Defaults to micropooled_data.tsv in the output directory.")
parser.add_argument("--anndata-output", help="Enables output as .h5ad format",
action="store_true")
#Hidden argument for any potential anndata obs or uns
parser.add_argument("--anndata-obs",
help=argparse.SUPPRESS,
default=None)
parser.add_argument("--anndata-uns",
help=argparse.SUPPRESS,
default=None)
#Hidden argument which tracks more detailed information on runtimes
parser.add_argument("--detailed-perf", action="store_true",
help=argparse.SUPPRESS)
#Hidden argument for testing purposes.
parser.add_argument("--penalties-file",
help=argparse.SUPPRESS,
default='')
#Hidden argument to choose the algorithm CPLEX uses. Barrier generally best choice.
#See - https://www.ibm.com/support/knowledgecenter/en/SS9UKU_12.10.0/com.ibm.cplex.zos.help/CPLEX/Parameters/topics/LPMETHOD.html
parser.add_argument("--lpmethod",
help=argparse.SUPPRESS,
default=4,
type=int)
#Hidden argument to choose the setting for Cplex's advanced basis setting. Generally 2 is the best, but for ease of testing I've added it here.
parser.add_argument("--advance",
help=argparse.SUPPRESS,
default=2,
type=int)
#Hidden argument to save argmaxes in the temp directory
parser.add_argument("--save-argmaxes", action="store_true",
help=argparse.SUPPRESS)
#Removes potential inflation of expression by isoforms
parser.add_argument("--isoform-summing",
choices=['legacy', 'remove-summing'],
default='legacy',
metavar="MODE",
help="Flag to stop isoforms of the same gene being summed/OR'd together (remove-summing) or kept (legacy). Defaults to legacy")
#Argument to output the list of needed genes to a file
parser.add_argument("--list-genes", default=None, metavar="FILE",
help="File to output a list of metabolic genes needed for selected metabolic model.")
parser.add_argument("--list-reactions", default=None, metavar="FILE",
help="File to output a list of reaction id's and their associated subsystem for selected metabolic model.")
args = parser.parse_args()
args = vars(args) # Convert to a Dictionary
load_config(args)
if not args['species']:
if args['data'] or args['data_mtx']:
parser.error("The --species argument must be specified for the species of the dataset input")
if args['list_genes']:
parser.error("The --species argument must be specified for the genes to list")
if args['data'] and args['data_mtx']:
parser.error("--data and --data-mtx cannot be used at the same time. Select only one input per run.")
if not args['data'] and not args['data_mtx']:
if not args['precache'] and not args['list_genes'] and not args['example_inputs'] and not args['list_reactions']:
parser.error("Nothing selected to do. Add arguments --data, --data-mtx, --precache, --list-genes, --list-reactions, or --example-inputs for Compass to do something.")
else:
if args['data_mtx']:
args['data'] = args['data_mtx']
else:
if type(args['data']) != list:
args['data'] = [args['data']]
args['data'] = [os.path.abspath(p) for p in args['data']]
if len(args['data']) == 2:
args['data'].append(None)
if args['input_weights']:
args['input_weights'] = os.path.abspath(args['input_weights'])
if args['select_reactions']:
args['select_reactions'] = os.path.abspath(args['select_reactions'])
if args['select_subsystems']:
args['select_subsystems'] = os.path.abspath(args['select_subsystems'])
if args['temp_dir'] == "<output-dir>/_tmp":
args['temp_dir'] = os.path.join(args['output_dir'], '_tmp')
args['output_dir'] = os.path.abspath(args['output_dir'])
args['temp_dir'] = os.path.abspath(args['temp_dir'])
if args['microcluster_size']:
if not args['microcluster_file']:
args['microcluster_file'] = os.path.join(args['output_dir'], 'micropools.tsv')
if not args['microcluster_data_file']:
args['microcluster_data_file'] = os.path.join(args['output_dir'], 'micropooled_data.tsv')
if args['input_knn']:
args['input_knn'] = os.path.abspath(args['input_knn'])
if args['input_knn_distances']:
args['input_knn_distances'] = os.path.abspath(args['input_knn_distances'])
if args['output_knn']:
args['output_knn'] = os.path.abspath(args['output_knn'])
if args['latent_space']:
args['latent_space'] = os.path.abspath(args['latent_space'])
if args['lambda'] < 0 or args['lambda'] > 1:
parser.error(
"'lambda' parameter cannot be less than 0 or greater than 1"
)
if args['generate_cache'] and \
(args['no_reactions'] or not args['calc_metabolites']):
parser.error(
"--generate-cache cannot be run with --no-reactions or "
"without --calc-metabolites" #Not sure about why this needs metabolites to calculated
)
if args['reaction_range']:
args['reaction_range'] = [int(x) for x in args['reaction_range']]
if args['metabolite_range']:
args['metabolite_range'] = [int(x) for x in args['metabolite_range']]
if args['sample_range']:
args['sample_range'] = [int(x) for x in args['sample_range']]
return args
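# Hedged command-line sketch for a typical Compass run (the executable name, file names
# and option values are illustrative only):
#   compass --data expression.tsv --species homo_sapiens --model RECON2_mat \
#           --num-processes 8 --output-dir ./compass_results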
def entry():
"""Entry point for the compass command-line script
"""
start_time = datetime.datetime.now()
args = parseArgs()
if args['example_inputs']:
print(os.path.join(globals.RESOURCE_DIR, "Test-Data"))
return
if args['list_genes'] is not None:
model = init_model(model=args['model'], species=args['species'],
exchange_limit=globals.EXCHANGE_LIMIT, media=args['media'],
isoform_summing=args['isoform_summing'])
genes = list(set.union(*[set(reaction.list_genes()) for reaction in model.reactions.values()]))
genes = str("\n".join(genes))
with open(args['list_genes'], 'w') as fout:
fout.write(genes)
fout.close()
return
if args['list_reactions'] is not None:
model = init_model(model=args['model'], species=args['species'],
exchange_limit=globals.EXCHANGE_LIMIT, media=args['media'],
isoform_summing=args['isoform_summing'])
reactions = {r.id:r.subsystem for r in model.reactions.values()}
with open(args['list_reactions'], 'w') as fout:
json.dump(reactions, fout)
fout.close()
return
if args['data']:
if not os.path.isdir(args['output_dir']):
os.makedirs(args['output_dir'])
#Check if the arguments passed in will be the same as the previous run
temp_args_file = os.path.join(args['temp_dir'], "_temp_args.json")
if not os.path.isdir(args['temp_dir']) and args['temp_dir'] != '/dev/null':
os.makedirs(args['temp_dir'])
with open(temp_args_file, 'w') as fout:
json.dump(args, fout)
fout.close()
elif os.path.exists(temp_args_file):
#Handle ths before making logger because the logger redirected outputs
with open(temp_args_file, 'r') as fin:
temp_args = json.load(fin)
fin.close()
ignored_diffs = ['num_processes', 'only_penalties', 'num_threads', 'torque_queue', 'single_sample']
diffs = [x for x in args.keys() if args[x] != temp_args[x] and x not in ignored_diffs]
if len(diffs) > 0:
table = pd.DataFrame({'temp_dir':{x:temp_args[x] for x in diffs},
'current':{x:args[x] for x in diffs}})
print("Warning: The arguments used in the temporary directory (", args['temp_dir'],
") are different from current arguments. Cached results may not be compatible with current settings")
print("Differing arguments:")
print(table)
print("Enter 'y' or 'yes' if you want to use cached results.\n",
"Otherwise press enter and rerun Compass after removing/renaming the temporary directory or changing the --temp-dir argument")
if sys.version_info.major >= 3:
ans = input()
else:
ans = raw_input()
if ans != 'y' and ans != 'yes':
return
else:
print("Warning: Temporary directory found without saved arguments. Cached results may not be compatible with current settings")
globals.init_logger(args['output_dir'])
# Log some things for debugging/record
logger = logging.getLogger('compass')
logger.debug("Compass version: " + __version__)
try:
commit = sp.check_output(
["git", '--git-dir', globals.GIT_DIR, "rev-parse", "--short",
"HEAD"],
stderr=open(os.devnull, 'w')
)
logger.debug("Git commit: " + commit.decode())
except sp.CalledProcessError:
logger.debug("Git commit: Not in Git repo")
except sp.SubprocessError:
logger.debug("Git command failed to execute")
logger.debug("Python Version:")
logger.debug(sys.version)
logger.debug("Python prefix: " + sys.prefix)
logger.debug("Numpy version: " + np.__version__)
logger.debug("Pandas version: " + pd.__version__)
logger.debug("Supplied Arguments: ")
for (key, val) in args.items():
logger.debug(" {}: {}".format(key, val))
logger.debug("\nCOMPASS Started: {}".format(start_time))
# Parse arguments and decide what course of action to take
if args['microcluster_size'] and args['data']:
microcluster_dir = os.path.join(args['temp_dir'], "microclusters")
if not os.path.isdir(microcluster_dir):
os.makedirs(microcluster_dir)
microcluster_success_token = os.path.join(microcluster_dir, "success_token")
pooled_data_file = args['microcluster_data_file'] #os.path.join(microcluster_dir, "micropooled_data.tsv")
pools_file = os.path.join(microcluster_dir, "pools.json")
if os.path.exists(microcluster_success_token):
logger.info("Micropools found from previous Compass run")
pooled_latent_file = os.path.join(microcluster_dir, "pooled_latent.tsv")
if os.path.exists(pooled_latent_file):
args['latent_space'] = pooled_latent_file
logger.info("Pooled latent space file found from previous Compass run")
else:
logger.info("Partitioning dataset into microclusters of size "+str(args['microcluster_size']))
data = utils.read_data(args['data'])
pools = microcluster(data, cellsPerPartition = args['microcluster_size'],
latentSpace = args['latent_space'], inputKnn = args['input_knn'],
inputKnnDistances = args['input_knn_distances'], n_jobs = args['num_processes'])
pooled_data = pool_matrix_cols(data, pools)
pooled_data.to_csv(pooled_data_file, sep="\t")
with open(pools_file, 'w') as fout:
json.dump(pools, fout)
fout.close()
if args['latent_space']:
pooled_latent_file = os.path.join(microcluster_dir, "pooled_latent.tsv")
latent = pd.read_csv(args['latent_space'], sep='\t', index_col=0).T
pooled_latent = pool_matrix_cols(latent, pools).T
pooled_latent.to_csv(pooled_latent_file, sep='\t')
args['latent_space'] = pooled_latent_file
#Input KNN is not relevant for microclustered input
if args['input_knn']:
args['input_knn'] = None
#outputting table of micropools
pools_table = pd.DataFrame(columns = data.columns, index=['microcluster'])
for cluster in pools:
for sample in pools[cluster]:
pools_table.iloc[0, sample] = cluster
pools_table.T.to_csv(args['microcluster_file'], sep="\t")
with open(microcluster_success_token, 'w') as fout:
fout.write('Success!')
fout.close()
args['orig_data'] = args['data']
args['data'] = [pooled_data_file]
args['pools_file'] = pools_file
if args['glucose']:
if not args['media']:
fname = "_glucose_"+str(args['glucose'])
glucose_media_file = os.path.join(globals.MODEL_DIR, args['model'], 'media', fname+".json")
if not os.path.exists(glucose_media_file):
fout = open(glucose_media_file, 'w')
json.dump({'EX_glc(e)_neg':float(args['glucose'])}, fout)
fout.close()
args['media'] = fname
else:
media_file = args['media'] + '.json'
media_file = os.path.join(globals.MODEL_DIR, args['model'], 'media', media_file)
with open(media_file) as fin:
media = json.load(fin)
media.update({'EX_glc(e)_neg':float(args['glucose'])})
fname = args['media']+"_glucose_"+str(args['glucose'])
glucose_media_file = os.path.join(globals.MODEL_DIR, args['model'], 'media', fname+".json")
if not os.path.exists(glucose_media_file):
fout = open(glucose_media_file, 'w')
json.dump(media, fout)
fout.close()
args['media'] = fname
#if args['output_knn']:
# compute_knn(args)
# logger.info("Compass computed knn succesfully")
# return
if args['single_sample'] is not None:
args['penalties_file'] = os.path.join(args['temp_dir'], 'penalties.txt.gz')
singleSampleCompass(data=args['data'], model=args['model'],
media=args['media'], directory=args['temp_dir'],
sample_index=args['single_sample'], args=args)
end_time = datetime.datetime.now()
logger.debug("\nElapsed Time: {}".format(end_time-start_time))
return
if args['collect']:
collectCompassResults(args['data'], args['temp_dir'],
args['output_dir'], args)
end_time = datetime.datetime.now()
logger.debug("\nElapsed Time: {}".format(end_time-start_time))
return
#Check if the cache for (model, media) exists already:
size_of_cache = len(cache.load(init_model(model=args['model'], species=args['species'],
exchange_limit=globals.EXCHANGE_LIMIT, media=args['media'],
isoform_summing=args['isoform_summing']), args['media']))
if size_of_cache == 0 or args['precache']:
logger.info("Building up model cache")
precacheCompass(args=args)
end_time = datetime.datetime.now()
logger.debug("\nElapsed Time: {}".format(end_time-start_time))
if not args['data']:
return
else:
logger.info("Cache for model and media already built")
# Time to evaluate the reaction expression
success_token = os.path.join(args['temp_dir'], 'success_token_penalties')
penalties_file = os.path.join(args['temp_dir'], 'penalties.txt.gz')
if os.path.exists(success_token):
logger.info("Reaction Penalties already evaluated")
logger.info("Resuming execution from previous run...")
else:
logger.info("Evaluating Reaction Penalties...")
penalties = eval_reaction_penalties(args['data'], args['model'],
args['media'], args['species'],
args)
penalties.to_csv(penalties_file, sep='\t', compression='gzip')
with open(success_token, 'w') as fout:
fout.write('Success!')
args['penalties_file'] = penalties_file
if args['only_penalties']:
return
# Now run the individual cells through cplex in parallel
# This is either done by sending to Torque queue, or running on the
# same machine
if args['torque_queue'] is not None:
logger.info(
"Submitting COMPASS job to Torque queue - {}".format(
args['torque_queue'])
)
submitCompassTorque(args,
temp_dir=args['temp_dir'],
output_dir=args['output_dir'],
queue=args['torque_queue'])
return
else:
if args['transposed']:
runCompassParallelTransposed(args)
else:
runCompassParallel(args)
end_time = datetime.datetime.now()
logger.debug("\nElapsed Time: {}".format(end_time-start_time))
return
def runCompassParallel(args):
logger = logging.getLogger('compass')
# If we're here, then run compass on this machine with N processes
if args['num_processes'] is None:
args['num_processes'] = multiprocessing.cpu_count()
if args['num_processes'] > multiprocessing.cpu_count():
args['num_processes'] = multiprocessing.cpu_count()
# Get the number of samples
data = utils.read_data(args['data'])
n_samples = len(data.columns)
partial_map_fun = partial(_parallel_map_fun, args=args)
pool = multiprocessing.Pool(args['num_processes'])
logger.info(
"Processing {} samples using {} processes"
.format(n_samples, args['num_processes'])
)
logger.info(
"Progress bar will update once the first sample is finished"
)
pbar = tqdm(total=n_samples)
for _ in pool.imap_unordered(partial_map_fun, range(n_samples)):
pbar.update()
collectCompassResults(args['data'], args['temp_dir'],
args['output_dir'], args)
logger.info("COMPASS Completed Successfully")
def _parallel_map_fun(i, args):
data = args['data']
model = args['model']
media = args['media']
temp_dir = args['temp_dir']
sample_dir = os.path.join(temp_dir, 'sample' + str(i))
if not os.path.isdir(sample_dir):
os.makedirs(sample_dir)
out_file = os.path.join(sample_dir, 'out.log')
err_file = os.path.join(sample_dir, 'err.log')
with open(out_file, 'w') as fout, open(err_file, 'w') as ferr:
stdout_bak = sys.stdout
stderr_bak = sys.stderr
sys.stdout = fout
sys.stderr = ferr
globals.init_logger(sample_dir)
logger = logging.getLogger('compass')
logger.debug("Compass: Single-sample mode")
logger.debug("Supplied Arguments: ")
for (key, val) in args.items():
logger.debug(" {}: {}".format(key, val))
start_time = datetime.datetime.now()
logger.debug("\nCOMPASS Started: {}".format(start_time))
try:
singleSampleCompass(
data=data, model=model,
media=media, directory=sample_dir,
sample_index=i, args=args
)
except Exception as e:
sys.stdout = stdout_bak
sys.stderr = stderr_bak
# Necessary because cplex exceptions can't be pickled
# and can't transfer from subprocess to main process
if 'cplex' in str(type(e)).lower():
raise(Exception(str(e)))
else:
raise(e)
end_time = datetime.datetime.now()
logger.debug("\nElapsed Time: {}".format(end_time-start_time))
def collectCompassResults(data, temp_dir, out_dir, args):
"""
Collects results for individual samples in temp_dir
and aggregates into out_dir
Parameters
==========
data : str
Full path to data file
temp_dir : str
Directory - where to look for sample results.
out_dir : str
Where to store aggregated results. Is created if it doesn't exist.
args : dict
Other arguments
"""
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
logger = logging.getLogger('compass')
logger.info("Collecting results from: " + temp_dir)
logger.info("Writing output to: " + out_dir)
# Get the number of samples
sample_names = utils.read_sample_names(data, slow_names = True)
n_samples = len(sample_names)
if args['anndata_output']:
args['anndata_annotations'] = utils.read_annotations(data)
reactions_all = []
secretions_all = []
uptake_all = []
# Gather all the results
for i in range(n_samples):
sample_name = sample_names[i]
sample_dir = os.path.join(temp_dir, 'sample' + str(i))
try:
reactions = pd.read_csv(
os.path.join(sample_dir, 'reactions.txt'),
sep='\t', index_col=0)
reactions_all.append(reactions)
except:
reactions_all.append(pd.DataFrame(columns=[sample_name]))
try:
secretions = pd.read_csv(
os.path.join(sample_dir, 'secretions.txt'),
sep='\t', index_col=0)
secretions_all.append(secretions)
except:
secretions_all.append(pd.DataFrame(columns=[sample_name]))
try:
uptake = pd.read_csv(
os.path.join(sample_dir, 'uptake.txt'),
sep='\t', index_col=0)
uptake_all.append(uptake)
except:
uptake_all.append(pd.DataFrame(columns=[sample_name]))
if args['microcluster_size']:
with open(args['pools_file']) as fin:
pools = json.load(fin)
fin.close()
pools = {int(x):pools[x] for x in pools} #Json saves dict keys as strings
# Join and output
if not args['no_reactions']:
reactions_all = pd.concat(reactions_all, axis=1, sort=True)
utils.write_output(reactions_all, os.path.join(out_dir, 'reactions'), args)
if args['calc_metabolites']:
secretions_all = pd.concat(secretions_all, axis=1, sort=True)
utils.write_output(secretions_all, os.path.join(out_dir, 'secretions'), args)
        uptake_all = pd.concat(uptake_all, axis=1, sort=True)
import pandas as pd
import gzip
import datetime
import os
import numpy as np
import pickle
WINDOW_SIZE = 2880 #a week of information 7*24*60 -> for two days, 2880
LOOK_AHEAD = 2 # how much we want to predict, 2 hours
INSTRUMENT_OF_INTEREST = 'EURUSD'
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def insert_row(row_number, df, row_value):
# Starting value of upper half
start_upper = 0
# End value of upper half
end_upper = row_number
# Start value of lower half
start_lower = row_number
# End value of lower half
end_lower = df.shape[0]
# Create a list of upper_half index
upper_half = [*range(start_upper, end_upper, 1)]
# Create a list of lower_half index
lower_half = [*range(start_lower, end_lower, 1)]
# Increment the value of lower half by 1
    lower_half = [x + 1 for x in lower_half]
# Combine the two lists
index_ = upper_half + lower_half
# Update the index of the dataframe
df.index = index_
# Insert a row at the end
df.loc[row_number] = row_value
# Sort the index labels
df = df.sort_index()
# return the dataframe
return df
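# Hedged usage sketch for insert_row (values are illustrative only):
#   df = pd.DataFrame({'a': [1, 3]})
#   df = insert_row(1, df, [2])   # rows become 1, 2, 3; note the returned frame must be kept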
def add_missing_dates(data):
time = data['DateTime']
for i, t in enumerate (time):
tt = datetime.datetime.strptime(t.split(' ')[1], '%H:%M:%S.%f')
min = tt + datetime.timedelta(0,60)
min2 = tt + datetime.timedelta(0, 120)
        if (i + 1) < len(time) and min != datetime.datetime.strptime(time[i+1].split(' ')[1], '%H:%M:%S.%f') and min2 == datetime.datetime.strptime(time[i+1].split(' ')[1], '%H:%M:%S.%f'):
            input = [t.split(' ')[0] + ' ' + min.strftime("%H:%M:%S.%f"), data['BidOpen'][i], data['BidHigh'][i], data['BidLow'][i], data['BidClose'][i+1], data['AskOpen'][i], data['AskHigh'][i], data['AskLow'][i], data['AskClose'][i+1]]
            data = insert_row(i+1, data, input)
return data
def clean_dataset(data, instrument):
data = data.rename(columns={"DateTime": "DateTime", "BidOpen": "BidOpen"+instrument, "BidHigh": "BidHigh"+instrument, "BidLow": "BidLow"+instrument,"BidClose": "BidClose"+instrument,
"AskOpen": "AskOpen"+instrument,"AskHigh": "AskHigh"+instrument, "AskLow": "AskLow"+instrument,"AskClose": "AskClose"+instrument})
for column in data.columns:
if column == 'DateTime':
continue
try:
m = max(data[column])
except:
continue
data[column] = data[column]/m
return data
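# Illustrative usage sketch (made-up single row): the bid/ask columns get the
# instrument suffix and every numeric column is scaled by its own maximum.
def _clean_dataset_example():
    raw = pd.DataFrame({'DateTime': ['01.01.2020 00:00:00.000'],
                        'BidOpen': [1.10], 'BidHigh': [1.12], 'BidLow': [1.09],
                        'BidClose': [1.11], 'AskOpen': [1.10], 'AskHigh': [1.12],
                        'AskLow': [1.09], 'AskClose': [1.11]})
    return clean_dataset(raw, 'EURUSD')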
def windowded (instrument):
frames = []
for filename in os.listdir('./data/'):
if instrument in filename:
try:
data = pd.read_csv(gzip.open('./data/'+filename, 'rb'))
except:
continue
data = add_missing_dates(data)
print(instrument,filename)
frames.append(data)
    result = pd.concat(frames)
# AUTOGENERATED! DO NOT EDIT! File to edit: drone.ipynb (unless otherwise specified).
__all__ = ['axes_figure', 'axes']
# Cell
import math
import numpy as np
import plotly.express as px
import pandas as pd
import gtsam
# Cell
def axes_figure(pose: gtsam.Pose3, scale: float = 1.0, labels: list = ["X", "Y", "Z"]):
"""Create plotly express figure with Pose3 coordinate frame."""
t = np.reshape(pose.translation(),(3,1))
M = np.hstack([t,t,t,pose.rotation().matrix() * scale + t])
    df = pd.DataFrame({"x": M[0], "y": M[1], "z": M[2]}, labels+labels)
#=========== pakages/modules that are used here ==============================
#import from Python standard modules
import colored
from colored import stylize
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import pandas as pd
from os import path
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
from matplotlib import style
#import from the modules I have created
import stock_price as sp
import sentiment_analysis as sa
#================ code =======================================================
# text printing styles for alert messages
invalid_style = colored.fg("red") + colored.attr("bold")
def get_stock_news(ticker, pagenum):
'''
Web scrape a stock's news headlines from 'Business Insider' website, and
    save the data in a csv, given the number of pages of news headlines a user
    wants to extract.
Parameters
----------
ticker(string): a ticker symbol
pagenum(int): number of pages of news headlines a user wants to extract
Returns
-------
Returns the csv output file name.
for example: if the ticker is AMZN, today is Dec 12 2020, and pagenum = 30.
output file name = 'AMZN_HisNews_20201212_p30.csv'
'''
cur_time = datetime.now() #current time
all_news = [] # create an empty list to store news headlines
# output file name
fout = ticker + '_HisNews_' + cur_time.strftime('%Y%m%d') +\
'_p' + str(pagenum) +'.csv'
# if output file already exists, just return the filename
if path.exists(fout) == True:
return fout
# loop news webpage one by one to web scrape the news headlines
for i in range(1, pagenum + 1):
url='https://markets.businessinsider.com/news/{}?p={}'.format(ticker.lower(),
str(i))
req = requests.get(url)
#check if a url exists/works
if req.status_code == 200:
soup = BeautifulSoup(req.content, 'lxml')
newsSoup = soup.find_all('div',
{"class": "col-md-6 further-news-container latest-news-padding"})
for j in range(len(newsSoup)):
#news headline text
news_title = newsSoup[j].find('a').get_text()
#elapsed time since the news headline has been posted
news_time_section = newsSoup[j].find('span',
{"class": "warmGrey source-and-publishdate"}
).get_text()
news_time = news_time_section.split()[-1]
# the measurement unit of elapsed time: m (minutes), h (hours) or d (days)
time_unit = news_time[-1]
# get the amount of elapsed time in the measurement unit
time_amount = int(news_time[:-1].replace(',', ''))
#compare the current time and the elapsed time to get the date
#when the news was posted
if time_unit == 'm':
news_date = cur_time - timedelta(minutes = time_amount)
elif time_unit == 'h':
news_date = cur_time - timedelta(hours = time_amount)
elif time_unit == 'd':
news_date = cur_time - timedelta(days = time_amount)
# convert the new post date to yyyy-mm-dd text string
news_date = news_date.strftime('%Y-%m-%d')
#append the news to the news list
all_news.append([news_date, news_title])
    # store all news in a pandas DataFrame and save as csv
news_df = pd.DataFrame(all_news, columns=['Date', 'Headline'])
news_df.to_csv(fout, index = False, encoding = 'utf-8-sig')
return fout
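# Illustrative usage sketch (ticker and page count are arbitrary): scrape two
# pages of Business Insider headlines for AMZN and get back the csv filename,
# e.g. 'AMZN_HisNews_20201212_p2.csv' when run on 2020-12-12.
def _get_stock_news_example():
    return get_stock_news('AMZN', 2)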
def calc_news_sa(ticker, pagenum):
'''
Calculate each piece of news headline's sentiment scores regarding
subjectivity, polarity, compound, positivity, negativity, neutrality.
Add all these sentiment scores (6 columns) to the news csv file.
Parameters
----------
ticker(string): a ticker symbol
pagenum(int): number of pages of news headlines a user wants to extract
Returns
-------
    a pandas DataFrame of news headlines, with all the sentiment scores added.
'''
#news file name
date_today = datetime.today()
today_str = date_today.strftime("%Y%m%d")
fint = ticker + '_HisNews_' + today_str + '_p' + str(pagenum) +'.csv'
# if news fint N/A, extract and save stock news first
print("\n loading news about " + ticker +" ...")
if path.exists(fint) == False:
get_stock_news(ticker, pagenum)
df = pd.read_csv(fint) #load news data
# if a column called 'Subjectivity' already exists in the news file, stop
    # and return the DataFrame as-is,
    # because it means all sentiment scores have already been added to the file.
if 'Subjectivity' in df.columns:
return df
# Get subjectivity and polarity for news headline
df['Subjectivity'] = df['Headline'].apply(sa.getSubjectivity)
df['Polarity'] = df['Headline'].apply(sa.getPolarity)
# Get the sentiment scores for each day
compound = [] # compound score
neg = [] # negative score
neu = [] # neutral score
pos = [] # positive score
# loop to calculate compound, negative, neutral, positive for each day
for i in range(0, len(df['Headline'])):
SIA = sa.getSIA(df['Headline'][i])
compound.append(SIA['compound'])
neg.append(SIA['neg'])
neu.append(SIA['neu'])
pos.append(SIA['pos'])
# store the sentiment scores as columns in the DataFrame
df['Compound'] = compound
df['Negative'] = neg
df['Neutral'] = neu
df['Positive'] = pos
#save the updated DataFrame to the news csv file.
df.to_csv(fint, index = False, encoding='utf-8-sig')
return df
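# Illustrative usage sketch: after this call the news csv (and the returned
# DataFrame) carries the six sentiment columns computed above.
def _calc_news_sa_example():
    return calc_news_sa('AMZN', 2)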
def display_news_10(ticker, pagenum):
'''
Displays the most recent 10 news headlines for a stock.
Parameters
----------
ticker(string): a ticker symbol
pagenum(int): number of pages of news headlines a user wants to extract
'''
# news input file name
date_today = datetime.today()
today_str = date_today.strftime("%Y%m%d")
fint = ticker + '_HisNews_' + today_str + '_p' + str(pagenum) +'.csv'
# if news fint N/A, extract and save stock news first
print("\n loading news about " + ticker +" ...")
if path.exists(fint) == False:
calc_news_sa(ticker, pagenum)
# load the most recent 10 news headlines
df = pd.read_csv(fint).head(10)
# print the most recent 10 news headlines
print('''
Index Date Headlines
----- ---------- ---------''')
for index, row in df.iterrows():
print(' ' + '{:^5s}'.format(str(index+1)) +' {:>10s}'.format(row['Date']) +
' ' + '{:<s}'.format(row['Headline']))
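# Illustrative usage sketch (arbitrary ticker): print the ten most recent
# scraped headlines in the table layout above.
def _display_news_10_example():
    display_news_10('AMZN', 2)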
def merge_news_sa_price(ticker, from_date, pagenum):
'''
Merge stock news sentiment scores (Compound and Polarity) with stock
price data.
Parameters
----------
ticker(string): a ticker symbol
from_date(string): a string that means the start date for a stock's
price inquiry, with format like yyyy-mm-dd
pagenum(int): number of pages of news headlines a user wants to extract
Returns
-------
    a merged pandas DataFrame that contains price info and news sentiment scores.
'''
# news input file name
# date_today = datetime.today() - timedelta(days = 1)
date_today = datetime.today()
today_str = date_today.strftime("%Y%m%d")
fint_news = ticker + '_HisNews_' + today_str + '_p' + str(pagenum) +'.csv'
# stock price input file name
end_date = date_today.strftime('%Y-%m-%d')
fint_price = ticker + '_HisPrice_' + from_date.replace('-','') +\
'_' + end_date.replace('-','') + '.csv'
# if news file N/A, extract and save stock news
# otherwise, load stock news data
if path.exists(fint_news) == False:
df_news = calc_news_sa(ticker, pagenum)
else:
df_news = pd.read_csv(fint_news, index_col = 0)
    # get average Compound and Polarity scores by date for news headlines
df_news_new = df_news.groupby(['Date']).agg({'Compound':'mean',
'Polarity':'mean'})
# if price file N/A, extract and save stock price
if path.exists(fint_price) == False:
# if there is no price data between the from_date to today,
# returns -1 and stop
if sp.load_stock_price(ticker, from_date, end_date) == False:
return -1
#add price movement direction and change in % to the stock price data
sp.add_price_move(ticker, from_date, end_date)
# load stock price data
df_price = pd.read_csv(fint_price, index_col = 0)
    # inner join stock price and news sentiment on their shared Date index
    df_price_news = df_price.merge(df_news_new, how = 'inner',
                                   left_index = True, right_index = True)
return df_price_news
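# Illustrative usage sketch (dates are arbitrary): the result is a DataFrame
# indexed by Date with the price columns plus mean Compound / Polarity scores,
# or -1 when no price history is available for the window.
def _merge_news_sa_price_example():
    return merge_news_sa_price('AMZN', '2020-11-01', 2)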
def model_news_sa_price(ticker, from_date, pagenum):
'''
    Analyzes the significance of news headline sentiment on the stock price
    movement day over day, using an OLS model.
    Displays the OLS model summary and a plot of the real stock price vs the
    OLS model price.
Parameters
----------
ticker(string): a ticker symbol
from_date(string): a string that means the start date for a stock's
price inquiry, with format like yyyy-mm-dd
pagenum(int): number of pages of news headlines a user wants to extract
'''
# load the merged dataset between price and news sentiment
df = merge_news_sa_price(ticker, from_date, pagenum)
    # if there is no price data between from_date and today, return -1 and stop
if type(df) != pd.DataFrame:
return -1
# stop if the data size is too little
if len(df) <= 3:
print(stylize(' Not enough price or news data to display. Try again.',
invalid_style))
return -1
#pick opening price, Compound score, Polarity scores as x variables
X = df[['Open','Compound', 'Polarity']]
#pick adj close price as outcome variable
Y = df['Adj Close']
X = sm.add_constant(X) # adding a constant
#assign OLS model
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
#print model summary
print_model = model.summary()
print(print_model)
    # plot the real stock price vs the OLS model price over time
mpl.rcParams.update(mpl.rcParamsDefault) #set plot format back to default
df.index = pd.to_datetime(df.index) #convert index from string to date type
fig, ax = plt.subplots(figsize=(6, 3))
#plot actual stock price
ax.plot(df.index, Y.values, '-', color = 'royalblue', label = 'Real Price')
#plot model stock price
ax.plot(df.index, predictions , '--*', color = 'darkorange',
label = 'Model Price')
# format labels and ticks
ax.set_ylabel('Price').set_size(10)
ax.set_xlabel('Date').set_size(10)
ax.tick_params(axis = "x", labelsize = 8 , rotation = 0)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.tick_params(axis = "y", labelsize = 8 )
ax.set_title(ticker +': Real stock price vs OLS Model price').set_size(10)
plt.legend(loc=2, prop={"size":8})
plt.tight_layout()
plt.show()
def plot_news_sa_price(ticker, from_date, pagenum):
'''
Plots a stock's news headlines' sentiment movement vs price movement
Parameters
----------
ticker(string): a ticker symbol
from_date(string): a string that means the start date for a stock's
price inquiry, with format like yyyy-mm-dd
pagenum(int): number of pages of news headlines a user wants to extract
'''
# get df with merged news sentiment and price history
df_price_news = merge_news_sa_price(ticker, from_date, pagenum)
#if there is no data between from_date and today, return -1 and stop.
if type(df_price_news) != pd.DataFrame:
return -1
# returns -1 if data size is too small
if len(df_price_news) <= 3:
print(stylize(' Not enough price or news data to display. Try again.',
invalid_style))
return -1
# convert index from text to date type
    df_price_news.index = pd.to_datetime(df_price_news.index)
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def plot_all(events):
df = pd.DataFrame(events)
def bar_chart(df, type):
df = df[df.type == type]
df = df[['days_to_birth','type']]
df = df.assign(Bin=lambda x: pd.cut(x.days_to_birth, bins=10, precision=0))
df = df[['Bin','type']]
df = df.rename(columns={"type": type, 'Bin': 'days_to_birth'})
df.groupby(['days_to_birth']).count().plot(kind='bar')
#fig, ax = plt.subplots()
#groupby = df.groupby(['days_to_birth']).count().plot(kind='bar', ax=ax)
#groupby = df.groupby(['Bin']).count().plot(kind='bar', ax=ax)
# ticks = ax.set_xticks(ax.get_xticks()[::100])
bar_chart(df, 'diagnosis')
bar_chart(df, 'observation')
bar_chart(df, 'sample')
bar_chart(df, 'treatment')
    df = pd.DataFrame(events)
import pandas as pd
import openpyxl
import numpy as np
import os
import string
import glob
''' This program compiles all (individual) saved excel files to compare different models in one environment
'''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
path_core = __location__+ "/Results/Train/"
print("OK")
# SELECT THE ENVIRONMENTS
# env_path_list = ["Env_1",
# "Env_2",
# "Env_3",
# "Env_8",
# "Env_9",
# "Env_10",
# "Env_11"]
env_path_list = ["Env_1",
"Env_2",
"Env_3",
"Env_4"]
env_path_list = ["Env_1"]
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', 'AO', 'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY', 'AZ']
list_sheets = ["Run_Conf", "Score", "Percent", "Loss", "Time"]
for env_path in env_path_list:
file_path_list = []
path = path_core + env_path + "/Train_Env_1_DQN*.xlsx"
for fname in sorted(glob.glob(path)):
file_path_list.append(fname)
print("LEN(FILE_PATH_LIST):", len(file_path_list))
load_path = __location__+ "/Results/Train/Compare_Models.xlsx"
excel_data_base = pd.ExcelFile(load_path)
load_path_new = __location__+ "/Results/Train/" + env_path + "/Compare_Models_new_" + env_path + ".xlsx"
excel_writer_to_append = pd.ExcelWriter(load_path_new)
workbook = excel_writer_to_append.book
excel_data_base_col = pd.read_excel(excel_data_base, sheetname="Run_Conf")
df_Run_Conf_list = pd.DataFrame()
df_Score_list = pd.DataFrame()
df_Percent_list = pd.DataFrame()
df_Loss_list = pd.DataFrame()
df_Time_list = pd.DataFrame()
for i in range(len(file_path_list)):
print("File:", i)
excel_file = pd.ExcelFile(file_path_list[i])
# print("excel_file ", excel_file )
df_Run_Conf = pd.read_excel(excel_file, sheetname=list_sheets[0], converters={'A': str})
df_Run_Conf = df_Run_Conf.set_index(list_sheets[0])
df_Score = pd.read_excel(excel_file, sheetname=list_sheets[1], parse_cols="A:B")
df_Score = df_Score.set_index(list_sheets[1])
df_Percent = pd.read_excel(excel_file, sheetname=list_sheets[2], parse_cols="A:B")
df_Percent = df_Percent.set_index(list_sheets[2])
df_Loss = pd.read_excel(excel_file, sheetname=list_sheets[3], parse_cols="A:B")
df_Loss = df_Loss.set_index(list_sheets[3])
df_Time = pd.read_excel(excel_file, sheetname=list_sheets[4], parse_cols="A:B")
df_Time = df_Time.set_index(list_sheets[4])
df_Run_Conf_list = pd.concat([df_Run_Conf_list, df_Run_Conf], axis=1, join="outer")
df_Score_list = pd.concat([df_Score_list, df_Score], axis=1, join="outer")
df_Percent_list = pd.concat([df_Percent_list, df_Percent], axis=1, join="outer")
        df_Loss_list = pd.concat([df_Loss_list, df_Loss], axis=1, join="outer")
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
from six.moves import urllib
import pandas as pd
HOUSING_PATH = "datasets/housing"
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# In[2]:
housing = load_housing_data()
# housing =
housing.head()
# In[3]:
housing.info()
# In[4]:
housing.describe()
# In[5]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(10,7))
plt.show()
# In[6]:
import numpy as np
def split_train_set(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_set(housing, 0.2)
print("Train: ", len(train_set), "+ Test: ", len(test_set))
# In[7]:
import hashlib
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash))
return data.loc[~in_test_set], data.loc[in_test_set]
housing_with_id = housing.reset_index() # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# In[8]:
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# In[9]:
housing.hist(column='median_income', bins=10)
plt.show()
# In[10]:
housing['income_cat'] = np.ceil(housing['median_income']/1.5)
housing.hist('income_cat', bins=10)
plt.show()
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
housing.hist('income_cat')
plt.show()
# In[11]:
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.iloc[train_index]
strat_test_set = housing.iloc[test_index]
for set in (strat_train_set, strat_test_set):
set.drop(columns='income_cat', inplace=True)
# In[12]:
housing = strat_train_set.copy()
housing.describe()
# In[13]:
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
# In[14]:
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.2,
s=housing["population"]/100, label="population",
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True)
# In[15]:
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# In[16]:
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
import requests
import pandas as pd
import urllib.parse as up
import time
import re
from .utils import log_message as print
from tqdm import tqdm
from io import StringIO
RETRY_MAX = 10 # times
TIMEOUT = 150 # seconds
SLEEP = 60 # seconds
SEARCH_RESULT_HEADER = [
"urlkey",
"timestamp",
"original",
"mimetype",
"statuscode",
"digest",
"length",
]
IMPORTANCE_MATRIX = pd.DataFrame.from_dict(
{ "url": 0,
"last_donation_time":1,
"last_update_time": 1,
"created_date": 4,
"location_city": 4,
"location_country": 4,
"location_postalcode": 0,
"location_stateprefix": 1,
"description": 1,
"poster": 1,
"story": 4,
"title": 2,
"goal": 3,
"raised_amnt": 3,
"goal_amnt": 3,
"currency": 3,
"tag": 2,
"num_donors": 3,
"num_likes":2,
"num_shares": 2,
"charity_details": 1,
"error_message": 0,
},
orient="index",
columns=["importance_score"],
)
IMPORTANCE_MATRIX.index.name = "field"
IMPORTANCE_MATRIX = IMPORTANCE_MATRIX["importance_score"]
def get_max_search_page():
npage_query = r"http://web.archive.org/cdx/search/cdx?"
npage_query += r"url=gofundme.com&matchType=domain"
npage_query += r"&showNumPages=true"
try:
npage = int(requests.get(npage_query).text.strip())
except Exception as ee:
print(ee)
npage = 1775 # good estimate retrieved from browser
return npage
def pull_request(url_to_search, process_func, **kwargs):
retry = True
for retry_count in range(RETRY_MAX):
if retry_count > 0:
print(f"request try {retry_count+1}")
try:
results = process_func(url_to_search, **kwargs)
if results.empty:
results = pd.DataFrame(columns=SEARCH_RESULT_HEADER)
retry = False
if not retry:
break
except Exception as ee:
print(f"returned error: {str(ee)}")
if retry_count < (RETRY_MAX - 1):
print(f"sleep {SLEEP} secs before retrying request")
time.sleep(SLEEP)
if retry:
print(f"failed to search {retry_count+1} times")
results = pd.DataFrame(columns=SEARCH_RESULT_HEADER)
return results
def _search_wayback(url_to_search, timeout=TIMEOUT):
# api_query = f"http://archive.org/wayback/available?url={url_to_search}"
# search_way_one = f'http://timetravel.mementoweb.org/api/json/2014/{url_to_search}'
# search_way_2 = f'http://web.archive.org/web/timemap/link/{url_to_search}'
search_way_3 = (
f"http://web.archive.org/cdx/search/cdx?url={url_to_search}"
+ "&matchType=prefix&output=json"
)
print(f"Search for archives w query: {search_way_3}")
search_page = requests.get(search_way_3, timeout=timeout)
search_results = pd.read_json(StringIO(search_page.text), encoding="utf-8")
if not search_results.empty:
search_results = search_results.rename(columns=search_results.iloc[0, :]).drop(
index=0
)
search_results = search_results.sort_values(by="timestamp").reset_index(
drop=True
)
return search_results
def search_wayback(url_to_search, timeout=TIMEOUT):
return pull_request(url_to_search, _search_wayback, timeout=timeout)
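# Illustrative usage sketch (the campaign path is made up): query the CDX API
# for every snapshot under the prefix; the result uses the columns listed in
# SEARCH_RESULT_HEADER and is sorted by timestamp.
def _search_wayback_example():
    return search_wayback('gofundme.com/f/example-campaign')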
def clean_wayback_search_results(queryoutput):
parsed_urls = queryoutput.original.apply(lambda x: up.urlsplit(x))
parsed_urls = pd.DataFrame.from_records(
parsed_urls,
columns=["_scheme", "_domain", "_path", "_query", "_fragment"],
index=parsed_urls.index,
).join(queryoutput, how="left")
parsed_urls["unquote_path"] = parsed_urls._path.apply(lambda x: up.unquote(x))
parsed_urls["unquote_query"] = parsed_urls._query.apply(lambda x: up.unquote(x))
select_urls = parsed_urls.groupby(by=["unquote_path"], as_index=False).apply(
lambda df: df.loc[df.unquote_query == df.unquote_query.min(), :]
)
select_urls = select_urls.sort_values("timestamp", ascending=False).reset_index(
drop=True
)
select_urls["original"] = (
select_urls["original"].astype(str).apply(remove_port_from_url)
)
return select_urls
def remove_port_from_url(url_str):
return url_str.replace(":80", "")
def filter_nonworking_search_results(search_results):
search_results = search_results[~(search_results.statuscode.isin(["301", "404"]))]
return search_results
def get_campaign_page(url_to_get, check_status_code=True):
retry = True
print(f"Requesting {url_to_get}")
for retry_count in range(RETRY_MAX):
if retry_count > 0:
print(f"request try {retry_count+1}")
try:
campaign_page = requests.get(url_to_get)
if check_status_code:
retry = campaign_page.status_code != 200
# 200 is http response code for success
if retry and (retry_count == (RETRY_MAX - 1)):
raise Exception("http status code is not 200")
else:
retry = False
if not retry:
break
except Exception as ee:
print(f"returned error: {ee}")
if retry_count < (RETRY_MAX - 1):
print(f"sleep {SLEEP} secs before retrying request")
time.sleep(SLEEP)
if retry:
print(f"failed to request {retry_count+1} times")
campaign_page = None
return campaign_page
def scrape_quality(row):
    not_none = pd.Series(row)
# -*- coding: utf-8 -*-
# @Time : 2018/11/13 4:07 PM
# @Author : Inf.Turing
# @Site :
# @File : stacking_model.py
# @Software: PyCharm
import os
import pandas as pd
import lightgbm as lgb
import numpy as np
from sklearn.metrics import f1_score
path = './'
w2v_path = path + '/w2v'
train = pd.read_csv(path + '/train_2.csv')
test = pd.read_csv(path + '/test_2.csv')
train_first = pd.read_csv(path + '/train_all.csv')
from linlearn import BinaryClassifier
from linlearn.robust_means import Holland_catoni_estimator, median_of_means
import numpy as np
import logging
import pickle
from datetime import datetime
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import itertools
from tqdm import tqdm
import joblib
import time
from sklearn.metrics import accuracy_score
from numba import njit
from numba import (
int64,
float64,
)
from numba.experimental import jitclass
def ensure_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
ensure_directory('exp_archives/')
file_handler = logging.FileHandler(filename='exp_archives/logitlasso_exp.log')
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S", handlers=handlers
)
save_results = True
save_fig= True
logging.info(64*"=")
logging.info("Running new experiment session")
logging.info(64*"=")
step_size = 0.1
random_state = 43
max_iter = 100
fit_intercept = True
MOM_block_size = 0.05
test_loss_meantype = "ordinary"
if not save_results:
logging.info("WARNING : results will NOT be saved at the end of this session")
logging.info("loading data ...")
dataset = "Bank"#"Stroke"#"Adult"#"weatherAUS"#"Heart"#
def load_heart(test_size=0.3):
csv_heart = pd.read_csv("heart/heart.csv")
categoricals = ["sex", "cp", "fbs", "restecg", "exng", "slp", "caa", "thall"]
label = "output"
for cat in categoricals:
one_hot = pd.get_dummies(csv_heart[cat], prefix=cat)
csv_heart = csv_heart.drop(cat, axis=1)
csv_heart = csv_heart.join(one_hot)
df_train, df_test = train_test_split(
csv_heart,
test_size=test_size,
shuffle=True,
random_state=random_state,
stratify=csv_heart[label],
)
y_train = df_train.pop(label)
y_test = df_test.pop(label)
return df_train.to_numpy(), df_test.to_numpy(), y_train.to_numpy(), y_test.to_numpy()
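# Illustrative usage sketch: stratified 70/30 split of the one-hot encoded
# heart data; the shapes depend on the csv on disk.
def _load_heart_example():
    X_train, X_test, y_train, y_test = load_heart(test_size=0.3)
    return X_train.shape, X_test.shape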
def load_stroke(test_size=0.3):
    csv_stroke = pd.read_csv("stroke/healthcare-dataset-stroke-data.csv")