| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
import logging
from pathlib import Path
import re
import scipy.stats as ss
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.stats.multitest as multitest
import sklearn.metrics
from intermine.webservice import Service
import biclust_comp.utils as utils
def plot_sample_enrichment_impc(X_file, max_factors=None, max_traits=None):
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
X = utils.read_matrix_tsv(X_file)
trait_dummies = pd.get_dummies(sample_info[['tissue', 'genotype']])  # api: pandas.get_dummies
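# Illustration (toy values, not real IMPC sample info): pd.get_dummies one-hot encodes
# each categorical column, yielding one indicator column per (column, category) pair,
# e.g. pd.get_dummies(pd.DataFrame({'tissue': ['liver', 'lung'], 'genotype': ['wt', 'ko']}))
# produces the columns tissue_liver, tissue_lung, genotype_ko, genotype_wt.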
import pandas as pd
import numpy as np
# JSON parsing
import json
# HTML parsing
from lxml import etree
import urllib
# SQLite RDBMS
import sqlite3
# Time conversions
import time
# Parallel processing
import swifter
# NoSQL DB
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError, OperationFailure
import os
# TODO: Adapt the data loading code from class.
# YOUR CODE HERE
def get_df(rel):
ret = pd.DataFrame(rel).fillna('')
for k in ret.keys():
ret[k] = ret[k].astype(str)
return ret
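# Illustration (hypothetical rows, not from the dataset): get_df fills missing cells with
# '' and coerces every column to str, so heterogeneous records load cleanly into SQLite.
# For instance get_df([{'a': 'x', 'n': 1}, {'a': 'y'}]) gives column n == ['1.0', '']
# (the missing value upcasts the ints to float before the string conversion).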
def extract_relation(rel, name):
'''
Pull out a nested list that has a key, and return it as a list
of dictionaries suitable for treating as a relation / dataframe
'''
# We'll return a list
ret = []
if name in rel:
ret2 = rel.pop(name)
try:
# Try to parse the string as a dictionary
ret2 = json.loads(ret2.replace('\'','\"'))
except:
# If we get an error in parsing, we'll leave as a string
pass
# If it's a dictionary, add it to our return results after
# adding a key to the parent
if isinstance(ret2, dict):
item = ret2
item['person'] = rel['_id']
ret.append(item)
else:
# If it's a list, iterate over each item
index = 0
for r in ret2:
item = r
if not isinstance(item, dict):
item = {'person': rel['_id'], 'value': item}
else:
item['person'] = rel['_id']
# A fix to a typo in the data
if 'affilition' in item:
item['affiliation'] = item.pop('affilition')
item['pos'] = index
index = index + 1
ret.append(item)
return ret
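# A quick illustration of extract_relation on a hypothetical record (not from the real
# dataset): a nested list stored under a key is flattened into one row per element,
# each tagged with the parent's _id and its position.
_example = {'_id': 'p1', 'skills': "['Python', 'SQL']"}
extract_relation(_example, 'skills')
# -> [{'person': 'p1', 'value': 'Python', 'pos': 0}, {'person': 'p1', 'value': 'SQL', 'pos': 1}]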
def data_loading(file, dbname='linkedin.db', filetype='localobj', LIMIT=20000):
if(filetype == 'localpath'):
# linked_in = urllib.request.urlopen('file://' + cwd + '/' + file)
linked_in = open(file)
elif(filetype == 'localobj'):
linked_in = file
else: #URL
linked_in = urllib.request.urlopen(file)
names = []
people = []
groups = []
education = []
skills = []
experience = []
honors = []
also_view = []
events = []
lines = []
i = 0
# LIMIT = 20000 # Max records to parse
for line in linked_in:
try:
line = line.decode('utf-8')
except:
line = line
try:
person = json.loads(line)
# By inspection, all of these are nested dictionary or list content
nam = extract_relation(person, 'name')
edu = extract_relation(person, 'education')
grp = extract_relation(person, 'group')
skl = extract_relation(person, 'skills')
exp = extract_relation(person, 'experience')
hon = extract_relation(person, 'honors')
als = extract_relation(person, 'also_view')
eve = extract_relation(person, 'events')
# This doesn't seem relevant and it's the only
# non-string field that's sometimes null
if 'interval' in person:
person.pop('interval')
lines.append(person)
names = names + nam
education = education + edu
groups = groups + grp
skills = skills + skl
experience = experience + exp
honors = honors + hon
also_view = also_view + als
events = events + eve
except:
pass
i = i + 1
if(i % 10000 == 0):
print (i)
if i >= LIMIT:
break
people_df = get_df(pd.DataFrame(lines))
names_df = get_df(pd.DataFrame(names))
education_df = get_df(pd.DataFrame(education))
groups_df = get_df(pd.DataFrame(groups))
skills_df = get_df(pd.DataFrame(skills))
experience_df = get_df(pd.DataFrame(experience))
honors_df = get_df(pd.DataFrame(honors))
also_view_df = get_df(pd.DataFrame(also_view))
events_df = get_df(pd.DataFrame(events))  # api: pandas.DataFrame
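# A minimal sketch (an assumption; the original cell is cut off after events_df) of how
# these flattened frames could be persisted into the SQLite database named by `dbname`,
# using pandas.DataFrame.to_sql on a sqlite3 connection:
#
#   conn = sqlite3.connect(dbname)
#   for table, df in [('people', people_df), ('names', names_df),
#                     ('education', education_df), ('groups', groups_df),
#                     ('skills', skills_df), ('experience', experience_df),
#                     ('honors', honors_df), ('also_view', also_view_df),
#                     ('events', events_df)]:
#       df.to_sql(table, conn, if_exists='replace', index=False)
#   conn.close()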
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
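# The caching tests below exercise the different forms an entry in
# settings.caching['blacklist'] / settings.caching['whitelist'] may take: a bound method,
# an (instance, name) pair, a bare attribute name, an instance, a (class, name) pair,
# a class, a 'ClassName.attr' or 'ClassName' string, and a dict of decorator kwargs.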
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me()
assert g.cache_me() == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0]),
strict=False
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_is_namedtuple(self):
assert checks.is_namedtuple(namedtuple('Hello', ['world'])(*range(1)))
assert not checks.is_namedtuple((0,))
def test_method_accepts_argument(self):
def test(a, *args, b=2, **kwargs):
pass
assert checks.method_accepts_argument(test, 'a')
assert not checks.method_accepts_argument(test, 'args')
assert checks.method_accepts_argument(test, '*args')
assert checks.method_accepts_argument(test, 'b')
assert not checks.method_accepts_argument(test, 'kwargs')
assert checks.method_accepts_argument(test, '**kwargs')
assert not checks.method_accepts_argument(test, 'c')
def test_assert_in(self):
checks.assert_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type(pd.Series([1, 2, 3]), (np.ndarray, pd.Series))
with pytest.raises(Exception) as e_info:
checks.assert_type(pd.DataFrame([1, 2, 3]), (np.ndarray, pd.Series))
def test_assert_subclass(self):
class A:
pass
class B(A):
pass
class C(B):
pass
checks.assert_subclass(B, A)
checks.assert_subclass(C, B)
checks.assert_subclass(C, A)
with pytest.raises(Exception) as e_info:
checks.assert_subclass(A, B)
def test_assert_type_equal(self):
checks.assert_type_equal(0, 1)
checks.assert_type_equal(np.zeros(1), np.empty(1))
with pytest.raises(Exception) as e_info:
checks.assert_type_equal(0, np.zeros(1))
def test_assert_dtype(self):
checks.assert_dtype(np.zeros(1), np.float)
checks.assert_dtype(pd.Series([1, 2, 3]), np.int)
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), np.int)
with pytest.raises(Exception) as e_info:
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.int)
def test_assert_subdtype(self):
checks.assert_subdtype([0], np.number)
checks.assert_subdtype(np.array([1, 2, 3]), np.number)
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.number)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(np.array([1, 2, 3]), np.float)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.float)
def test_assert_dtype_equal(self):
checks.assert_dtype_equal([1], [1, 1, 1])
checks.assert_dtype_equal(pd.Series([1, 2, 3]), pd.DataFrame([[1, 2, 3]]))
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3.]]), pd.DataFrame([[1, 2, 3.]]))
with pytest.raises(Exception) as e_info:
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3.]]))
def test_assert_ndim(self):
checks.assert_ndim(0, 0)
checks.assert_ndim(np.zeros(1), 1)
checks.assert_ndim(pd.Series([1, 2, 3]), (1, 2))
checks.assert_ndim(pd.DataFrame([1, 2, 3]), (1, 2))
with pytest.raises(Exception) as e_info:
checks.assert_ndim(np.zeros((3, 3, 3)), (1, 2))
def test_assert_len_equal(self):
checks.assert_len_equal([[1]], [[2]])
checks.assert_len_equal([[1]], [[2, 3]])
with pytest.raises(Exception) as e_info:
checks.assert_len_equal([[1]], [[2], [3]])
def test_assert_shape_equal(self):
checks.assert_shape_equal(0, 1)
checks.assert_shape_equal([1, 2, 3], np.asarray([1, 2, 3]))
checks.assert_shape_equal([1, 2, 3], pd.Series([1, 2, 3]))
checks.assert_shape_equal(np.zeros((3, 3)), pd.Series([1, 2, 3]), axis=0)
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(1, 0))
with pytest.raises(Exception) as e_info:
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(0, 1))
def test_assert_index_equal(self):
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([2, 3, 4]))
def test_assert_meta_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_meta_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_meta_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
checks.assert_meta_equal(pd.DataFrame([[1, 2, 3]], columns=columns), pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.Series([1, 2]), pd.DataFrame([1, 2]))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([1, 2]), pd.DataFrame([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([1, 2, 3]), pd.DataFrame([1, 2, 3], index=index))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3]], columns=columns))
def test_assert_array_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_array_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_array_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
checks.assert_array_equal(pd.DataFrame([[1, 2, 3]], columns=columns),
pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_array_equal(np.array([1, 2]), np.array([1, 2, 3]))
def test_assert_level_not_exists(self):
i = pd.Index(['x', 'y', 'z'], name='i')
multi_i = pd.MultiIndex.from_arrays([['x', 'y', 'z'], ['x2', 'y2', 'z2']], names=['i', 'i2'])
checks.assert_level_not_exists(i, 'i2')
checks.assert_level_not_exists(multi_i, 'i3')
with pytest.raises(Exception) as e_info:
checks.assert_level_not_exists(i, 'i')
checks.assert_level_not_exists(multi_i, 'i')
def test_assert_equal(self):
checks.assert_equal(0, 0)
checks.assert_equal(False, False)
with pytest.raises(Exception) as e_info:
checks.assert_equal(0, 1)
def test_assert_dict_valid(self):
checks.assert_dict_valid(dict(a=2, b=3), [['a', 'b', 'c']])
with pytest.raises(Exception) as e_info:
checks.assert_dict_valid(dict(a=2, b=3, d=4), [['a', 'b', 'c']])
checks.assert_dict_valid(dict(a=2, b=3, c=dict(d=4, e=5)), [['a', 'b', 'c'], ['d', 'e']])
with pytest.raises(Exception) as e_info:
checks.assert_dict_valid(dict(a=2, b=3, c=dict(d=4, f=5)), [['a', 'b', 'c'], ['d', 'e']])
# ############# math.py ############# #
class TestMath:
def test_is_close(self):
a = 0.3
b = 0.1 + 0.2
# test scalar
assert math.is_close_nb(a, a)
assert math.is_close_nb(a, b)
assert math.is_close_nb(-a, -b)
assert not math.is_close_nb(-a, b)
assert not math.is_close_nb(a, -b)
assert math.is_close_nb(1e10 + a, 1e10 + b)
# test np.nan
assert not math.is_close_nb(np.nan, b)
assert not math.is_close_nb(a, np.nan)
# test np.inf
assert not math.is_close_nb(np.inf, b)
assert not math.is_close_nb(a, np.inf)
assert not math.is_close_nb(-np.inf, b)
assert not math.is_close_nb(a, -np.inf)
assert not math.is_close_nb(-np.inf, -np.inf)
assert not math.is_close_nb(np.inf, np.inf)
assert not math.is_close_nb(-np.inf, np.inf)
def test_is_close_or_less(self):
a = 0.3
b = 0.1 + 0.2
# test scalar
assert math.is_close_or_less_nb(a, a)
assert math.is_close_or_less_nb(a, b)
assert math.is_close_or_less_nb(-a, -b)
assert math.is_close_or_less_nb(-a, b)
assert not math.is_close_or_less_nb(a, -b)
assert math.is_close_or_less_nb(1e10 + a, 1e10 + b)
# test np.nan
assert not math.is_close_or_less_nb(np.nan, b)
assert not math.is_close_or_less_nb(a, np.nan)
# test np.inf
assert not math.is_close_or_less_nb(np.inf, b)
assert math.is_close_or_less_nb(a, np.inf)
assert math.is_close_or_less_nb(-np.inf, b)
assert not math.is_close_or_less_nb(a, -np.inf)
assert not math.is_close_or_less_nb(-np.inf, -np.inf)
assert not math.is_close_or_less_nb(np.inf, np.inf)
assert math.is_close_or_less_nb(-np.inf, np.inf)
def test_is_less(self):
a = 0.3
b = 0.1 + 0.2
# test scalar
assert not math.is_less_nb(a, a)
assert not math.is_less_nb(a, b)
assert not math.is_less_nb(-a, -b)
assert math.is_less_nb(-a, b)
assert not math.is_less_nb(a, -b)
assert not math.is_less_nb(1e10 + a, 1e10 + b)
# test np.nan
assert not math.is_less_nb(np.nan, b)
assert not math.is_less_nb(a, np.nan)
# test np.inf
assert not math.is_less_nb(np.inf, b)
assert math.is_less_nb(a, np.inf)
assert math.is_less_nb(-np.inf, b)
assert not math.is_less_nb(a, -np.inf)
assert not math.is_less_nb(-np.inf, -np.inf)
assert not math.is_less_nb(np.inf, np.inf)
assert math.is_less_nb(-np.inf, np.inf)
def test_is_addition_zero(self):
a = 0.3
b = 0.1 + 0.2
assert not math.is_addition_zero_nb(a, b)
assert math.is_addition_zero_nb(-a, b)
assert math.is_addition_zero_nb(a, -b)
assert not math.is_addition_zero_nb(-a, -b)
def test_add_nb(self):
a = 0.3
b = 0.1 + 0.2
assert math.add_nb(a, b) == a + b
assert math.add_nb(-a, b) == 0
assert math.add_nb(a, -b) == 0
assert math.add_nb(-a, -b) == -(a + b)
# ############# array.py ############# #
class TestArray:
def test_is_sorted(self):
assert array.is_sorted(np.array([0, 1, 2, 3, 4]))
assert array.is_sorted(np.array([0, 1]))
assert array.is_sorted(np.array([0]))
assert not array.is_sorted(np.array([1, 0]))
assert not array.is_sorted(np.array([0, 1, 2, 4, 3]))
# nb
assert array.is_sorted_nb(np.array([0, 1, 2, 3, 4]))
assert array.is_sorted_nb(np.array([0, 1]))
assert array.is_sorted_nb(np.array([0]))
assert not array.is_sorted_nb(np.array([1, 0]))
assert not array.is_sorted_nb(np.array([0, 1, 2, 4, 3]))
def test_insert_argsort_nb(self):
a = np.random.uniform(size=1000)
A = a.copy()
I = np.arange(len(A))
array.insert_argsort_nb(A, I)
np.testing.assert_array_equal(np.sort(a), A)
np.testing.assert_array_equal(a[I], A)
def test_get_ranges_arr(self):
np.testing.assert_array_equal(
array.get_ranges_arr(0, 3),
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
array.get_ranges_arr(0, [1, 2, 3]),
np.array([0, 0, 1, 0, 1, 2])
)
np.testing.assert_array_equal(
array.get_ranges_arr([0, 3], [3, 6]),
np.array([0, 1, 2, 3, 4, 5])
)
def test_uniform_summing_to_one_nb(self):
@njit
def set_seed():
np.random.seed(seed)
set_seed()
np.testing.assert_array_almost_equal(
array.uniform_summing_to_one_nb(10),
np.array([
5.808361e-02, 9.791091e-02, 2.412011e-05, 2.185215e-01,
2.241184e-01, 2.456528e-03, 1.308789e-01, 1.341822e-01,
8.453816e-02, 4.928569e-02
])
)
assert np.sum(array.uniform_summing_to_one_nb(10)) == 1
def test_renormalize(self):
assert array.renormalize(0, [0, 10], [0, 1]) == 0
assert array.renormalize(10, [0, 10], [0, 1]) == 1
np.testing.assert_array_equal(
array.renormalize(np.array([0, 2, 4, 6, 8, 10]), [0, 10], [0, 1]),
np.array([0., 0.2, 0.4, 0.6, 0.8, 1.])
)
np.testing.assert_array_equal(
array.renormalize_nb(np.array([0, 2, 4, 6, 8, 10]), [0, 10], [0, 1]),
np.array([0., 0.2, 0.4, 0.6, 0.8, 1.])
)
def test_min_rel_rescale(self):
np.testing.assert_array_equal(
array.min_rel_rescale(np.array([2, 4, 6]), [10, 20]),
np.array([10., 15., 20.])
)
np.testing.assert_array_equal(
array.min_rel_rescale(np.array([5, 6, 7]), [10, 20]),
np.array([10., 12., 14.])
)
np.testing.assert_array_equal(
array.min_rel_rescale(np.array([5, 5, 5]), [10, 20]),
np.array([10., 10., 10.])
)
def test_max_rel_rescale(self):
np.testing.assert_array_equal(
array.max_rel_rescale(np.array([2, 4, 6]), [10, 20]),
np.array([10., 15., 20.])
)
np.testing.assert_array_equal(
array.max_rel_rescale(np.array([5, 6, 7]), [10, 20]),
np.array([14.285714285714286, 17.142857142857142, 20.])
)
np.testing.assert_array_equal(
array.max_rel_rescale(np.array([5, 5, 5]), [10, 20]),
np.array([20., 20., 20.])
)
def test_rescale_float_to_int_nb(self):
@njit
def set_seed():
np.random.seed(seed)
set_seed()
np.testing.assert_array_equal(
array.rescale_float_to_int_nb(np.array([0.3, 0.3, 0.3, 0.1]), [10, 20], 70),
np.array([17, 14, 22, 17])
)
assert np.sum(array.rescale_float_to_int_nb(np.array([0.3, 0.3, 0.3, 0.1]), [10, 20], 70)) == 70
# ############# random.py ############# #
class TestRandom:
def test_set_seed(self):
random.set_seed(seed)
def test_seed():
return np.random.uniform(0, 1)
assert test_seed() == 0.3745401188473625
if 'NUMBA_DISABLE_JIT' not in os.environ or os.environ['NUMBA_DISABLE_JIT'] != '1':
@njit
def test_seed_nb():
return np.random.uniform(0, 1)
assert test_seed_nb() == 0.3745401188473625
# ############# enum.py ############# #
Enum = namedtuple('Enum', ['Attr1', 'Attr2'])(*range(2))
class TestEnum:
def test_caseins_getattr(self):
assert enum.caseins_getattr(Enum, 'Attr1') == 0
assert enum.caseins_getattr(Enum, 'attr1') == 0
assert enum.caseins_getattr(Enum, 'Attr2') == 1
assert enum.caseins_getattr(Enum, 'attr2') == 1
with pytest.raises(Exception) as e_info:
enum.caseins_getattr(Enum, 'Attr3')
def test_convert_str_enum_value(self):
assert enum.convert_str_enum_value(Enum, 0) == 0
assert enum.convert_str_enum_value(Enum, 10) == 10
assert enum.convert_str_enum_value(Enum, 10.) == 10.
assert enum.convert_str_enum_value(Enum, 'Attr1') == 0
assert enum.convert_str_enum_value(Enum, 'attr1') == 0
assert enum.convert_str_enum_value(Enum, ('attr1', 'attr2')) == (0, 1)
assert enum.convert_str_enum_value(Enum, [['attr1', 'attr2']]) == [[0, 1]]
def test_to_value_map(self):
assert enum.to_value_map(Enum) == {-1: None, 0: 'Attr1', 1: 'Attr2'}
# ############# data.py ############# #
class TestData:
def test_download(self):
def downloader(symbols, kw=None):
return {s: pd.DataFrame({
'feat1': np.arange(i, 5 + i),
'feat2': np.arange(i, 5 + i) + kw,
}, index=pd.Index(np.arange(i, 5 + i))) for i, s in enumerate(symbols)}
result = data.download('a', kw=10, downloader=downloader)
pd.testing.assert_frame_equal(
result,
pd.DataFrame({
'feat1': np.arange(5),
'feat2': np.arange(5) + 10
}, index=pd.Index(np.arange(5)))
)
result = data.download('a', kw=10, downloader=downloader, cols='feat1')
pd.testing.assert_series_equal(
result,
pd.Series(np.arange(5), name='feat1', index=pd.Index(np.arange(5)))
)
result = data.download('a', kw=10, downloader=downloader, cols=['feat1'])
pd.testing.assert_frame_equal(
result,
pd.DataFrame(np.arange(5), columns=['feat1'], index=pd.Index(np.arange(5)))
)
result = data.download(['a', 'b'], kw=10, downloader=downloader)
pd.testing.assert_frame_equal(
result['a'],
pd.DataFrame({
'feat1': np.arange(5),
'feat2': np.arange(5) + 10
}, index=pd.Index(np.arange(5)))
)
pd.testing.assert_frame_equal(
result['b'],
pd.DataFrame({
'feat1': np.arange(1, 6),
'feat2': np.arange(1, 6) + 10
}, index=pd.Index(np.arange(1, 6)))
)
result = data.download(['a', 'b'], kw=10, downloader=downloader, cols='feat1')
pd.testing.assert_series_equal(
result['a'],
pd.Series(np.arange(5), name='feat1', index=pd.Index(np.arange(5)))
)
pd.testing.assert_series_equal(
result['b'],
pd.Series(np.arange(1, 6), name='feat1', index=pd.Index(np.arange(1, 6)))
)
def test_concat_symbols(self):
def downloader(symbols, kw=None):
return {s: pd.DataFrame({
'feat1': np.arange(i, 5 + i),
'feat2': np.arange(i, 5 + i) + kw,
}, index=pd.Index(np.arange(i, 5 + i))) for i, s in enumerate(symbols)}
downloaded = data.download(['a', 'b'], kw=10, downloader=downloader)
result = data.concat_symbols(downloaded, treat_missing='nan')
pd.testing.assert_frame_equal(
result['feat1'],
pd.DataFrame({
'a': np.concatenate((np.arange(5), np.array([np.nan]))),
'b': np.concatenate((np.array([np.nan]), np.arange(1, 6)))
}, index=pd.Index(np.arange(6)), columns=pd.Index(['a', 'b'], name='symbol'))
)
pd.testing.assert_frame_equal(
result['feat2'],
pd.DataFrame({
'a': np.concatenate((np.arange(5), np.array([np.nan]))) + 10,
'b': np.concatenate((np.array([np.nan]), np.arange(1, 6))) + 10
}, index=pd.Index(np.arange(6)), columns=pd.Index(['a', 'b'], name='symbol'))
)
result = data.concat_symbols(downloaded, treat_missing='drop')
pd.testing.assert_frame_equal(
result['feat1'],
pd.DataFrame({
'a': np.arange(1, 5),
'b': np.arange(1, 5)
}, index=pd.Index(np.arange(1, 5)), columns=pd.Index(['a', 'b'], name='symbol'))  # api: pandas.Index
)
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractproperty
from pandas.tseries.holiday import AbstractHolidayCalendar
from six import with_metaclass
from numpy import searchsorted
import numpy as np
import pandas as pd
from pandas import (
DataFrame,
date_range,
DatetimeIndex,
DateOffset
)
from pandas.tseries.offsets import CustomBusinessDay
from zipline.utils.calendars._calendar_helpers import (
next_divider_idx,
previous_divider_idx,
is_open
)
from zipline.utils.memoize import remember_last, lazyval
start_default = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end_default = end_base + pd.Timedelta(days=365)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
class TradingCalendar(with_metaclass(ABCMeta)):
"""
A TradingCalendar represents the timing information of a single market
exchange.
The timing information is made up of two parts: sessions, and opens/closes.
A session represents a contiguous set of minutes, and has a label that is
midnight UTC. It is important to note that a session label should not be
considered a specific point in time, and that midnight UTC is just being
used for convenience.
For each session, we store the open and close time in UTC time.
"""
def __init__(self, start=start_default, end=end_default):
# Midnight in UTC for each trading day.
_all_days = date_range(start, end, freq=self.day, tz='UTC')
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = days_at_time(_all_days, self.open_time, self.tz,
self.open_offset)
self._closes = days_at_time(
_all_days, self.close_time, self.tz, self.close_offset
)
# `DatetimeIndex`s of nonstandard opens/closes
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
# In pandas 0.16.1 _opens and _closes will lose their timezone
# information. This looks like it has been resolved in 0.17.1.
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
self.schedule = DataFrame(
index=_all_days,
columns=['market_open', 'market_close'],
data={
'market_open': self._opens,
'market_close': self._closes,
},
dtype='datetime64[ns]',
)
self.market_opens_nanos = self.schedule.market_open.values.\
astype(np.int64)
self.market_closes_nanos = self.schedule.market_close.values.\
astype(np.int64)
self._trading_minutes_nanos = self.all_minutes.values.\
astype(np.int64)
self.first_trading_session = _all_days[0]
self.last_trading_session = _all_days[-1]
self._early_closes = pd.DatetimeIndex(
_special_closes.map(self.minute_to_session_label)
)
@lazyval
def day(self):
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
)
@abstractproperty
def name(self):
raise NotImplementedError()
@abstractproperty
def tz(self):
raise NotImplementedError()
@abstractproperty
def open_time(self):
raise NotImplementedError()
@abstractproperty
def close_time(self):
raise NotImplementedError()
@property
def open_offset(self):
return 0
@property
def close_offset(self):
return 0
@property
def regular_holidays(self):
"""
Returns
-------
pd.AbstractHolidayCalendar: a calendar containing the regular holidays
for this calendar
"""
return None
@property
def adhoc_holidays(self):
return []
@property
def special_opens(self):
"""
A list of special open times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_opens_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_closes(self):
"""
A list of special close times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_closes_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
# -----
@property
def opens(self):
return self.schedule.market_open
@property
def closes(self):
return self.schedule.market_close
@property
def early_closes(self):
return self._early_closes
def is_session(self, dt):
"""
Given a dt, returns whether it's a valid session label.
Parameters
----------
dt: pd.Timestamp
The dt that is being tested.
Returns
-------
bool
Whether the given dt is a valid session label.
"""
return dt in self.schedule.index
def is_open_on_minute(self, dt):
"""
Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt.
"""
return is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value)
def next_open(self, dt):
"""
Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open.
"""
idx = next_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz='UTC')
def next_close(self, dt):
"""
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
"""
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz='UTC')
def previous_open(self, dt):
"""
Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous open.
"""
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz='UTC')
def previous_close(self, dt):
"""
Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close.
"""
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz='UTC')
def next_minute(self, dt):
"""
Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute.
"""
idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
def previous_minute(self, dt):
"""
Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute.
"""
idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
def next_session_label(self, session_label):
"""
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise
def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1]
def minutes_for_session(self, session_label):
"""
Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the minutes for the given session.
"""
data = self.schedule.loc[session_label]
return self.all_minutes[
self.all_minutes.slice_indexer(
data.market_open,
data.market_close
)
]
def minutes_window(self, start_dt, count):
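# Returns `count` market minutes starting at start_dt (for a negative `count`, the
# |count| minutes ending at start_dt); if start_dt is not a market minute it is first
# snapped back to the previous session's close.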
try:
start_idx = self.all_minutes.get_loc(start_dt)
except KeyError:
# if this is not a market minute, go to the previous session's
# close
previous_session = self.minute_to_session_label(
start_dt, direction="previous"
)
previous_close = self.open_and_close_for_session(
previous_session
)[1]
start_idx = self.all_minutes.get_loc(previous_close)
end_idx = start_idx + count
if start_idx > end_idx:
return self.all_minutes[(end_idx + 1):(start_idx + 1)]
else:
return self.all_minutes[start_idx:end_idx]
def sessions_in_range(self, start_session_label, end_session_label):
"""
Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
return self.all_sessions[
self.all_sessions.slice_indexer(
start_session_label,
end_session_label
)
]
def sessions_window(self, session_label, count):
"""
Given a session label and a window size, returns a list of sessions
of size `count` + 1, that either starts with the given session
(if `count` is positive) or ends with the given session (if `count` is
negative).
Parameters
----------
session_label: pd.Timestamp
The label of the initial session.
count: int
Defines the length and the direction of the window.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
start_idx = self.schedule.index.get_loc(session_label)
end_idx = start_idx + count
return self.all_sessions[
min(start_idx, end_idx):max(start_idx, end_idx) + 1
]
def session_distance(self, start_session_label, end_session_label):
"""
Given a start and end session label, returns the distance between
them. For example, for three consecutive sessions Mon., Tues., and
Wed, `session_distance(Mon, Wed)` would return 2.
Parameters
----------
start_session_label: pd.Timestamp
The label of the start session.
end_session_label: pd.Timestamp
The label of the ending session.
Returns
-------
int
The distance between the two sessions.
"""
start_idx = self.all_sessions.searchsorted(
self.minute_to_session_label(start_session_label)
)
end_idx = self.all_sessions.searchsorted(
self.minute_to_session_label(end_session_label)
)
return abs(end_idx - start_idx)
def minutes_in_range(self, start_minute, end_minute):
"""
Given start and end minutes, return all the calendar minutes
in that range, inclusive.
Given minutes don't need to be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
start_idx = searchsorted(self._trading_minutes_nanos,
start_minute.value)
end_idx = searchsorted(self._trading_minutes_nanos,
end_minute.value)
if end_minute.value == self._trading_minutes_nanos[end_idx]:
# if the end minute is a market minute, increase by 1
end_idx += 1
return self.all_minutes[start_idx:end_idx]
def minutes_for_sessions_in_range(self, start_session_label,
end_session_label):
"""
Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
first_minute, _ = self.open_and_close_for_session(start_session_label)
_, last_minute = self.open_and_close_for_session(end_session_label)
return self.minutes_in_range(first_minute, last_minute)
def open_and_close_for_session(self, session_label):
"""
Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session.
"""
o_and_c = self.schedule.loc[session_label]
# `market_open` and `market_close` should be timezone aware, but pandas
# 0.16.1 does not appear to support this:
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
return (o_and_c['market_open'].tz_localize('UTC'),
o_and_c['market_close'].tz_localize('UTC'))
@property
def all_sessions(self):
return self.schedule.index
@property
def first_session(self):
return self.all_sessions[0]
@property
def last_session(self):
return self.all_sessions[-1]
@property
@remember_last
def all_minutes(self):
"""
Returns a DatetimeIndex representing all the minutes in this calendar.
"""
opens_in_ns = \
self._opens.values.astype('datetime64[ns]')
closes_in_ns = \
self._closes.values.astype('datetime64[ns]')
deltas = closes_in_ns - opens_in_ns
        # + 1 because we want 390 minutes per standard day, not 389
daily_sizes = (deltas / NANOS_IN_MINUTE) + 1
num_minutes = np.sum(daily_sizes).astype(np.int64)
# One allocation for the entire thing. This assumes that each day
# represents a contiguous block of minutes.
all_minutes = np.empty(num_minutes, dtype='datetime64[ns]')
idx = 0
for day_idx, size in enumerate(daily_sizes):
# lots of small allocations, but it's fast enough for now.
# size is a np.timedelta64, so we need to int it
size_int = int(size)
all_minutes[idx:(idx + size_int)] = \
np.arange(
opens_in_ns[day_idx],
closes_in_ns[day_idx] + NANOS_IN_MINUTE,
NANOS_IN_MINUTE
)
idx += size_int
return DatetimeIndex(all_minutes).tz_localize("UTC")
def minute_to_session_label(self, dt, direction="next"):
"""
Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session.
"""
idx = searchsorted(self.market_closes_nanos, dt.value)
current_or_next_session = self.schedule.index[idx]
if direction == "previous":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value):
# if the exchange is closed, use the previous session
return self.schedule.index[idx - 1]
elif direction == "none":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value):
# if the exchange is closed, blow up
raise ValueError("The given dt is not an exchange minute!")
elif direction != "next":
# invalid direction
raise ValueError("Invalid direction parameter: "
"{0}".format(direction))
return current_or_next_session
def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
"""
Union an iterable of pairs of the form (time, calendar)
and an iterable of pairs of the form (time, [dates])
(This is shared logic for computing special opens and special closes.)
"""
_dates = DatetimeIndex([], tz='UTC').union_many(
[
holidays_at_time(calendar, start_date, end_date, time_,
self.tz)
for time_, calendar in calendars
] + [
days_at_time(datetimes, time_, self.tz)
for time_, datetimes in ad_hoc_dates
]
)
return _dates[(_dates >= start_date) & (_dates <= end_date)]
def _calculate_special_opens(self, start, end):
return self._special_dates(
self.special_opens,
self.special_opens_adhoc,
start,
end,
)
def _calculate_special_closes(self, start, end):
return self._special_dates(
self.special_closes,
self.special_closes_adhoc,
start,
end,
)
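# Illustrative sketch (not part of the original module): how the session/minute helpers
# defined above typically compose. `calendar` is assumed to be an already-constructed
# instance of the exchange calendar class.
def _example_minutes_for_containing_session(calendar, dt):
    """Return every trading minute of the session containing `dt`.

    If `dt` falls outside trading hours, the next session is used, which mirrors the
    default ``direction="next"`` behaviour of ``minute_to_session_label``.
    """
    session_label = calendar.minute_to_session_label(dt, direction="next")
    return calendar.minutes_for_session(session_label)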
def days_at_time(days, t, tz, day_offset=0):
"""
Shift an index of days to time t, interpreted in tz.
Overwrites any existing tz info on the input.
Parameters
----------
days : DatetimeIndex
The "base" time which we want to change.
t : datetime.time
The time we want to offset @days by
tz : pytz.timezone
The timezone which these times represent
day_offset : int
The number of days we want to offset @days by
"""
if len(days) == 0:
return days
# Offset days without tz to avoid timezone issues.
    days = DatetimeIndex(days)
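    # Illustrative usage sketch (the remainder of this function is truncated in this
    # excerpt). Assuming pandas and pytz are available, shifting session midnights to a
    # 9:31 New York open would look like:
    #
    #     sessions = pd.date_range("2016-01-04", "2016-01-08")
    #     opens = days_at_time(sessions, datetime.time(9, 31),
    #                          pytz.timezone("America/New_York"))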
# Finds and scores all framework mutations from input antibody file (csv format). Outputs normalized FR scores.
# Verbose mode prints full pairwise alignment of each antibody.
# output_mutations option creates a csv with all antibody scores
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
def get_position(pos, germ):
""" Place gaps for IMGT numbering scheme """
try:
return positions[positions[germ] == pos].index[0]
    except (IndexError, KeyError):
        # Position not found for this germline in the IMGT numbering table
        return 0
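# Illustrative sketch: `positions` is assumed to be the IMGT numbering table loaded
# elsewhere in this script (from IMGT_num.csv), with one column per germline gene.
# Mapping germline position 52 of IGHV1-2 onto the IMGT index then looks like:
#
#     imgt_idx = get_position(52, "1-2")   # returns 0 if the position is not found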
dict = {
"1-2": "QVQLVQSGAEVKKPGASVKVSCKASGYTFTGYYMHWVRQAPGQGLEWMGRINPNSGGTNYAQKFQGRVTSTRDTSISTAYMELSRLRSDDTVVYYCAR",
"1-3": "QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYAMHWVRQAPGQRLEWMGWINAGNGNTKYSQKFQGRVTITRDTSASTAYMELSSLRSEDTAVYYCAR",
"1-24": "QVQLVQSGAEVKKPGASVKVSCKVSGYTLTELSMHWVRQAPGKGLEWMGGFDPEDGETIYAQKFQGRVTMTEDTSTDTAYMELSSLRSEDTAVYYCAT",
"1-46": "QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYYMHWVRQAPGQGLEWMGIINPSGGSTSYAQKFQGRVTMTRDTSTSTVYMELSSLRSEDTAVYYCAR",
"1-69": "QVQLVQSGAEVKKPGSSVKVSCKASGGTFSSYAISWVRQAPGQGLEWMGGIIPIFGTANYAQKFQGRVTITADESTSTAYMELSSLRSEDTAVYYCAR",
"2-5": "QITLKESGPTLVKPTQTLTLTCTFSGFSLSTSGVGVGWIRQPPGKALEWLALIYWNDDKRYSPSLKSRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR",
"3-7": "EVQLVESGGGLVQPGGSLRLSCAASGFTFSSYWMSWVRQAPGKGLEWVANIKQDGSEKYYVDSVKGRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR",
"3-9": "EVQLVESGGGLVQPGRSLRLSCAASGFTFDDYAMHWVRQAPGKGLEWVSGISWNSGSIGYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTALYYCAKD",
"3-20": "EVQLVESGGGVVRPGGSLRLSCAASGFTFDDYGMSWVRQAPGKGLEWVSGINWNGGSTGYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTALYHCAR",
"3-21": "EVQLVESGGGLVKPGGSLRLSCAASGFTFSSYSMNWVRQAPGKGLEWVSSISSSSSYIYYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR",
"3-23": "EVQLLESGGGLVQPGGSLRLSCAASGFTFSSYAMSWVRQAPGKGLEWVSAISGSGGSTYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK",
"3-30": "QVQLVESGGGVVQPGRSLRLSCAASGFTFSSYAMHWVRQAPGKGLEWVAVISYDGSNKYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR",
"3-33": "QVQLVESGGGVVQPGRSLRLSCAASGFTFSSYGMHWVRQAPGKGLEWVAVIWYDGSNKYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR",
"3-48": "EVQLVESGGGLVQPGGSLRLSCAASGFTFSSYSMNWVRQAPGKGLEWVSYISSSSSTIYYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR",
"3-66": "EVQLVESGGGLVQPGGSLRLSCAASGFTVSSNYMSWVRQAPGKGLEWVSVIYSGGSTYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR",
"3-74": "EVQLVESGGGLVQPGGSLRLSCAASGFTFSSYWMHWVRQAPGKGLVWVSRINSDGSSTSYADSVKGRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR",
"4-4": "QVQLQESGPGLVKPPGTLSLTCAVSGGSISSSNWWSWVRQPPGKGLEWIGEIYHSGSTNYNPSLKSRVTISVDKSKNQFSLKLSSVTAADTAVYCCAR",
"4-30-4": "QVQLQESGPGLVKPSQTLSLTCTVSGGSISSGDYYWSWIRQPPGKGLEWIGYIYYSGSTYYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-31": "QVQLQESGPGLVKPSQTLSLTCTVSGGSISSGGYYWSWIRQHPGKGLEWIGYIYYSGSTYYNPSLKSLVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-34": "QVQLQQWGAGLLKPSETLSLTCAVYGGSFSGYYWSWIRQPPGKGLEWIGEINHSGSTNYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-39": "QLQLQESGPGLVKPSETLSLTCTVSGGSISSSSYYWGWIRQPPGKGLEWIGSIYYSGSTYYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-59": "QVQLQESGPGLVKPSETLSLTCTVSGGSISSYYWSWIRQPPGKGLEWIGYIYYSGSTNYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-61": "QVQLQESGPGLVKPSETLSLTCTVSGGSVSSGSYYWSWIRQPPGKGLEWIGYIYYSGSTNYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"5-51": "EVQLVQSGAEVKKPGESLKISCKGSGYSFTSYWIGWVRQMPGKGLEWMGIIYPGDSDTRYSPSFQGQVTISADKSISTAYLQWSSLKASDTAMYYCAR",
"6-1": "QVQLQQSGPGLVKPSQTLSLTCAISGDSVSSNSAAWNWIRQSPSRGLEWLGRTYYRSKWYNDYAVSVKSRITINPDTSKNQFSLQLNSVTPEDTAVYYCAR"
}
verbose = False  # True: output alignment and individual scores, False: plot results
output_csv = True #output csv file of calculated scores
output_mutations = True #output csv file with all antibody scores
#input files
ab_filename = "FDA_Abs.csv" #input antibody file (use "Flagged" in name for phase identification)
norm_filename = "normalization.csv" #input normalization constants
numbering = "IMGT_num.csv" #index to IMGT numbering scheme
#read input files
Abs = pd.read_csv(ab_filename)
import pandas as pd
def trades_to_candles(trades_data, price_column="price", timestamp_column="created_at", amount_column="amount",
time_interval="1min"):
"""
    Aggregate a trades data frame into candle (OHLC) data over the given time interval.
:param pd.DataFrame trades_data: Trades data frame.
:param str price_column: Price column.
:param str timestamp_column: Timestamp column.
:param str time_interval: Time interval. Must be one of https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
:param str amount_column: Amount column to calculate the trades volume.
:return: pd.DataFrame with candles data.
"""
# Input validation
if not isinstance(trades_data, pd.DataFrame):
raise ValueError(f"The parameter trades_data must be a data frame. Got {type(trades_data)} instead.")
elif not isinstance(price_column, str):
raise ValueError(f"The parameter price_column must be a string. Got {type(price_column)} instead.")
elif not isinstance(timestamp_column, str):
raise ValueError(f"The parameter timestamp_column must be a string. Got {type(timestamp_column)} instead.")
elif not isinstance(time_interval, str):
raise ValueError(f"The parameter time_interval must be a string. Got {type(time_interval)} instead.")
elif not isinstance(amount_column, str):
raise ValueError(f"The parameter amount_column must be a string. Got {type(amount_column)} instead.")
cols = list(trades_data.columns)
if price_column not in cols:
raise ValueError(f"The parameter price_column must be one of the columns of the trades_data data frame.")
elif timestamp_column not in cols:
raise ValueError(f"The parameter price_column must be one of the columns of the trades_data data frame.")
elif amount_column not in cols:
raise ValueError(f"The parameter price_column must be one of the columns of the trades_data data frame.")
# Cast timestamp column as datetime
trades_data[timestamp_column] = pd.to_datetime(trades_data[timestamp_column])
# Group by time_interval and get candles values
candles = trades_data.groupby(pd.Grouper(key=timestamp_column, freq=time_interval)).agg(
open=pd.NamedAgg(column=price_column, aggfunc="first"),
        close=pd.NamedAgg(column=price_column, aggfunc="last")
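    # Illustrative usage sketch (the aggregation above is truncated in this excerpt; the
    # remaining high/low/volume fields would be added analogously). Given a raw trades
    # frame, candles are obtained with:
    #
    #     candles = trades_to_candles(trades, price_column="price",
    #                                 timestamp_column="created_at",
    #                                 amount_column="amount", time_interval="5min")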
"""Select repositories"""
import pandas as pd
from sklearn.model_selection import train_test_split
def filter_columns(data,column_names):
"""Keep selected columns in the dataframe."""
selected_columns = data[column_names]
return selected_columns
def filter_rows_by_value(data, min):
"""Select rows where each value is greater than the given min threshhold."""
rows = data[data > min].dropna().astype(int)
return rows
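# Illustrative sketch (hypothetical column names): the two helpers above can be chained
# to keep count-like columns and drop repositories below a minimum threshold.
def _example_select_popular_repos(repos, min_count=10):
    counts = filter_columns(repos, ["stargazers_count", "forks_count"])
    return filter_rows_by_value(counts, min_count)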
def groupby_dummy_transformer(data, dummy_val, groupby_val):
"""Groupby dummies based on given columns."""
repo = data[[dummy_val,groupby_val]]
    grouped = pd.get_dummies(repo, columns=[dummy_val], dtype=int, prefix='', prefix_sep='')
"""Class to process full HydReSGeo dataset.
Note: If IRUtils.py is not available, you need to download it before the
installation of the package into the `hprocessing/` folder:
.. code:: bash
wget -P hprocessing/ https://raw.githubusercontent.com/felixriese/thermal
-image-processing/master/tiprocessing/IRUtils.py
"""
import configparser
import glob
import itertools
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from .ProcessEnviFile import (ProcessEnviFile, getEnviFile, getEnviHeader,
readEnviHeader)
from .IRUtils import getIRDataFromMultipleZones
class ProcessFullDataset():
"""
Class to process the full HydReSGeo dataset.
Parameters
----------
    hyp_hdr_path : str
        Path to the ENVI header file of the hyperspectral image (low resolution)
meas_name : str
Name of measurement
positions_hyp : dict
Dictionary with information of the positions config file for the
hyperspectral camera
positions_lwir : dict
Dictionary with information of the positions config file for the
lwir camera
zone_list : list
List of measurement zones in the image. That does not include the
spectralon (white reference). If a zone needs to be ignored, it needs
to be removed from this list.
lwir_path : str
Path to long-wave infrared (LWIR) data
    soilmoisture_path : str
        Path to soil moisture data
masks : pd.DataFrame or None
Masks for hyperspectral images
    grid : tuple, optional (default=(1, 1))
        Grid layout passed through to ProcessEnviFile
imageshape : tuple, optional (default= (50, 50))
Height and width of the image
time_window_width : int, optional (default=6)
Time window width to match the hyperspectral image to the soil moisture
data. The unit of the time window width is minutes.
hyp_stat_mode : str
Mode for calculating the "mean spectrum" of a hyperspectral image.
Possible values: median, mean, max, max10 (= maximum of the top 10
pixels), std.
hyp_spectralon_factor : float, optional (default=0.95)
Factor of how much solar radiation the spectralon reflects.
verbose : int, optional (default=0)
Controls the verbosity.
Todo
-----
- Add attributes to class docstring.
- Remove self.date and self.time, only use self.datetime. Remove all
unnecessary functions of self.date and self.time.
"""
def __init__(self,
hyp_hdr_path: str,
meas_name: str,
positions_hyp: dict,
positions_lwir: dict,
zone_list: list,
lwir_path: str,
soilmoisture_path: str,
masks: pd.DataFrame,
grid: tuple = (1, 1),
imageshape: tuple = (50, 50),
time_window_width: int = 6,
hyp_stat_mode: str = "median",
hyp_spectralon_factor: float = 0.95,
verbose=0):
"""Initialize ProcessDataset instance."""
self.hyp_hdr_path = hyp_hdr_path
self.meas_name = meas_name
self.positions_hyp = positions_hyp
self.positions_lwir = positions_lwir
self.zone_list = zone_list
self.lwir_path = lwir_path
self.soilmoisture_path = soilmoisture_path
self.masks = masks
self.grid = grid
self.imageshape = imageshape
self.time_window_width = time_window_width
self.hyp_stat_mode = hyp_stat_mode
self.hyp_spectralon_factor = hyp_spectralon_factor
self.verbose = verbose
# get Envi files
self.envi_hdr_highres_path = self.hyp_hdr_path[:-4] + "_highres.hdr"
self.hdr, self.envi_img = getEnviFile(self.hyp_hdr_path)
self.hdr_highres = getEnviHeader(self.envi_hdr_highres_path)
self.date, self.time = readEnviHeader(self.hdr_highres)
# set datetime TODO: remove hard-coded timezone
self.datetime = pd.to_datetime(self.date+" "+self.time+"+02:00",
utc=True)
# read out header file
self.wavelengths = self.hdr_highres["Wavelength"]
self.bbl = self.hdr_highres["bbl"]
# get measurement index
self.index_of_meas = int(np.argwhere(
positions_hyp["measurement"].values == meas_name))
self.mask = None
        # improvised mapping from sensor zone labels (A1-D2) to image zones (zone1-zone8)
self.zone_dict = {
"A1": "zone1", "A2": "zone2", "B1": "zone3", "B2": "zone4",
"C1": "zone5", "C2": "zone6", "D1": "zone7", "D2": "zone8"}
def process(self) -> pd.DataFrame:
"""
Process a full dataset.
Returns
-------
pd.DataFrame
Dataframe with hyperspectral, LWIR, and soil moisture data for
one image.
"""
# set mask
if self.masks is not None:
mask_index = self.masks.index[
self.masks["measurement"] == self.meas_name].tolist()[0]
if self.index_of_meas != mask_index:
raise IOError(("positions.csv and mask.csv don't have the"
"same sequence of dates."))
self.mask = getMask(
masks=self.masks,
index_of_meas=self.index_of_meas,
imageshape=self.imageshape)
        # quick sanity check on an arbitrary band to detect an empty hyperspectral image
if np.sum(self.envi_img[:, :, 5]) == 0:
if self.verbose:
print("Error: The hyperspectral image is empty.")
return None
# process
envi_processor = ProcessEnviFile(
image=self.envi_img,
wavelengths=self.wavelengths,
bbl=self.bbl,
zone_list=self.zone_list,
positions=self.positions_hyp,
index_of_meas=self.index_of_meas,
mask=self.mask,
grid=self.grid,
stat_mode=self.hyp_stat_mode,
spectralon_factor=self.hyp_spectralon_factor)
df_hyp = envi_processor.getMultipleSpectra()
# add datetime as column
df_hyp["datetime"] = self.datetime
# add soil moisture data
df_hyd = self.getSoilMoistureData()
df_hyd = df_hyd.drop(labels=["zone"], axis=1)
# add IR data
df_lwir = self.getLwirData()
df_lwir = df_lwir.drop(labels=["zone"], axis=1)
return pd.concat([df_hyp, df_hyd, df_lwir], axis=1)
def getSoilMoistureData(self):
"""
Get soil moisture data.
To match the dates of the soil moisture measurements and the
hyperspectral image, the timezones are converted to UTC.
Returns
-------
pd.Dataframe
Dataframe of soil moisture measurements which correspond to the
hyperspectral image of this instance.
Todo
----
- Move the CSV file read out into process-function outside this file
- Add an optional time shift correction between soil moisture data and
the hyperspectral data.
"""
soilmoisture_sensors = getUppermostSoilMoistureSensors()
# read out soil moisture data
df_sm = pd.read_csv(self.soilmoisture_path)
df_sm["timestamp"] = pd.to_datetime(df_sm["timestamp"], utc=True)
sm_dict = {"zone": [], "volSM_vol%": [], "T_C": []}
for i, sensor in enumerate(soilmoisture_sensors["number"]):
# only consider sensors in zone_list
zone = soilmoisture_sensors["zone"].iloc[i]
if self.zone_dict[zone] not in self.zone_list:
continue
# find nearest date
nearest_date, time_delta = findNearestDate(
df_sm[df_sm["sensorID"] == "T"+str(sensor)].timestamp,
self.datetime)
if time_delta > self.time_window_width / 2:
if self.verbose:
print("Warning: Could not find a soil moisture measurement"
"for sensor {0}".format(sensor))
continue
nearest_row = df_sm[(df_sm["sensorID"] == "T"+str(sensor)) &
(df_sm["timestamp"] == nearest_date)]
sm_dict["zone"].append(self.zone_dict[zone])
sm_dict["volSM_vol%"].append(nearest_row["volSM_vol%"].values[0])
sm_dict["T_C"].append(nearest_row["T_C"].values[0])
        return pd.DataFrame(sm_dict)
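    # Illustrative usage sketch (file paths and zone names are assumptions; the class is
    # truncated at this point in the excerpt):
    #
    #     processor = ProcessFullDataset(
    #         hyp_hdr_path="data/meas1/image.hdr", meas_name="meas1",
    #         positions_hyp=positions_hyp, positions_lwir=positions_lwir,
    #         zone_list=["zone1", "zone2"], lwir_path="data/meas1/lwir/",
    #         soilmoisture_path="data/soilmoisture.csv", masks=None)
    #     df = processor.process()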
import h5py
import numpy as np
import pandas as pd
import random
from tqdm import tqdm
from sklearn.preprocessing import Imputer, Normalizer
def load_data(data_path, split=None, label=None, protein_name_list=None, sample_size=None, features_list=None, mode=None):
input_fo = h5py.File(data_path, 'r')
if split is not None:
if split not in ["train", "test"]:
print ("invalid split option")
return None
else:
pass
data_frame = pd.DataFrame()
if protein_name_list is None:
protein_name_list = list(input_fo[split].keys())
print("loading", len(protein_name_list), "proteins.")
if split is not None:
for i, protein_name in enumerate(tqdm(protein_name_list)):
protein_df = load_split_protein(data_path, label=label, protein_name=protein_name, split=split,
sample_size=sample_size,
features_list=features_list, mode=mode)
data_frame = pd.concat([data_frame, protein_df])
else:
for i, protein_name in enumerate(tqdm(protein_name_list)):
protein_df = load_protein(data_path, label=label, protein_name=protein_name, sample_size=sample_size,
features_list=features_list, mode=mode)
data_frame = pd.concat([data_frame, protein_df])
return data_frame
def load_protein(data_path, label=None, protein_name=None, sample_size=None, features_list=None, mode=None):
input_fo = h5py.File(data_path, 'r')
if label is None:
label = "label"
# if features_list is none then use all of the features
if features_list is None:
features_list = list(input_fo[str(protein_name)].keys())
else:
if "receptor" not in features_list:
features_list.append("receptor")
if "drugID" not in features_list:
features_list.append("drugID")
if "label" not in features_list:
features_list.append("label")
data_frame = pd.DataFrame()
for idx, feature in enumerate(features_list):
data_frame[feature] = np.ravel(input_fo[str(protein_name)+"/"+str(feature)])
return data_frame
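# Illustrative sketch (hypothetical HDF5 path and protein name): load one protein's
# feature table and split it into a numeric feature matrix and labels.
def _example_load_features_and_labels(data_path="docking_features.h5", protein_name="protein_a"):
    df = load_protein(data_path, protein_name=protein_name)
    y = df["label"].values
    X = df.drop(columns=["label", "receptor", "drugID"], errors="ignore")
    return X, y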
def load_split_protein(data_path, split=None, label=None, protein_name=None, sample_size=None, features_list=None, mode=None):
if split is not None:
if split not in ["train", "test"]:
print ("caught exception")
return None
else:
pass
if split is None:
print("must supply a split")
return None
input_fo = h5py.File(data_path, 'r')
if features_list is None:
features_list = list(input_fo[split][str(protein_name)].keys())
else:
if "receptor" not in features_list:
features_list.append("receptor")
if "drugID" not in features_list:
features_list.append("drugID")
if "label" not in features_list:
features_list.append("label")
data_frame = pd.DataFrame()
for idx, feature in enumerate(features_list):
data_frame[feature] = np.ravel(input_fo[str(split)+"/"+str(protein_name)+"/"+str(feature)])
return data_frame
def generate_held_out_set(data_path, protein_name_list=None, features_list=None, mode=None, sample_size=None):
    """Leave-one-protein-out generator yielding (X_train, y_train, X_test, y_test)."""
    input_fo = h5py.File(data_path, 'r')
    protein_list = protein_name_list if protein_name_list is not None else list(input_fo.keys())
    for holdout_protein in protein_list:
        print("holdout_protein : ", holdout_protein)
        # Train on every protein except the held-out one.
        train_proteins = [p for p in protein_list if p != holdout_protein]
        train_df = load_data(data_path=data_path, protein_name_list=train_proteins,
                             features_list=features_list, mode=mode, sample_size=sample_size)
        test_df = load_protein(data_path=data_path, protein_name=holdout_protein,
                               features_list=features_list, mode=mode, sample_size=sample_size)
        # Separate labels from features and drop identifier columns before scaling.
        y_train = train_df.pop("label").values
        y_test = test_df.pop("label").values
        X_train = train_df.drop(columns=["receptor", "drugID"], errors="ignore")
        X_test = test_df.drop(columns=["receptor", "drugID"], errors="ignore")
        # Re-fitting the imputer/normalizer for every split is slow but keeps splits independent.
        X_train = Normalizer().fit_transform(Imputer().fit_transform(X_train))
        X_test = Normalizer().fit_transform(Imputer().fit_transform(X_test))
        yield X_train, y_train.flatten(), X_test, y_test.flatten()
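# Illustrative sketch: consuming the leave-one-protein-out generator above with any
# scikit-learn style estimator (`model` is an assumption, not defined in this module).
#
#     for X_train, y_train, X_test, y_test in generate_held_out_set("docking_features.h5"):
#         model.fit(X_train, y_train)
#         print(model.score(X_test, y_test))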
def compute_intersection(small_list, big_list):
intersect = set(big_list).intersection(set(small_list))
return intersect
def compute_proportion(small_list, big_list):
ct = len(compute_intersection(small_list,big_list))
return ct/len(small_list)
def read_feature_list(feature_path):
with open(feature_path, "r") as input_file:
feature_list = []
for line in input_file:
line = line.strip('\n')
feature_list.append(line)
return feature_list
def output_feature_summary(output_path, full_feature_path, subset_feature_path, feature_list_root_path="/u/vul-d1/scratch/wdjo224/data/"):
output_file = open(output_path, mode='w+')
print("feature file: "+str(subset_feature_path),file=output_file)
feature_subset = pd.read_csv(subset_feature_path, header=None)
# load the full feature set
    full_features = pd.read_csv(full_feature_path, header=None)
import json
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
abs_error,
cross_entropy,
explain_prediction,
explain_predictions,
explain_predictions_best_worst
)
from evalml.problem_types import ProblemTypes
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
test_features = [[1], np.ones((15, 1)), pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}).iloc[0],
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), pd.DataFrame()]
@pytest.mark.parametrize("test_features", test_features)
def test_explain_prediction_value_error(test_features):
with pytest.raises(ValueError, match="features must be stored in a dataframe or datatable with exactly one row."):
explain_prediction(None, input_features=test_features, training_data=None)
explain_prediction_answer = """Feature Name Feature Value Contribution to Prediction
=========================================================
d 40.00 +++++
b 20.00 -----""".splitlines()
explain_prediction_regression_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": None
}]
}
explain_predictions_regression_df_answer = pd.DataFrame({'feature_names': ['d', 'b'],
'feature_values': [40, 20],
'qualitative_explanation': ['+++++', '-----'],
"quantitative_explanation": [None, None]})
explain_prediction_binary_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": "class_1"
}]
}
explain_prediction_binary_df_answer = pd.DataFrame({
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": ["class_1", "class_1"]
})
explain_prediction_multiclass_answer = """Class: class_0
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++++
c 30.00 ---
Class: class_1
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++
b 20.00 ++
Class: class_2
Feature Name Feature Value Contribution to Prediction
=========================================================
c 30.00 ---
d 40.00 ---
""".splitlines()
explain_prediction_multiclass_dict_answer = {
"explanations": [
{"feature_names": ["a", "c"],
"feature_values": [10, 30],
"qualitative_explanation": ["+++++", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_0"},
{"feature_names": ["a", "b"],
"feature_values": [10, 20],
"qualitative_explanation": ["+++", "++"],
"quantitative_explanation": [None, None],
"class_name": "class_1"},
{"feature_names": ["c", "d"],
"feature_values": [30, 40],
"qualitative_explanation": ["---", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_2"},
]
}
explain_prediction_multiclass_df_answer = pd.DataFrame({
"feature_names": ["a", "c", "a", "b", "c", "d"],
"feature_values": [10, 30, 10, 20, 30, 40],
"qualitative_explanation": ["+++++", "---", "+++", "++", "---", "---"],
"quantitative_explanation": [None, None, None, None, None, None],
"class_name": ['class_0', 'class_0', 'class_1', 'class_1', 'class_2', 'class_2']
})
@pytest.mark.parametrize("problem_type, output_format, shap_values, normalized_shap_values, answer",
[(ProblemTypes.REGRESSION,
"text",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_answer),
(ProblemTypes.REGRESSION,
"dict",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_regression_dict_answer
),
(ProblemTypes.REGRESSION,
"dataframe",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_predictions_regression_df_answer
),
(ProblemTypes.BINARY,
"text",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_answer),
(ProblemTypes.BINARY,
"dict",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_dict_answer),
(ProblemTypes.BINARY,
"dataframe",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_df_answer),
(ProblemTypes.MULTICLASS,
"text",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_answer),
(ProblemTypes.MULTICLASS,
"dict",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_dict_answer),
(ProblemTypes.MULTICLASS,
"dataframe",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_df_answer)
])
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@patch("evalml.model_understanding.prediction_explanations._user_interface._compute_shap_values")
@patch("evalml.model_understanding.prediction_explanations._user_interface._normalize_shap_values")
def test_explain_prediction(mock_normalize_shap_values,
mock_compute_shap_values,
problem_type, output_format, shap_values, normalized_shap_values, answer,
input_type):
mock_compute_shap_values.return_value = shap_values
mock_normalize_shap_values.return_value = normalized_shap_values
pipeline = MagicMock()
pipeline.problem_type = problem_type
pipeline.classes_ = ["class_0", "class_1", "class_2"]
# By the time we call transform, we are looking at only one row of the input data.
pipeline.compute_estimator_features.return_value = ww.DataTable(pd.DataFrame({"a": [10], "b": [20], "c": [30], "d": [40]}))
features = pd.DataFrame({"a": [1], "b": [2]})
training_data = pd.DataFrame()
if input_type == "ww":
features = ww.DataTable(features)
training_data = ww.DataTable(training_data)
table = explain_prediction(pipeline, features, output_format=output_format, top_k=2, training_data=training_data)
if isinstance(table, str):
compare_two_tables(table.splitlines(), answer)
elif isinstance(table, pd.DataFrame):
pd.testing.assert_frame_equal(table, answer)
else:
assert table == answer
def test_error_metrics():
pd.testing.assert_series_equal(abs_error(pd.Series([1, 2, 3]), pd.Series([4, 1, 0])), pd.Series([3, 1, 3]))
pd.testing.assert_series_equal(cross_entropy(pd.Series([1, 0]),
pd.DataFrame({"a": [0.1, 0.2], "b": [0.9, 0.8]})),
pd.Series([-np.log(0.9), -np.log(0.2)]))
input_features_and_y_true = [([[1]], pd.Series([1]), "^Input features must be a dataframe with more than 10 rows!"),
(pd.DataFrame({"a": [1]}), pd.Series([1]), "^Input features must be a dataframe with more than 10 rows!"),
(pd.DataFrame({"a": range(15)}), pd.Series(range(12)), "^Parameters y_true and input_features must have the same number of data points.")
]
@pytest.mark.parametrize("input_features,y_true,error_message", input_features_and_y_true)
def test_explain_predictions_best_worst_value_errors(input_features, y_true, error_message):
with pytest.raises(ValueError, match=error_message):
explain_predictions_best_worst(None, input_features, y_true)
def test_explain_predictions_raises_pipeline_score_error():
with pytest.raises(PipelineScoreError, match="Division by zero!"):
def raise_zero_division(input_features):
raise ZeroDivisionError("Division by zero!")
pipeline = MagicMock()
pipeline.problem_type = ProblemTypes.BINARY
pipeline.predict_proba.side_effect = raise_zero_division
explain_predictions_best_worst(pipeline, pd.DataFrame({"a": range(15)}), pd.Series(range(15)))
def test_explain_predictions_value_errors():
with pytest.raises(ValueError, match="Parameter input_features must be a non-empty dataframe."):
explain_predictions(None, pd.DataFrame())
def test_output_format_checked():
input_features, y_true = pd.DataFrame(data=[range(15)]), pd.Series(range(15))
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received bar"):
explain_predictions(None, input_features, output_format="bar")
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received xml"):
explain_prediction(None, input_features=input_features, training_data=None, output_format="xml")
input_features, y_true = pd.DataFrame(data=range(15)), pd.Series(range(15))
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received foo"):
explain_predictions_best_worst(None, input_features, y_true=y_true, output_format="foo")
regression_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
Absolute Difference: 1.0
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
Absolute Difference: 4.0
Index ID: {index_1}
table goes here
"""
regression_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 1, "target_value": 2,
"error_name": "Absolute Difference", "error_value": 1.},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 2, "target_value": 3,
"error_name": "Absolute Difference", "error_value": 4.},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
regression_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"predicted_value": [1, 2],
"target_value": [2, 3],
"error_name": ["Absolute Difference"] * 2,
"error_value": [1., 4.],
"prefix": ["best", "worst"],
})
no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
table goes here
2 of 2
table goes here
"""
no_best_worst_answer_dict = {
"explanations": [
{"explanations": ["explanation_dictionary_goes_here"]},
{"explanations": ["explanation_dictionary_goes_here"]}
]
}
no_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"prediction_number": [0, 1]
})
binary_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [benign: 0.05, malignant: 0.95]
Predicted Value: malignant
Target Value: malignant
Cross Entropy: 0.2
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Probabilities: [benign: 0.1, malignant: 0.9]
Predicted Value: malignant
Target Value: benign
Cross Entropy: 0.78
Index ID: {index_1}
table goes here
"""
binary_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": {"benign": 0.05, "malignant": 0.95},
"predicted_value": "malignant", "target_value": "malignant",
"error_name": "Cross Entropy", "error_value": 0.2},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": {"benign": 0.1, "malignant": 0.9},
"predicted_value": "malignant", "target_value": "benign",
"error_name": "Cross Entropy", "error_value": 0.78},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
binary_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_benign_probability": [0.05, 0.1],
"label_malignant_probability": [0.95, 0.9],
"predicted_value": ["malignant", "malignant"],
"target_value": ["malignant", "benign"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.2, 0.78]
})
multiclass_table = """Class: setosa
table goes here
Class: versicolor
table goes here
Class: virginica
table goes here"""
multiclass_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [setosa: 0.8, versicolor: 0.1, virginica: 0.1]
Predicted Value: setosa
Target Value: setosa
Cross Entropy: 0.15
Index ID: {{index_0}}
{multiclass_table}
Worst 1 of 1
Predicted Probabilities: [setosa: 0.2, versicolor: 0.75, virginica: 0.05]
Predicted Value: versicolor
Target Value: versicolor
Cross Entropy: 0.34
Index ID: {{index_1}}
{multiclass_table}
""".format(multiclass_table=multiclass_table)
multiclass_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": {"setosa": 0.8, "versicolor": 0.1, "virginica": 0.1},
"predicted_value": "setosa", "target_value": "setosa",
"error_name": "Cross Entropy", "error_value": 0.15},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": {"setosa": 0.2, "versicolor": 0.75, "virginica": 0.05},
"predicted_value": "versicolor", "target_value": "versicolor",
"error_name": "Cross Entropy", "error_value": 0.34},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
multiclass_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_setosa_probability": [0.8, 0.2],
"label_versicolor_probability": [0.1, 0.75],
"label_virginica_probability": [0.1, 0.05],
"predicted_value": ["setosa", "versicolor"],
"target_value": ["setosa", "versicolor"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.15, 0.34]
})
multiclass_no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
{multiclass_table}
2 of 2
{multiclass_table}
""".format(multiclass_table=multiclass_table)
@pytest.mark.parametrize("problem_type,output_format,answer,explain_predictions_answer,custom_index",
[(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, [0, 1]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, [4, 23]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, [4, 10]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, [4, 10]),
(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, ["foo", "bar"]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, ["foo", "bar"]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, ["foo", "bar"]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, [0, 1]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, [7, 11]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, [7, 11]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, [7, 11]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, ["first", "second"]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, ["first", "second"]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, ["first", "second"]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, [0, 1]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, [19, 103]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, [17, 235]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, [17, 235]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, ["2020-10", "2020-11"]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, ["2020-15", "2020-15"]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, ["2020-15", "2020-15"]),
])
@patch("evalml.model_understanding.prediction_explanations.explainers.DEFAULT_METRICS")
@patch("evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_shap_table")
def test_explain_predictions_best_worst_and_explain_predictions(mock_make_table, mock_default_metrics,
problem_type, output_format, answer,
explain_predictions_answer, custom_index):
if output_format == "text":
mock_make_table.return_value = "table goes here"
elif output_format == "dataframe":
shap_table = pd.DataFrame({
"feature_names": [0],
"feature_values": [0],
"qualitative_explanation": [0],
"quantitative_explanation": [0],
})
# Use side effect so that we always get a new copy of the dataframe
mock_make_table.side_effect = lambda *args, **kwargs: shap_table.copy()
else:
mock_make_table.return_value = {"explanations": ["explanation_dictionary_goes_here"]}
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
input_features = pd.DataFrame({"a": [3, 4]}, index=custom_index)
pipeline.problem_type = problem_type
pipeline.name = "Test Pipeline Name"
def _add_custom_index(answer, index_best, index_worst, output_format):
if output_format == "text":
answer = answer.format(index_0=index_best, index_1=index_worst)
elif output_format == "dataframe":
col_name = "prefix" if "prefix" in answer.columns else "rank"
n_repeats = answer[col_name].value_counts().tolist()[0]
answer['index_id'] = [index_best] * n_repeats + [index_worst] * n_repeats
else:
answer["explanations"][0]["predicted_values"]["index_id"] = index_best
answer["explanations"][1]["predicted_values"]["index_id"] = index_worst
return answer
if problem_type == ProblemTypes.REGRESSION:
abs_error_mock = MagicMock(__name__="abs_error")
abs_error_mock.return_value = pd.Series([4., 1.], dtype="float64")
mock_default_metrics.__getitem__.return_value = abs_error_mock
pipeline.predict.return_value = ww.DataColumn(pd.Series([2, 1]))
y_true = pd.Series([3, 2], index=custom_index)
answer = _add_custom_index(answer, index_best=custom_index[1],
index_worst=custom_index[0], output_format=output_format)
elif problem_type == ProblemTypes.BINARY:
pipeline.classes_.return_value = ["benign", "malignant"]
cross_entropy_mock = MagicMock(__name__="cross_entropy")
mock_default_metrics.__getitem__.return_value = cross_entropy_mock
cross_entropy_mock.return_value = pd.Series([0.2, 0.78])
pipeline.predict_proba.return_value = ww.DataTable(pd.DataFrame({"benign": [0.05, 0.1], "malignant": [0.95, 0.9]}))
pipeline.predict.return_value = ww.DataColumn(pd.Series(["malignant"] * 2))
y_true = pd.Series(["malignant", "benign"], index=custom_index)
answer = _add_custom_index(answer, index_best=custom_index[0],
index_worst=custom_index[1], output_format=output_format)
else:
# Multiclass text output is formatted slightly different so need to account for that
if output_format == "text":
mock_make_table.return_value = multiclass_table
pipeline.classes_.return_value = ["setosa", "versicolor", "virginica"]
cross_entropy_mock = MagicMock(__name__="cross_entropy")
mock_default_metrics.__getitem__.return_value = cross_entropy_mock
cross_entropy_mock.return_value = pd.Series([0.15, 0.34])
pipeline.predict_proba.return_value = ww.DataTable(pd.DataFrame({"setosa": [0.8, 0.2], "versicolor": [0.1, 0.75],
"virginica": [0.1, 0.05]}))
pipeline.predict.return_value = ww.DataColumn(pd.Series(["setosa", "versicolor"]))
y_true = pd.Series(["setosa", "versicolor"], index=custom_index)
answer = _add_custom_index(answer, index_best=custom_index[0],
index_worst=custom_index[1], output_format=output_format)
best_worst_report = explain_predictions_best_worst(pipeline, input_features, y_true=y_true,
num_to_explain=1, output_format=output_format)
if output_format == "text":
compare_two_tables(best_worst_report.splitlines(), answer.splitlines())
elif output_format == "dataframe":
# Check dataframes equal without caring about column order
assert sorted(best_worst_report.columns.tolist()) == sorted(answer.columns.tolist())
pd.testing.assert_frame_equal(best_worst_report, answer[best_worst_report.columns])
else:
assert best_worst_report == answer
report = explain_predictions(pipeline, input_features, output_format=output_format,
training_data=input_features)
if output_format == "text":
compare_two_tables(report.splitlines(), explain_predictions_answer.splitlines())
elif output_format == "dataframe":
assert report.columns.tolist() == explain_predictions_answer.columns.tolist()
pd.testing.assert_frame_equal(report, explain_predictions_answer[report.columns])
else:
assert report == explain_predictions_answer
regression_custom_metric_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
sum: 3
Index ID: 1
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
sum: 5
Index ID: 0
table goes here
"""
regression_custom_metric_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 1, "target_value": 2,
"error_name": "sum", "error_value": 3,
"index_id": 1},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 2, "target_value": 3,
"error_name": "sum", "error_value": 5,
"index_id": 0},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
@pytest.mark.parametrize("output_format,answer",
[("text", regression_custom_metric_answer),
("dict", regression_custom_metric_answer_dict)])
@patch("evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_shap_table")
def test_explain_predictions_best_worst_custom_metric(mock_make_table, output_format, answer):
mock_make_table.return_value = "table goes here" if output_format == "text" else {"explanations": ["explanation_dictionary_goes_here"]}
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
input_features = pd.DataFrame({"a": [5, 6]})
pipeline.problem_type = ProblemTypes.REGRESSION
pipeline.name = "Test Pipeline Name"
    pipeline.predict.return_value = ww.DataColumn(pd.Series([2, 1]))
"""
This module implements dynamic visualizations for EOPatch
Credits:
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import dataclasses
import datetime as dt
from typing import Optional, List, cast
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame
from shapely.geometry import Polygon
try:
import xarray as xr
import holoviews as hv
import geoviews as gv
import hvplot # pylint: disable=unused-import
import hvplot.xarray # pylint: disable=unused-import
import hvplot.pandas # pylint: disable=unused-import
from cartopy import crs as ccrs
except ImportError as exception:
raise ImportError(
"This module requires an installation of dynamic plotting package extension. It can be installed with:\n"
"pip install eo-learn-visualization[HVPLOT]"
) from exception
from sentinelhub import BBox, CRS
from eolearn.core import EOPatch, FeatureType, FeatureTypeSet
from eolearn.core.utils.parsing import parse_feature
from .xarray import array_to_dataframe, get_new_coordinates, string_to_variable
from ..eopatch_base import BasePlotConfig, BaseEOPatchVisualization
@dataclasses.dataclass
class HvPlotConfig(BasePlotConfig):
"""Additional advanced configurations for `hvplot` visualization.
:param plot_width: Width of the plot.
:param plot_height: Height of the plot.
:param plot_per_pixel: Whether to plot data for each pixel (line), for `FeatureType.DATA` and `FeatureType.MASK`.
:param vdims: Value dimensions for plotting a `GeoDataFrame`.
"""
plot_width: int = 800
plot_height: int = 500
plot_per_pixel: bool = False
vdims: Optional[str] = None
class HvPlotVisualization(BaseEOPatchVisualization):
"""EOPatch visualization using `HvPlot` framework."""
def __init__(
self, eopatch: EOPatch, feature, *, mask_feature=None, config: Optional[HvPlotConfig] = None, **kwargs
):
"""
:param eopatch: An EOPatch with a feature to plot.
:param feature: A feature from the given EOPatch to plot.
:param mask_feature: A mask feature to be applied as a mask to the feature that is being plotted
"""
config = config or HvPlotConfig()
super().__init__(eopatch, feature, config=config, **kwargs)
self.config = cast(HvPlotConfig, self.config)
self.mask_feature = parse_feature(mask_feature) if mask_feature else None
def plot(self):
"""Creates a `hvplot` of the feature from the given `EOPatch`."""
feature_type, _ = self.feature
data, timestamps = self.collect_and_prepare_feature()
eopatch = self.eopatch.copy()
eopatch[self.feature] = data
eopatch.timestamp = timestamps
if self.config.plot_per_pixel and feature_type in FeatureTypeSet.RASTER_TYPES_4D:
vis = self._plot_pixel(eopatch)
elif feature_type in (FeatureType.MASK, *FeatureTypeSet.RASTER_TYPES_3D):
vis = self._plot_raster(eopatch)
elif feature_type is FeatureType.DATA:
vis = self._plot_data(eopatch)
elif feature_type is FeatureType.VECTOR:
vis = self._plot_vector(eopatch)
elif feature_type is FeatureType.VECTOR_TIMELESS:
vis = self._plot_vector_timeless(eopatch)
else:
vis = self._plot_scalar_label(eopatch)
return vis.opts(plot=dict(width=self.config.plot_width, height=self.config.plot_height))
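    # Illustrative usage sketch (the feature name and any extra keyword arguments are
    # assumptions handled by the BaseEOPatchVisualization parent class via **kwargs):
    #
    #     vis = HvPlotVisualization(eopatch, (FeatureType.DATA, "NDVI"))
    #     dashboard = vis.plot()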
def _plot_data(self, eopatch: EOPatch):
"""Plots the FeatureType.DATA of eopatch."""
crs = eopatch.bbox.crs
crs = CRS.POP_WEB if crs is CRS.WGS84 else crs
data_da = array_to_dataframe(eopatch, self.feature, crs=crs)
if self.mask_feature:
data_da = self._mask_data(data_da, eopatch)
timestamps = eopatch.timestamp
crs = eopatch.bbox.crs
if not self.rgb:
return data_da.hvplot(x="x", y="y", crs=ccrs.epsg(crs.epsg))
_, feature_name = self.feature
data_rgb = self._eopatch_da_to_rgb(data_da, feature_name, crs)
rgb_dict = {timestamp_: self._plot_rgb_one(data_rgb, timestamp_) for timestamp_ in timestamps}
return hv.HoloMap(rgb_dict, kdims=["time"])
@staticmethod
def _plot_rgb_one(eopatch_da: xr.DataArray, timestamp: dt.datetime):
"""Returns visualization for one timestamp for FeatureType.DATA"""
return eopatch_da.sel(time=timestamp).drop("time").hvplot(x="x", y="y")
def _plot_raster(self, eopatch: EOPatch):
"""Makes visualization for raster data (except for FeatureType.DATA)"""
crs = eopatch.bbox.crs
crs = CRS.POP_WEB if crs is CRS.WGS84 else crs
data_da = array_to_dataframe(eopatch, self.feature, crs=crs)
data_min = data_da.values.min()
data_max = data_da.values.max()
data_levels = len(np.unique(data_da))
data_levels = 11 if data_levels > 11 else data_levels
data_da = data_da.where(data_da > 0).fillna(-1)
vis = data_da.hvplot(x="x", y="y", crs=ccrs.epsg(crs.epsg)).opts(
clim=(data_min, data_max), clipping_colors={"min": "transparent"}, color_levels=data_levels
)
return vis
def _plot_vector(self, eopatch: EOPatch):
"""A visualization for vector feature"""
crs = eopatch.bbox.crs
timestamps = eopatch.timestamp
data_gpd = self._fill_vector(eopatch)
if crs is CRS.WGS84:
crs = CRS.POP_WEB
data_gpd = data_gpd.to_crs(crs.pyproj_crs())
shapes_dict = {timestamp_: self._plot_shapes_one(data_gpd, timestamp_, crs) for timestamp_ in timestamps}
return hv.HoloMap(shapes_dict, kdims=["time"])
def _fill_vector(self, eopatch: EOPatch) -> GeoDataFrame:
"""Adds timestamps from eopatch to GeoDataFrame."""
vector = eopatch[self.feature].copy()
vector["valid"] = True
eopatch_timestamps = eopatch.timestamp
vector_timestamps = set(vector[self.config.timestamp_column])
blank_timestamps = [timestamp for timestamp in eopatch_timestamps if timestamp not in vector_timestamps]
dummy_geometry = self._create_dummy_polygon(eopatch.bbox, 0.0000001)
temp_df = self._create_dummy_dataframe(vector, blank_timestamps=blank_timestamps, dummy_geometry=dummy_geometry)
final_vector = GeoDataFrame(pd.concat((vector, temp_df), ignore_index=True), crs=vector.crs)
return final_vector
def _create_dummy_dataframe(
self,
geodataframe: GeoDataFrame,
blank_timestamps: List[dt.datetime],
dummy_geometry: Polygon,
fill_str: str = "",
fill_numeric: float = 1.0,
) -> pd.DataFrame:
"""Creates a `GeoDataFrame` to fill with dummy data (for visualization)
:param geodataframe: dataframe to append rows to
:param blank_timestamps: timestamps for constructing dataframe
:param dummy_geometry: geometry to plot when there is no data
        :param fill_str: value to insert when a string column has no value
        :param fill_numeric: value to insert when a numeric column has no value
:return: dataframe with dummy data
"""
        dataframe = pd.DataFrame(data=blank_timestamps, columns=[self.config.timestamp_column])
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 2021, last edited 27 Oct 2021
Fiber flow emissions calculations module - class version
Inputs:
Excel file with old PPI market & emissions data ('FiberModelAll_Python_v3-yields.xlsx')
Outputs:
Dict of keys 'old','new','forest','trade' with emissions calcs
(*testing inputs*
x = 'FiberModelAll_Python_v2.xlsx'
f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
pbpVolOld.columns = [x[:-2] for x in pbpVolOld.columns]
consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=34, nrows=3, index_col=0)
rLevel = pd.read_excel(x, 'Demand', usecols="F:K", skiprows=16, nrows=5)
rLevel = {t: list(rLevel[t][np.isfinite(rLevel[t])].values) for t in fProd}
fProd = [t for t in f2pVolOld.iloc[:,:6].columns]
fProdM = [t for t in f2pVolOld.iloc[:,:7].columns]
rFiber = f2pVolOld.index[:16]
vFiber = f2pVolOld.index[16:]
rPulp = [p for p in pbpVolOld.index if 'Rec' in p]
vPulp = [q for q in pbpVolOld.index if 'Vir' in q]
fPulp = [f for f in pbpVolOld.index]
import numpy as np
f2pYld = pd.read_excel(x, 'Fiber', usecols="I:O", skiprows=1, nrows=21)
f2pYld.index = np.concatenate([rFiber.values, vFiber.values], axis=0)
pulpYld = pd.read_excel(x, 'Pulp', usecols="D", skiprows=1, nrows=14)
pulpYld.index = rPulp + vPulp
transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
rsdlbio = rsdlbio.fillna(0)
rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
rsdlfos = rsdlfos.fillna(0)
woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
exportOld.iloc[:,:-1] = exportOld.iloc[:,:-1]
exportNew = exportOld.iloc[:,:-1] * 1.5
exportNew.columns = ['exportNew']
exportNew = exportNew.assign(TransCode=exportOld['TransCode'].values)
fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
)
@author: <NAME>
"""
import pandas as pd
import numpy as np
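# A minimal illustrative sketch (not part of the original model; all numbers are assumed)
# of the transport-impact arithmetic that en_emissions.calculateTrans() below applies per
# product and transport code: impact = sum over modes of
# volume * modal share * distance * unit impact.
def _example_transport_impact():
    volume_mg = 1000.0                                 # shipped mass in Mg (assumed)
    modal_share = {'Truck': 0.8, 'Train': 0.2}         # fraction of the route per mode (assumed)
    distance_km = {'Truck': 300.0, 'Train': 700.0}     # distance travelled per mode (assumed)
    unit_impact = {'Truck': 0.1, 'Train': 0.02}        # impact per Mg-km (assumed)
    return sum(volume_mg * modal_share[m] * distance_km[m] * unit_impact[m]
               for m in modal_share)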
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
# f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by pulp name
# pbpVolNew (df) - pulp by product volume; indexed by pulp name
# consCollNew (df) - domestic consumption, collection, and recovery by product
# demandNew (df) - new demand by product; indexed by rec level
uC = 0.907185 # unit conversion of MM US ton to Mg/metric ton
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # strip the '.1' suffix pandas appends to duplicate column names
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # strip the '.1' suffix pandas appends to duplicate column names
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
cls.rsdlbio = cls.rsdlbio.fillna(0)
cls.rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
cls.rsdlfos = cls.rsdlfos.fillna(0)
cls.transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
cls.transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
cls.transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
cls.woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
cls.wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
cls.wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
cls.wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
cls.wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
cls.wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
cls.wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
cls.chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
cls.chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
cls.fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
def calculateTrans(cls,transVol):
# transVol [df] - item, volume (in Mg) by product, TransCode; indexed by fiberCode or other label
# transPct [df] - % traversed for transMode by transCode; indexed by transCode
# transKM [df] - distance traversed for transMode by transCode; indexed by transCode
# transUMI [s] - unit impact by mode (truck, train, boat); indexed by "transUMI"
transImpact = pd.Series(0, index = cls.fProd)
tC = transVol['TransCode']
tC = tC[(tC != 0) & (tC != 1)] # keep only rows with a transport code other than 0 or 1
transVol = transVol.loc[tC.index]
for t in cls.fProd:
for m in cls.transUMI.columns:
transImpact[t] += sum(transVol[t] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values * 1)
return transImpact
def calculateChem(cls,chemicals,prodDemand):
# chemicals [df] - nonfiber name, % use by product, transCode, impact factor; indexed by number
# prodDemand [df] - total demand; indexed by product
chemImpact = pd.Series(0, index = cls.fProd, name = 'chemImp')
chemVol = pd.DataFrame(0, index = chemicals.index, columns = cls.fProd)
for t in cls.fProd:
chemImpact[t] = sum(prodDemand[t].values * chemicals[t] * chemicals['Impact Factor'])
chemVol[t] = chemicals[t] * prodDemand[t].values
chemVol = chemVol.join(chemicals['TransCode'])
chemTrans = pd.Series(cls.calculateTrans(chemVol), name = 'chemTrans')
chemImpact = pd.DataFrame(chemImpact)
return pd.concat([chemImpact, chemTrans], axis=1)
def calculateEoL(cls,eolEmissions,consColl):
# eolEmissions [df] - biogenic and fossil CO2 emission factors & transportation code by product; indexed by bio/fosCO2
# consColl [df] - domestic consumption, collection, and recovery by product; indexed by name
prod2landfill = pd.Series(consColl.loc['Domestic Consumption'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'prod2landfill')
mrf2landfill = pd.Series(consColl.loc['Collection Volume'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'mrf2landfill')
bioEoL = pd.Series(prod2landfill * eolEmissions.loc['bioCO2'], index = cls.fProd, name = 'bioEoL')
mrf2landfill = pd.DataFrame(mrf2landfill) # works b/c all prods have same TransCode
transEoL = pd.Series(cls.calculateTrans(mrf2landfill.T.assign(TransCode=eolEmissions.loc['TransCode'].values[0])),
index = cls.fProd, name = 'eolTrans')
fesTransEoL = pd.Series(prod2landfill * eolEmissions.loc['fossilCO2'] + transEoL, index = cls.fProd,
name = 'fesTransEoL')
bftEoL = pd.Series(bioEoL + fesTransEoL, name = 'bftEoL')
return pd.concat([bioEoL, fesTransEoL, bftEoL, transEoL], axis=1)
def getEnergyYldCoeff(cls,f2pVol,pbpVol):
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# PYCoeff [s] - pulp yield coefficient; indexed by pulp
f2pByPulp = pd.Series(0, index = pbpVol.index, name = 'fiber2pulp')
for p in cls.rPulp:
f2pByPulp[p] = sum([f2pVol.loc[cls.rFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
f2pByPulp[q] = sum([f2pVol.loc[cls.vFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpProd = pd.Series([pbpVol.loc[i].sum() for i in pbpVol.index], index = pbpVol.index, name = 'pulpProd')
PYCoeff = (pd.Series(f2pByPulp / pulpProd, name = 'pulpYldCoeff'))
PYCoeff.replace([np.inf, -np.inf], np.nan, inplace=True)
PYCoeff = PYCoeff.fillna(0)
return PYCoeff
def getEnergyPulpPct(cls,pbpVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
pulpPct = pbpVol.copy().drop(['TransCode'], axis=1)
for t in pulpPct.columns:
rTotalPulp = pulpPct.loc[cls.rPulp,t].sum()
vTotalPulp = pulpPct.loc[cls.vPulp,t].sum()
pulpPct.loc[cls.rPulp,t] = pulpPct.loc[cls.rPulp,t] / rTotalPulp
pulpPct.loc[cls.vPulp,t] = pulpPct.loc[cls.vPulp,t] / vTotalPulp
return pulpPct.fillna(0)
def getEnergyMultiProd(cls,PYMult,pulpPct):
# PYMult [s] - pulp yield multiplier; indexed by pulp name
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
#
# (return) [df] - rec/vir yield multiprod by product; index by r/vYldMultiProd
rYldMultiProd = pd.Series([sum(pulpPct.loc[cls.rPulp,t] * PYMult[cls.rPulp]) for t in cls.fProd],
index = cls.fProd, name = 'rYldMultiProd')
vYldMultiProd = pd.Series([sum(pulpPct.loc[cls.vPulp,t] * PYMult[cls.vPulp]) for t in cls.fProd],
index = cls.fProd, name = 'vYldMultiProd')
rYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
vYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
return pd.concat([rYldMultiProd.fillna(0), vYldMultiProd.fillna(0)], axis=1)
def calculateEnergy(cls,pbpVol,prodLD,multiProd,pwpEI,paperEI):
# prodLD (df) - demand by product; indexed by % recycled content level
# bfEI (df) - bio & fes energy intensity fitting parameters by product; indexed by name
# bioPct (df) - bio fitting parameter for PWP; indexed by name
# pwpEI (df) - energy intensity of PWP pulp; indexed by pulp name
# paperEI (df) - paper production energy intensity; indexed by 'PPE'
# pbpVol (df) - pulp by product (in Mg); indexed by pulp name
# multiProd (df) - rec/vir yield multiprod by product; indexed by product
bioEnergy = pd.Series(0, index = cls.fProd, name = "bioEnergy")
fesEnergy = pd.Series(0, index = cls.fProd, name = 'fesEnergy')
totalEnergy = pd.Series(0, index = cls.fProd, name = 'totalEnergy')
for t in cls.fProd:
bioEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
sum([r * cls.bfEI.loc['bioEI b1',t] + cls.bfEI.loc['bioEI b0',t] for r in cls.rLevel[t]]))
fesEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
cls.bfEI.loc['fesEI',t] * multiProd.loc[t,'rYldMultiProd'])
if ('P&W' in t) or ('News' in t):
avgrecPct = sum(prodLD[t].values[:len(cls.rLevel[t])] * cls.rLevel[t]) / prodLD[t].sum()
bioPctPW = avgrecPct * cls.bioPct.loc['bioPct b1',t] + cls.bioPct.loc['bioPct b0',t]
pulpProdEnergy = sum([pbpVol.loc[p,t] * pwpEI.loc[p].values[0] for p in pwpEI.index])
ppEnergy = pulpProdEnergy + prodLD[t].sum() * paperEI.values[0]
bioEnergy[t] = bioPctPW * ppEnergy
fesEnergy[t] = (1 - bioPctPW) * ppEnergy * multiProd.loc[t,'rYldMultiProd']
totalEnergy[t] = bioEnergy[t] + fesEnergy[t]
return pd.concat([bioEnergy, fesEnergy, totalEnergy], axis=1)
def calculateProduction(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# bfCO2 (df) - bio & fes CO2 fitting parameters; indexed by product
bioCO2 = pd.Series(0, index = cls.fProd, name = 'bioCO2')
fesCO2 = pd.Series(0, index = cls.fProd, name = 'fesCO2')
totalCO2 = pd.Series(0, index = cls.fProd, name = 'totalCO2')
for t in cls.fProd:
bioCO2[t] = calcEnergy.loc[t,'bioEnergy'] * cls.bfCO2.loc['bioCO2 b1',t]
fesCO2[t] = calcEnergy.loc[t,'fesEnergy'] * cls.bfCO2.loc['fesCO2 b1',t]
totalCO2[t] = bioCO2[t] + fesCO2[t]
return pd.concat([bioCO2, fesCO2, totalCO2], axis=1)
def calculateFuel(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# fuelTable (df) - fuel impact by product; indexed by fuel type
fuels = cls.fuelTable.index
bioFI = pd.Series(0, index = cls.fProd, name = 'bioFuelImp')
fesFI = pd.Series(0, index = cls.fProd, name = 'fesFuelImp')
fuelImp = pd.Series(0, index = cls.fProd, name = 'fuelImp')
for t in cls.fProd:
bioFI[t] = calcEnergy.loc[t,'bioEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1])
fesFI[t] = calcEnergy.loc[t,'fesEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2])
fuelImp[t] = bioFI[t] + fesFI[t]
fuelTransVol = cls.fuelTable.copy()
fuel1 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1]
fuel2 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2]
for t in cls.fProd:
fuelTransVol.loc[fuel1,t] = [calcEnergy.loc[t,'bioEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel1]
fuelTransVol.loc[fuel2,t] = [calcEnergy.loc[t,'fesEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel2]
fuelTrans = pd.Series(cls.calculateTrans(fuelTransVol), name = 'fuelTrans')
return pd.concat([bioFI, fesFI, fuelImp, fuelTrans], axis=1)
def calculateResidual(cls,pbpVol,f2pVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# rsdlModes [df] - residual treatments modes; indexed by residual type
# rsdlbio [df] - transport and biogenic emissions factors; indexed by residual treatment mode
# rsdlfos [df] - transport and fossil emissions factors; indexed by residual treatment mode
pulpProd = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'pulpProduced')
fiberRes = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'fiberResidue')
for p in cls.rPulp: # order of fPulp must match order of r/vPulp
pulpProd[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
fiberRes[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(1 - cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
pulpProd[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
fiberRes[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(1 - cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpUP = pbpVol.iloc[:,:-1].div(pulpProd, axis=0).fillna(0) # pulpUsePct
rFiberRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(fiberRes[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rFiberRsd')
rPulpRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rPulpRsd')
rTotalRsd = rFiberRsd + rPulpRsd
vFiberRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(fiberRes[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vFiberRsd')
vPulpRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vPulpRsd')
vTotalRsd = vFiberRsd + vPulpRsd
rsdlType = cls.rsdlModes.index
rsdlQuantity = pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rt in rsdlType:
if cls.rsdlModes.loc[rt,'Input Base'] == 1:
rsdlQuantity.loc[rt,:] = rTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
if cls.rsdlModes.loc[rt,'Input Base'] == 2:
rsdlQuantity.loc[rt,:] = vTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
rsdlMode = cls.rsdlModes.columns[:-2]
rsdlModeVol = {rM: pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rM in rsdlMode}
for rM in rsdlMode:
rsdlModeVol[rM] = rsdlQuantity.mul(cls.rsdlModes[rM], axis=0)
rsdlModeVol[rM] = rsdlModeVol[rM].assign(TransCode=cls.rsdlbio.loc[rM,'TransCode'] * np.ones(len(rsdlType)))
rsdlModeVol[rM].replace([np.inf, -np.inf], np.nan, inplace=True) # TODO: what happens to make this inf?
rsdlModeVol[rM] = rsdlModeVol[rM].fillna(0)
bioImp = pd.Series(0, index = cls.fProd, name = 'bioImp')
fosImp = pd.Series(0, index = cls.fProd, name = 'fossilImp')
for t in cls.fProd:
bioImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlbio.loc[rM,t] for rM in rsdlMode])
fosImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlfos.loc[rM,t] for rM in rsdlMode])
biofosImp = pd.Series(bioImp + fosImp, name = 'bio+fos')
rsdlTrans = pd.Series(0, index = cls.fProd, name = 'rsdlTrans')
for rM in rsdlMode:
rsdlTrans += cls.calculateTrans(rsdlModeVol[rM])
return pd.concat([bioImp, fosImp, biofosImp, rsdlTrans], axis=1)
def getExportTrans(cls,transVol):
transImpact = pd.Series(0, index = transVol.columns[:-1])
tC = transVol['TransCode']
tC = tC[(tC != 0) & (tC != 1)] # keep only rows with a transport code other than 0 or 1
transVol = transVol.loc[tC.index]
for n in transVol.columns[:-1]:
for m in cls.transUMI.columns:
transImpact[n] += sum(transVol[n] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values)
return transImpact.values
def calculateExport(cls,exportOld,exportNew):
# exportOld [df] old export from US; indexed by rec fiber
# exportNew [df] new export from US; indexed by rec fiber
impChange = pd.Series(0, index = cls.fYield.index, name = 'impChangeByGroup')
sumChange = pd.Series(0, index = cls.fYield.index, name = 'sumNetChange')
for r in impChange.index:
typeMask = cls.fiberType[cls.fiberType['fiberType'] == r].index
# impChange[r] = (exportOld.loc[typeMask, 'exportOld'] - exportNew.loc[typeMask, 'exportNew']).sum()
impChange[r] = (exportNew.loc[typeMask, 'exportNew'] - exportOld.loc[typeMask, 'exportOld']).sum()
sumChange[r] = impChange[r] * (1 - cls.fYield.loc[r,'US'] / cls.fYield.loc[r,'China'])
beta = sumChange.sum() / (cls.chinaCons.loc['totalVir'].values + cls.chinaCons.loc['domesticRec'].values +
cls.chinaCons.loc['importRec-US'].values + cls.chinaCons.loc['importRec-nonUS'].values)
# chinaTrans = cls.getExportTrans(exportOld) - cls.getExportTrans(exportNew)
chinaTrans = cls.getExportTrans(exportNew) - cls.getExportTrans(exportOld)
return cls.chinaVals.loc['Production'] * cls.chinaVals.loc['Energy Intensity'] * cls.chinaVals.loc['Emission Factor'] * beta + chinaTrans
def getForestVirginGHG(cls,virCons,woodint,slope,intercept):
# virCons [df] change in virgin consumption; products as columns
# woodint [df] intervals of virgin wood consumption
# slope [s] b1 value for GHG emissions
# intercept[s] b0 value for GHG emissions
for n in range(1,len(woodint.columns)):
if (woodint[n].values <= virCons) & (virCons < woodint[n+1].values):
return virCons * slope[n] + intercept[n]
return 0 # catch values outside of interval
def calculateForest(cls,virCons,forYear):
# virCons [float] change in virgin consumption, sum of all products
# forYear [int] forest year length for cumulative emissions calcs; 10-90 by ten
deltaTotalGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wtotalGHGb1[forYear], cls.wtotalGHGb0[forYear]),
name = 'totalGHG') * 1e6
deltabioGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wbioGHGb1[forYear], cls.wbioGHGb0[forYear]),
name = 'bioGHG') * 1e6
deltafosGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wfosGHGb1[forYear], cls.wfosGHGb0[forYear]),
name = 'fosGHG') * 1e6
return pd.concat([deltaTotalGHG, deltabioGHG, deltafosGHG], axis=1)
def calculateEmissions(cls):
# xls [df] - name of Excel spreadsheet to pull data from
# fProd [df] - list of products in current scenario
# rL [dict] - recycled content level by product
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# f2pVolNew [df] - fiber to pulp volume (in Mg); indexed by fiber code
# pbpVolNew [df] - pulp by product volume; indexed by pulp name
# consCollNew [df] - domestic consumption, collection, and recovery by product
pulpNames = cls.rPulp + cls.vPulp
mvO = [cls.pbpVolOld.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolOld = pd.concat([mvO[0],mvO[1]], axis=1).T
mvN = [cls.pbpVolNew.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolNew = pd.concat([mvN[0],mvN[1]], axis=1).T
# Chemical
chemImp = cls.calculateChem(cls.chemicals, cls.prodDemand)
# EoL
oldEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollOld)
newEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollNew)
# Energy
oldPulpPct = cls.getEnergyPulpPct(cls.pbpVolOld)
newPulpPct = cls.getEnergyPulpPct(cls.pbpVolNew)
oldPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolOld, cls.pbpVolOld)
newPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolNew, cls.pbpVolNew)
oldYldMultiplier = (oldPYCoeff / oldPYCoeff).fillna(0)
newYldMultiplier = (newPYCoeff / oldPYCoeff).fillna(0)
oldMP = cls.getEnergyMultiProd(oldYldMultiplier, oldPulpPct)
newMP = cls.getEnergyMultiProd(newYldMultiplier, newPulpPct)
oldEnergy = cls.calculateEnergy(cls.pbpVolOld, cls.prodLD, oldMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
newEnergy = cls.calculateEnergy(cls.pbpVolNew, cls.demandNew, newMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
# Production
oldProd = cls.calculateProduction(oldEnergy)
newProd = cls.calculateProduction(newEnergy)
# Fuel
oldFuel = cls.calculateFuel(oldEnergy)
newFuel = cls.calculateFuel(newEnergy)
# Residual
oldRsdl = cls.calculateResidual(cls.pbpVolOld, cls.f2pVolOld)
newRsdl = cls.calculateResidual(cls.pbpVolNew, cls.f2pVolNew)
# Transportation
oldFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolOld), name = 'fiberTrans')
oldMarketTrans = pd.Series(cls.calculateTrans(marketVolOld), name = 'marketTrans')
oldTrans = pd.concat([oldFiberTrans, oldMarketTrans, chemImp['chemTrans'], oldFuel['fuelTrans'],
oldRsdl['rsdlTrans'], oldEoL['eolTrans']], axis=1)
newFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolNew), name = 'fiberTrans')
newMarketTrans = pd.Series(cls.calculateTrans(marketVolNew), name = 'marketTrans')
newTrans = pd.concat([newFiberTrans, newMarketTrans, chemImp['chemTrans'], newFuel['fuelTrans'],
newRsdl['rsdlTrans'], newEoL['eolTrans']], axis=1)
# Export
exportImp = cls.calculateExport(cls.exportOld,cls.exportNew)
# FASOM/LURA
forestGHG = cls.calculateForest(cls.f2pVolNew.iloc[:,:-1].loc[cls.vFiber].sum().sum() -
cls.f2pVolOld.iloc[:,:-1].loc[cls.vFiber].sum().sum(), 90)
# Summary calcs for plotting
oldSums = pd.concat([pd.Series(chemImp['chemImp'], name='chemImp'),
pd.Series(oldFuel['bioFuelImp'], name='fuelbio'),
pd.Series(oldFuel['fesFuelImp'], name='fuelfos'),
pd.Series(oldProd['totalCO2'], name='prodImp'),
pd.Series(oldProd['bioCO2'], name='prodbio'),
pd.Series(oldProd['fesCO2'], name='prodfos'),
pd.Series(oldEnergy['totalEnergy'], name='energy'),
pd.Series(oldEnergy['bioEnergy'], name='energybio')
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
###############################################
# #
# Interfacing with Excel Module to build DSM #
# #
# Contrib: uChouinard #
# V0 03/03/2019 #
# #
###############################################
import DSM as dsmx
import SystemDependencies as dpx
import DependencyIndex as dpi
import pandas as pds
from openpyxl import load_workbook
# Purely based on the Negative Dependencies DSM
class NDI_Interfacer:
def __init__(self, input_filename, output_filename=''):
    self.input_filename = input_filename
    self.output_filename = output_filename
    if output_filename == '':
        # default to writing the results back into the input workbook
        self.output_filename = input_filename
    self.sys = dpx.System('')
def dsmBuilder(self):
#Fetching Relevant Affecting/Affected Info
df=pds.read_excel(self.input_filename, 'Input_Level')
compList=[]
mat_nan=df.isnull()
for _component in list(df['Components'].values):
tmp_comp=dpx.Component(_component)
cmp_index=list(df['Components']).index(_component)
if not mat_nan['AD_Heat'][ cmp_index]:
tmp_comp.addAffected(dpx.AdverseEffect('Heat',df['AD_Heat'][ cmp_index]))
if not mat_nan['AD_Vibration'][ cmp_index]:
tmp_comp.addAffected(dpx.AdverseEffect('Vibration', df['AD_Vibration'][ cmp_index]))
if not mat_nan['AD_EMF'][ cmp_index]:
tmp_comp.addAffected(dpx.AdverseEffect('EMF', df['AD_EMF'][ cmp_index]))
if not mat_nan['AR_Heat'][ cmp_index]:
tmp_comp.addAffecting(dpx.AdverseEffect('Heat', df['AR_Heat'][ cmp_index]))
if not mat_nan['AR_Vibration'][ cmp_index]:
tmp_comp.addAffecting(dpx.AdverseEffect('Vibration', df['AR_Vibration'][ cmp_index]))
if not mat_nan['AR_EMF'][ cmp_index]:
tmp_comp.addAffecting(dpx.AdverseEffect('EMF', df['AR_EMF'][ cmp_index]))
compList.append(tmp_comp)
#Fetching Layout Information
dfc=pds.read_excel(self.input_filename, 'Input_Closeness')
mat_nan_c=dfc.isnull()
closeness={}
other_components=list(dfc.keys())
other_components.remove('Components')
for _component in list(dfc['Components'].values):
cmp_index=list(df['Components']).index(_component)
for _other_component in other_components:
if not mat_nan_c[_other_component][cmp_index]:
closeness[('/'.join(sorted([_component,_other_component])))]=dfc[_other_component][cmp_index]
#print(closeness)
sys=dpx.System('Drone')
sys.addComponents(compList)
sys.addCloseness(closeness)
dfa=pds.read_excel(self.input_filename, 'Input_Attenuation')
if (not dfa.isnull().any().any()) and (list(dfa.index)):
att_level=dfa.to_dict('records')
sys.attenuation=att_level[0]
sys.build('float')
sys.adverseDSM.toExcel(self.output_filename)
self.sys=sys
#analysis of the system using the NDI
def sysAnalysis(self, _indexType='randic', _normType='linear'):
dfm=pds.read_excel(self.input_filename, 'Input_Effect_Fuzzy_Measures')
if (not dfm.isnull().any().any()) and (list(dfm.index)):
fuzzy_Measures=dfm.to_dict('records')[0]
else:
fuzzy_Measures={'ehv':1.0, 'ev':0.75, 'e':0.70, 'v':0.30, 'h':0.50, 'hv':0.55, 'eh':0.85}
NDI=dpi.calcNDI(self.sys.adverseDSM, fuzzy_Measures, len(self.sys.components), _indexType, _normType)
book = load_workbook(self.output_filename)
writer = pds.ExcelWriter(self.output_filename, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
ndiDict={'NDI':[NDI]}
#print(ndiDict)
dfNDI=pds.DataFrame.from_dict(ndiDict)
dfNDI.to_excel(writer,'NDI_Analysis')
writer.save()
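# A hypothetical usage sketch (the workbook name below is an assumption, not taken from the
# original code): build the adverse-effect DSM from the Excel input sheets, then score the
# system with the negative dependency index.
def _example_ndi_workflow(xlsx_path='drone_dsm.xlsx'):
    interfacer = NDI_Interfacer(xlsx_path)
    interfacer.dsmBuilder()                                          # reads the Input_* sheets and writes the DSM
    interfacer.sysAnalysis(_indexType='randic', _normType='linear')  # appends an NDI_Analysis sheet
    return interfacer.sys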
# Based on the spatial/energy/information/material interactions DSM
class Interactions_Interfacer:
def __init__(self, input_filename, output_filename='', _directed='yes'):
    self.input_filename = input_filename
    self.output_filename = output_filename
    if output_filename == '':
        # default to writing the results back into the input workbook
        self.output_filename = input_filename
    self.dsm = dsmx.DSM(name='none', dsmType='interactions', directed=_directed)
def dsmBuilder(self):
dfs=pds.read_excel(self.input_filename, 'Spatial')
dfe=pds.read_excel(self.input_filename, 'Energy')
dfi=pds.read_excel(self.input_filename, 'Information')
dfm=pds.read_excel(self.input_filename, 'Material')
dfs=dfs.fillna(0)
dfe=dfe.fillna(0)
dfi=dfi.fillna(0)
dfm=dfm.fillna(0)
other_components=list(dfs.keys())
other_components.remove('Components')
for _component in list(dfs['Components'].values):
cmp_index=list(dfs['Components']).index(_component)
for _other_component in other_components:
tmp_rel={'s':dfs[_other_component][cmp_index],
'e':dfe[_other_component][cmp_index],
'i':dfi[_other_component][cmp_index],
'm':dfm[_other_component][cmp_index]}
#print(tmp_rel)
self.dsm.addRelation([_component], [_other_component], [tmp_rel])
def sysAnalysis(self, _indexType='2D',_calculation_method='randic' ,_weights={'s':1, 'e':1, 'i':1, 'm':1}):
res_aggregation=0
res_index=0
if _indexType == '2D':
self.dsm.to2Dinteractions(_weights)
if _calculation_method == 'energy':
res_real=dpi.energyIndex(self.dsm.real2D)
res_imag=dpi.energyIndex(self.dsm.imag2D)
if _calculation_method == 'randic':
res_real=dpi.randicIndex(self.dsm.real2D)
res_imag=dpi.randicIndex(self.dsm.imag2D)
if _calculation_method == 'wiener':
res_real=dpi.wienerIndex(self.dsm.real2D)
res_imag=dpi.wienerIndex(self.dsm.imag2D)
if _calculation_method == 'sum_connectivity':
res_real=dpi.scIndex(self.dsm.real2D)
res_imag=dpi.scIndex(self.dsm.imag2D)
#print(res_aggregation)
#res_index=dpi.calc2DIndex(res_aggregation.real, res_aggregation.imag)
res_index=dpi.calc2DIndex(res_real, res_imag)
#print(res_index)
book = load_workbook(self.output_filename)
writer = pds.ExcelWriter(self.output_filename, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
ndiDict={'2D_Index':[res_index]}
#print(ndiDict)
dfNDI = pds.DataFrame.from_dict(ndiDict)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/06_score.ipynb (unless otherwise specified).
__all__ = ['filter_score', 'filter_precursor', 'get_q_values', 'cut_fdr', 'cut_global_fdr', 'get_x_tandem_score',
'score_x_tandem', 'filter_with_x_tandem', 'filter_with_score', 'score_psms', 'get_ML_features', 'train_RF',
'score_ML', 'filter_with_ML', 'assign_proteins', 'get_shared_proteins', 'get_protein_groups',
'perform_protein_grouping', 'get_ion', 'ion_dict', 'ecdf', 'score_hdf', 'protein_grouping_all']
# Cell
import numpy as np
import pandas as pd
import logging
import alphapept.io
def filter_score(df: pd.DataFrame, mode: str='multiple') -> pd.DataFrame:
"""
Filter psms feature table by keeping only the best scoring psm per experimental spectrum.
TODO: psms could still have the same score when having modifications at multiple positions that are not distinguishable.
Only keep one.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
mode (str, optional): string specifying which mode to use for psms filtering. The two options are 'single' and 'multiple'. 'single' will only keep one feature per experimental spectrum. 'multiple' will allow multiple features per experimental spectrum. In either option, each feature can only occur once. Defaults to 'multiple'.
Returns:
pd.DataFrame: table containing the filtered psms results.
"""
if "localexp" in df.columns:
additional_group = ['localexp']
else:
additional_group = []
df["rank"] = df.groupby(["query_idx"] + additional_group)["score"].rank("dense", ascending=False).astype("int")
df = df[df["rank"] == 1]
# in case two hits have the same score and therefore the same rank, only accept the first one
df = df.drop_duplicates(["query_idx"] + additional_group)
if 'dist' in df.columns:
df["feature_rank"] = df.groupby(["feature_idx"] + additional_group)["dist"].rank("dense", ascending=True).astype("int")
df["raw_rank"] = df.groupby(["raw_idx"] + additional_group)["score"].rank("dense", ascending=False).astype("int")
if mode == 'single':
df_filtered = df[(df["feature_rank"] == 1) & (df["raw_rank"] == 1) ]
df_filtered = df_filtered.drop_duplicates(["raw_idx"] + additional_group)
elif mode == 'multiple':
df_filtered = df[(df["feature_rank"] == 1)]
else:
raise NotImplementedError('Mode {} not implemented yet'.format(mode))
else:
df_filtered = df
# TODO: this needs to be sorted out for modifications -> e.g. MoxM vs. oxMM would collide in the sequence-based filtering; only keep one.
return df_filtered
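# A small self-contained sketch (toy values, not from the original notebook) of what
# filter_score keeps: one best-scoring hit per experimental spectrum (query_idx).
def _example_filter_score():
    toy = pd.DataFrame({
        'query_idx': [0, 0, 1, 1],
        'score': [20.0, 15.0, 9.0, 9.0],
        'sequence': ['PEPTIDEK', 'PEPTIDER', 'ELVISK', 'ELVISR'],
    })
    # two rows survive: the 20.0 hit for spectrum 0 and the first 9.0 hit for spectrum 1
    return filter_score(toy)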
# Cell
def filter_precursor(df: pd.DataFrame) -> pd.DataFrame:
"""
Filter psms feature table by precursor.
Allow each precursor only once.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
Returns:
pd.DataFrame: table containing the filtered psms results.
"""
if "localexp" in df.columns:
additional_group = ['localexp']
else:
additional_group = []
df["rank_precursor"] = (
df.groupby(["precursor"] + additional_group)["score"].rank("dense", ascending=False).astype("int")
)
df_filtered = df[df["rank_precursor"] == 1]
return df_filtered
# Cell
from numba import njit
@njit
def get_q_values(fdr_values: np.ndarray) -> np.ndarray:
"""
Calculate q-values from fdr_values.
Args:
fdr_values (np.ndarray): np.ndarray of fdr values.
Returns:
np.ndarray: np.ndarray of q-values.
"""
q_values = np.zeros_like(fdr_values)
min_q_value = np.max(fdr_values)
for i in range(len(fdr_values) - 1, -1, -1):
fdr = fdr_values[i]
if fdr < min_q_value:
min_q_value = fdr
q_values[i] = min_q_value
return q_values
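# A minimal sketch (hand-made FDR values, not from the original notebook) of the q-value
# computation above: the q-value is the running minimum of the FDR taken from the end of
# the score-sorted table, i.e. the smallest FDR at which a given PSM is still accepted.
def _example_get_q_values():
    fdrs = np.array([0.0, 0.5, 0.2, 0.3, 0.1])
    return get_q_values(fdrs)  # array([0. , 0.1, 0.1, 0.1, 0.1])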
# Cell
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Note that the test function for cut_fdr is further down in the notebook to also test protein-level FDR.
def cut_fdr(df: pd.DataFrame, fdr_level:float=0.01, plot:bool=True) -> (float, pd.DataFrame):
"""
Cuts a dataframe with a given fdr level
Args:
df (pd.DataFrame): psms table of search results from alphapept.
fdr_level (float, optional): fdr level that should be used for filtering. The value should lie between 0 and 1. Defaults to 0.01.
plot (bool, optional): flag to enable plot. Defaults to 'True'.
Returns:
float: numerical value of the applied score cutoff
pd.DataFrame: df with psms within fdr
"""
df["target"] = ~df["decoy"]
df = df.sort_values(by=["score","decoy"], ascending=False)
df = df.reset_index()
df["target_cum"] = np.cumsum(df["target"])
df["decoys_cum"] = np.cumsum(df["decoy"])
df["fdr"] = df["decoys_cum"] / df["target_cum"]
df["q_value"] = get_q_values(df["fdr"].values)
last_q_value = df["q_value"].iloc[-1]
first_q_value = df["q_value"].iloc[0]
if last_q_value <= fdr_level:
logging.info('Last q_value {:.3f} of dataset is smaller than fdr_level {:.3f}'.format(last_q_value, fdr_level))
cutoff_index = len(df)-1
elif first_q_value >= fdr_level:
logging.info('First q_value {:.3f} of dataset is larger than fdr_level {:.3f}'.format(first_q_value, fdr_level))
cutoff_index = 0
else:
cutoff_index = df[df["q_value"].gt(fdr_level)].index[0] - 1
cutoff_value = df.loc[cutoff_index]["score"]
cutoff = df[df["score"] >= cutoff_value]
targets = df.loc[cutoff_index, "target_cum"]
decoy = df.loc[cutoff_index, "decoys_cum"]
fdr = df.loc[cutoff_index, "fdr"]
logging.info(f"{targets:,} target ({decoy:,} decoy) of {len(df)} PSMs. fdr {fdr:.6f} for a cutoff of {cutoff_value:.2f} (set fdr was {fdr_level})")
if plot:
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 5))
plt.plot(df["score"], df["fdr"])
plt.axhline(0.01, color="k", linestyle="--")
plt.axvline(cutoff_value, color="r", linestyle="--")
plt.title("fdr vs Cutoff value")
plt.xlabel("Score")
plt.ylabel("fdr")
# plt.savefig('fdr.png')
plt.show()
bins = np.linspace(np.min(df["score"]), np.max(df["score"]), 100)
plt.figure(figsize=(10, 5))
plt.hist(df[df["decoy"]]["score"].values, label="decoy", bins=bins, alpha=0.5)
plt.hist(df[~df["decoy"]]["score"].values, label="target", bins=bins, alpha=0.5)
plt.xlabel("Score")
plt.ylabel("Frequency")
plt.title("Score vs Class")
plt.legend()
plt.show()
cutoff = cutoff.reset_index(drop=True)
return cutoff_value, cutoff
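# A self-contained toy sketch (made-up scores, not from the original notebook) of the
# target/decoy competition above: with eight high-scoring targets and two low-scoring
# decoys, the 1% FDR cutoff falls at the lowest-scoring target.
def _example_cut_fdr():
    toy = pd.DataFrame({
        'score': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
        'decoy': [False] * 8 + [True] * 2,
    })
    cutoff_value, passed = cut_fdr(toy, fdr_level=0.01, plot=False)
    return cutoff_value, passed  # 3 and the eight target PSMs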
# Cell
def cut_global_fdr(data: pd.DataFrame, analyte_level: str='sequence', fdr_level: float=0.01, plot: bool=True, **kwargs) -> pd.DataFrame:
"""
Function to estimate and filter by global peptide or protein fdr
Args:
data (pd.DataFrame): psms table of search results from alphapept.
analyte_level (str, optional): string specifying the analyte level to apply the fdr threshold. Options include: 'precursor', 'sequence', 'protein_group' and 'protein'. Defaults to 'sequence'.
fdr_level (float, optional): fdr level that should be used for filtering. The value should lie between 0 and 1. Defaults to 0.01.
plot (bool, optional): flag to enable plot. Defaults to 'True'.
Returns:
pd.DataFrame: df with filtered results
"""
logging.info('Global FDR on {}'.format(analyte_level))
data_sub = data[[analyte_level,'score','decoy']]
data_sub_unique = data_sub.groupby([analyte_level,'decoy'], as_index=False).agg({"score": "max"})
analyte_levels = ['precursor', 'sequence', 'protein_group','protein']
if analyte_level in analyte_levels:
agg_score = data_sub_unique.groupby([analyte_level,'decoy'])['score'].max().reset_index()
else:
raise Exception('analyte_level should be one of {}. The selected analyte_level was: {}'.format(analyte_levels, analyte_level))
agg_cval, agg_cutoff = cut_fdr(agg_score, fdr_level=fdr_level, plot=plot)
agg_report = data.reset_index().merge(
agg_cutoff,
how = 'inner',
on = [analyte_level,'decoy'],
suffixes=('', '_'+analyte_level),
validate="many_to_one").set_index('index') #retain the original index
return agg_report
# Cell
import networkx as nx
def get_x_tandem_score(df: pd.DataFrame) -> np.ndarray:
"""
Function to calculate the x tandem score
Args:
df (pd.DataFrame): psms table of search results from alphapept.
Returns:
np.ndarray: np.ndarray with x_tandem scores
"""
b = df['b_hits'].astype('int').apply(lambda x: np.math.factorial(x)).values
y = df['y_hits'].astype('int').apply(lambda x: np.math.factorial(x)).values
x_tandem = np.log(b.astype('float')*y.astype('float')*df['matched_int'].values)
x_tandem[x_tandem==-np.inf] = 0
return x_tandem
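# A toy sketch (made-up counts, not from the original notebook) of the X!Tandem-style score
# above: with 3 matched b-ions, 4 matched y-ions and a summed matched intensity of 1e6 the
# score is ln(3! * 4! * 1e6) ~ 18.8; rows without any matches are reset from -inf to 0.
def _example_x_tandem_score():
    toy = pd.DataFrame({'b_hits': [3], 'y_hits': [4], 'matched_int': [1e6]})
    return get_x_tandem_score(toy)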
def score_x_tandem(df: pd.DataFrame, fdr_level: float = 0.01, plot: bool = True, **kwargs) -> pd.DataFrame:
"""
Filters the psms table by using the x_tandem score and filtering the results for fdr_level.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
fdr_level (float, optional): fdr level that should be used for filtering. The value should lie between 0 and 1. Defaults to 0.01.
Returns:
pd.DataFrame: psms table with an extra 'score' column for x_tandem, filtered for no feature or precursor to be assigned multiple times.
"""
logging.info('Scoring using X-Tandem')
if 'localexp' not in df.columns:
df['localexp'] =0
df['score'] = get_x_tandem_score(df)
df['decoy'] = df['sequence'].str[-1].str.islower()
df = filter_score(df)
df = filter_precursor(df)
cval, cutoff = cut_fdr(df, fdr_level, plot)
return cutoff
def filter_with_x_tandem(df: pd.DataFrame) -> pd.DataFrame:
"""
Filters the psms table by using the x_tandem score, no fdr filter.
TODO: Remove redundancy with score functions, see issue: #275
Args:
df (pd.DataFrame): psms table of search results from alphapept.
Returns:
pd.DataFrame: psms table with an extra 'score' column for x_tandem, filtered for no feature or precursor to be assigned multiple times.
"""
logging.info('Filter df with x_tandem score')
df['score'] = get_x_tandem_score(df)
df['decoy'] = df['sequence'].str[-1].str.islower()
df = filter_score(df)
df = filter_precursor(df)
return df
def filter_with_score(df: pd.DataFrame):
"""
Filters the psms table by using the score column, no fdr filter.
TODO: Remove redundancy with score functions, see issue: #275
Args:
df (pd.DataFrame): psms table of search results from alphapept.
Returns:
pd.DataFrame: psms table filtered for no feature or precursor to be assigned multiple times.
"""
logging.info('Filter df with custom score')
df['decoy'] = df['sequence'].str[-1].str.islower()
df = filter_score(df)
df = filter_precursor(df)
return df
# Cell
def score_psms(df: pd.DataFrame, score: str='y_hits', fdr_level: float=0.01, plot: bool=True, **kwargs) -> pd.DataFrame:
"""
Uses the specified score in df to filter psms and to apply the fdr_level threshold.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
score (str, optional): string specifying the column in df to use as score. Defaults to 'y_hits'.
fdr_level (float, optional): fdr level that should be used for filtering. The value should lie between 0 and 1. Defaults to 0.01.
plot (bool, optional): flag to enable plot. Defaults to 'True'.
Returns:
pd.DataFrame: filtered df with psms within fdr
"""
if score in df.columns:
df['score'] = df[score]
else:
raise ValueError("The specified 'score' {} is not available in 'df'.".format(score))
df['decoy'] = df['sequence'].str[-1].str.islower()
df = filter_score(df)
df = filter_precursor(df)
cval, cutoff = cut_fdr(df, fdr_level, plot)
return cutoff
# Cell
import numpy as np
import pandas as pd
import sys
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from .fasta import count_missed_cleavages, count_internal_cleavages
def get_ML_features(df: pd.DataFrame, protease: str='trypsin', **kwargs) -> pd.DataFrame:
"""
Computes additional features on the psms table that are used for subsequent machine learning.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
protease (str, optional): string specifying the protease that was used for proteolytic digestion. Defaults to 'trypsin'.
Returns:
pd.DataFrame: df including additional scores for subsequent ML.
"""
df['decoy'] = df['sequence'].str[-1].str.islower()
df['abs_delta_m_ppm'] = np.abs(df['delta_m_ppm'])
df['naked_sequence'] = df['sequence'].apply(lambda x: ''.join([_ for _ in x if _.isupper()]))
df['n_AA']= df['naked_sequence'].str.len()
df['matched_ion_fraction'] = df['hits']/(2*df['n_AA'])
df['n_missed'] = df['naked_sequence'].apply(lambda x: count_missed_cleavages(x, protease))
df['n_internal'] = df['naked_sequence'].apply(lambda x: count_internal_cleavages(x, protease))
df['x_tandem'] = get_x_tandem_score(df)
return df
def train_RF(df: pd.DataFrame,
exclude_features: list = ['precursor_idx','ion_idx','fasta_index','feature_rank','raw_rank','rank','db_idx', 'feature_idx', 'precursor', 'query_idx', 'raw_idx','sequence','decoy','naked_sequence','target'],
train_fdr_level: float = 0.1,
ini_score: str = 'x_tandem',
min_train: int = 1000,
test_size: float = 0.8,
max_depth: list = [5,25,50],
max_leaf_nodes: list = [150,200,250],
n_jobs: int = -1,
scoring: str = 'accuracy',
plot:bool = False,
random_state: int = 42,
**kwargs) -> (GridSearchCV, list):
"""
Function to train a random forest classifier to separate targets from decoys via semi-supervised learning.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
exclude_features (list, optional): list with features to exclude for ML. Defaults to ['precursor_idx','ion_idx','fasta_index','feature_rank','raw_rank','rank','db_idx', 'feature_idx', 'precursor', 'query_idx', 'raw_idx','sequence','decoy','naked_sequence','target'].
train_fdr_level (float, optional): Only targets below the train_fdr_level cutoff are considered for training the classifier. Defaults to 0.1.
ini_score (str, optional): Initial score to select psms set for semi-supervised learning. Defaults to 'x_tandem'.
min_train (int, optional): Minimum number of psms in the training set. Defaults to 1000.
test_size (float, optional): Fraction of psms used for testing. Defaults to 0.8.
max_depth (list, optional): List of clf__max_depth parameters to test in the grid search. Defaults to [5,25,50].
max_leaf_nodes (list, optional): List of clf__max_leaf_nodes parameters to test in the grid search. Defaults to [150,200,250].
n_jobs (int, optional): Number of jobs to use for parallelizing the gridsearch. Defaults to -1.
scoring (str, optional): Scoring method for the gridsearch. Defaults to'accuracy'.
plot (bool, optional): flag to enable plot. Defaults to 'False'.
random_state (int, optional): Random state for initializing the RandomForestClassifier. Defaults to 42.
Returns:
[GridSearchCV, list]: GridSearchCV: GridSearchCV object with trained RandomForestClassifier. list: list of features used for training the classifier.
"""
if getattr(sys, 'frozen', False):
logging.info('Using frozen pyinstaller version. Setting n_jobs to 1')
n_jobs = 1
features = [_ for _ in df.columns if _ not in exclude_features]
# Setup ML pipeline
scaler = StandardScaler()
rfc = RandomForestClassifier(random_state=random_state) # class_weight={False:1,True:5},
## Initiate scaling + classification pipeline
pipeline = Pipeline([('scaler', scaler), ('clf', rfc)])
parameters = {'clf__max_depth':(max_depth), 'clf__max_leaf_nodes': (max_leaf_nodes)}
## Setup grid search framework for parameter selection and internal cross validation
cv = GridSearchCV(pipeline, param_grid=parameters, cv=5, scoring=scoring,
verbose=0,return_train_score=True,n_jobs=n_jobs)
# Prepare target and decoy df
df['decoy'] = df['sequence'].str[-1].str.islower()
df['target'] = ~df['decoy']
df['score'] = df[ini_score]
dfT = df[~df.decoy]
dfD = df[df.decoy]
# Select high scoring targets (<= train_fdr_level)
df_prescore = filter_score(df)
df_prescore = filter_precursor(df_prescore)
scored = cut_fdr(df_prescore, fdr_level = train_fdr_level, plot=False)[1]
highT = scored[scored.decoy==False]
dfT_high = dfT[dfT['query_idx'].isin(highT.query_idx)]
dfT_high = dfT_high[dfT_high['db_idx'].isin(highT.db_idx)]
# Determine the number of psms for semi-supervised learning
n_train = int(dfT_high.shape[0])
if dfD.shape[0] < n_train:
n_train = int(dfD.shape[0])
logging.info("The total number of available decoys is lower than the initial set of high scoring targets.")
if n_train < min_train:
raise ValueError("There are fewer high scoring targets or decoys than required by 'min_train'.")
# Subset the targets and decoys datasets to result in a balanced dataset
df_training = pd.concat([dfT_high.sample(n=n_train, random_state=random_state), dfD.sample(n=n_train, random_state=random_state)])
# Select training and test sets
X = df_training[features]
y = df_training['target'].astype(int)
X_train, X_test, y_train, y_test = train_test_split(X.values, y.values, test_size=test_size, random_state=random_state, stratify=y.values)
# Train the classifier on the training set via 5-fold cross-validation and subsequently test on the test set
logging.info('Training & cross-validation on {} targets and {} decoys'.format(np.sum(y_train),X_train.shape[0]-np.sum(y_train)))
cv.fit(X_train,y_train)
logging.info('The best parameters selected by 5-fold cross-validation were {}'.format(cv.best_params_))
logging.info('The train {} was {}'.format(scoring, cv.score(X_train, y_train)))
logging.info('Testing on {} targets and {} decoys'.format(np.sum(y_test),X_test.shape[0]-np.sum(y_test)))
logging.info('The test {} was {}'.format(scoring, cv.score(X_test, y_test)))
feature_importances=cv.best_estimator_.named_steps['clf'].feature_importances_
indices = np.argsort(feature_importances)[::-1][:40]
top_features = X.columns[indices][:40]
top_score = feature_importances[indices][:40]
feature_dict = dict(zip(top_features, top_score))
logging.info(f"Top features {feature_dict}")
# Inspect feature importances
if plot:
import seaborn as sns
g = sns.barplot(y=X.columns[indices][:40],
x = feature_importances[indices][:40],
orient='h', palette='RdBu')
g.set_xlabel("Relative importance",fontsize=12)
g.set_ylabel("Features",fontsize=12)
g.tick_params(labelsize=9)
g.set_title("Feature importance")
plt.show()
return cv, features
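# A minimal, self-contained sketch (synthetic data, not from the original notebook) of the
# scaler -> random forest -> grid-search stack assembled above; the semi-supervised
# target/decoy selection performed by train_RF is deliberately omitted here.
def _example_rf_pipeline():
    rng = np.random.RandomState(42)
    X = rng.normal(size=(200, 5))                                # 200 synthetic PSMs with 5 features
    y = (X[:, 0] + 0.5 * rng.normal(size=200) > 0).astype(int)   # synthetic target/decoy labels
    pipe = Pipeline([('scaler', StandardScaler()),
                     ('clf', RandomForestClassifier(random_state=42))])
    grid = GridSearchCV(pipe, {'clf__max_depth': [5, 10]}, cv=3, scoring='accuracy')
    grid.fit(X, y)
    return grid.best_params_, grid.score(X, y)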
def score_ML(df: pd.DataFrame,
trained_classifier: GridSearchCV,
features: list = None,
fdr_level: float = 0.01,
plot: bool = True,
**kwargs) -> pd.DataFrame:
"""
Applies a trained ML classifier to df and uses the ML score to filter psms and to apply the fdr_level threshold.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
trained_classifier (GridSearchCV): GridSearchCV object returned by train_RF.
features (list): list with features returned by train_RF. Defaults to 'None'.
fdr_level (float, optional): fdr level that should be used for filtering. The value should lie between 0 and 1. Defaults to 0.01.
plot (bool, optional): flag to enable plot. Defaults to 'True'.
Returns:
pd.DataFrame: filtered df with psms within fdr
"""
logging.info('Scoring using Machine Learning')
# Apply the classifier to the entire dataset
df_new = df.copy()
df_new['score'] = trained_classifier.predict_proba(df_new[features])[:,1]
df_new = filter_score(df_new)
df_new = filter_precursor(df_new)
cval, cutoff = cut_fdr(df_new, fdr_level, plot)
return cutoff
def filter_with_ML(df: pd.DataFrame,
trained_classifier: GridSearchCV,
features: list = None,
**kwargs) -> pd.DataFrame:
"""
Filters the psms table by using the ML score of the trained classifier, no fdr filter.
TODO: Remove redundancy with score functions, see issue: #275
Args:
df (pd.DataFrame): psms table of search results from alphapept.
trained_classifier (GridSearchCV): GridSearchCV object returned by train_RF.
features (list): list with features returned by train_RF. Defaults to 'None'.
Returns:
pd.DataFrame: psms table with an extra 'score' column from the trained_classifier by ML, filtered for no feature or precursor to be assigned multiple times.
"""
logging.info('Filter df with ML score')
# Apply the classifier to the entire dataset
df_new = df.copy()
df_new['score'] = trained_classifier.predict_proba(df_new[features])[:,1]
df_new = filter_score(df_new)
df_new = filter_precursor(df_new)
return df_new
# Cell
import networkx as nx
def assign_proteins(data: pd.DataFrame, pept_dict: dict) -> (pd.DataFrame, dict):
"""
Assign psms to proteins.
This function appends the dataframe with a column 'n_possible_proteins' which indicates how many proteins a psm could be matched to.
It returns the appended dataframe and a dictionary `found_proteins` where each protein is mapped to the psms indices.
Args:
data (pd.DataFrame): psms table of scored and filtered search results from alphapept.
pept_dict (dict): dictionary that matches peptide sequences to proteins
Returns:
pd.DataFrame: psms table of search results from alphapept appended with the number of matched proteins.
dict: dictionary mapping psms indices to proteins.
"""
data = data.reset_index(drop=True)
data['n_possible_proteins'] = data['sequence'].apply(lambda x: len(pept_dict[x]))
unique_peptides = (data['n_possible_proteins'] == 1).sum()
shared_peptides = (data['n_possible_proteins'] > 1).sum()
logging.info(f'A total of {unique_peptides:,} unique and {shared_peptides:,} shared peptides.')
sub = data[data['n_possible_proteins']==1]
psms_to_protein = sub['sequence'].apply(lambda x: pept_dict[x])
found_proteins = {}
for idx, _ in enumerate(psms_to_protein):
idx_ = psms_to_protein.index[idx]
p_str = 'p' + str(_[0])
if p_str in found_proteins:
found_proteins[p_str] = found_proteins[p_str] + [str(idx_)]
else:
found_proteins[p_str] = [str(idx_)]
return data, found_proteins
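# A toy sketch (made-up peptides and a hand-written pept_dict, not from the original
# notebook) of the assignment above: unique peptides are attached to their protein directly,
# shared peptides are left for the razor step in get_shared_proteins.
def _example_assign_proteins():
    toy = pd.DataFrame({'sequence': ['PEPTIDEK', 'ELVISK', 'SHAREDR'],
                        'score': [20.0, 15.0, 12.0]})
    pept_dict = {'PEPTIDEK': [0], 'ELVISK': [1], 'SHAREDR': [0, 1]}
    data, found_proteins = assign_proteins(toy, pept_dict)
    return data, found_proteins  # found_proteins == {'p0': ['0'], 'p1': ['1']}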
def get_shared_proteins(data: pd.DataFrame, found_proteins: dict, pept_dict: dict) -> dict:
"""
Assign peptides to razor proteins.
Args:
data (pd.DataFrame): psms table of scored and filtered search results from alphapept, appended with `n_possible_proteins`.
        found_proteins (dict): dictionary mapping proteins to psms indices
pept_dict (dict): dictionary mapping peptide indices to the originating proteins as a list
Returns:
dict: dictionary mapping peptides to razor proteins
"""
G = nx.Graph()
sub = data[data['n_possible_proteins']>1]
for i in range(len(sub)):
seq, score = sub.iloc[i][['sequence','score']]
idx = sub.index[i]
possible_proteins = pept_dict[seq]
for p in possible_proteins:
G.add_edge(str(idx), 'p'+str(p), score=score)
connected_groups = np.array([list(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)], dtype=object)
n_groups = len(connected_groups)
    logging.info('A total of {} ambiguous proteins'.format(len(connected_groups)))
#Solving with razor:
found_proteins_razor = {}
for a in connected_groups[::-1]:
H = G.subgraph(a).copy()
shared_proteins = list(np.array(a)[np.array(list(i[0] == 'p' for i in a))])
while len(shared_proteins) > 0:
neighbors_list = []
for node in shared_proteins:
shared_peptides = list(H.neighbors(node))
if node in G:
if node in found_proteins.keys():
shared_peptides += found_proteins[node]
n_neigbhors = len(shared_peptides)
neighbors_list.append((n_neigbhors, node, shared_peptides))
            #Check if we have a protein group (i.e. they share exactly the same peptides)
neighbors_list.sort()
# Check for protein group
node_ = [neighbors_list[-1][1]]
idx = 1
while idx < len(neighbors_list): #Check for protein groups
if neighbors_list[-idx][0] == neighbors_list[-idx-1][0]: #lenght check
if set(neighbors_list[-idx][2]) == set(neighbors_list[-idx-1][2]): #identical peptides
node_.append(neighbors_list[-idx-1][1])
idx += 1
else:
break
else:
break
            #Take the top-ranked entry and remove its peptides from the graph:
shared_peptides = neighbors_list[-1][2]
for node in node_:
shared_proteins.remove(node)
for _ in shared_peptides:
if _ in H:
H.remove_node(_)
if len(shared_peptides) > 0:
if len(node_) > 1:
node_ = tuple(node_)
else:
node_ = node_[0]
found_proteins_razor[node_] = shared_peptides
return found_proteins_razor
def get_protein_groups(data: pd.DataFrame, pept_dict: dict, fasta_dict: dict, decoy = False, callback = None, **kwargs) -> pd.DataFrame:
"""
Function to perform protein grouping by razor approach.
This function calls `assign_proteins` and `get_shared_proteins`.
ToDo: implement callback for solving
Each protein is indicated with a p -> protein index
Args:
data (pd.DataFrame): psms table of scored and filtered search results from alphapept.
pept_dict (dict): A dictionary mapping peptide indices to the originating proteins as a list.
fasta_dict (dict): A dictionary with fasta sequences.
decoy (bool, optional): Defaults to False.
callback (bool, optional): Defaults to None.
Returns:
pd.DataFrame: alphapept results table now including protein level information.
"""
data, found_proteins = assign_proteins(data, pept_dict)
found_proteins_razor = get_shared_proteins(data, found_proteins, pept_dict)
report = data.copy()
assignment = np.zeros(len(report), dtype=object)
assignment[:] = ''
assignment_pg = assignment.copy()
assignment_idx = assignment.copy()
assignment_idx[:] = ''
razor = assignment.copy()
razor[:] = False
if decoy:
add = 'REV__'
else:
add = ''
for protein_str in found_proteins.keys():
protein = int(protein_str[1:])
protein_name = add+fasta_dict[protein]['name']
indexes = [int(_) for _ in found_proteins[protein_str]]
assignment[indexes] = protein_name
assignment_pg[indexes] = protein_name
assignment_idx[indexes] = str(protein)
for protein_str in found_proteins_razor.keys():
indexes = [int(_) for _ in found_proteins_razor[protein_str]]
if isinstance(protein_str, tuple):
proteins = [int(_[1:]) for _ in protein_str]
protein_name = ','.join([add+fasta_dict[_]['name'] for _ in proteins])
protein = ','.join([str(_) for _ in proteins])
else:
protein = int(protein_str[1:])
protein_name = add+fasta_dict[protein]['name']
assignment[indexes] = protein_name
assignment_pg[indexes] = protein_name
assignment_idx[indexes] = str(protein)
razor[indexes] = True
report['protein'] = assignment
report['protein_group'] = assignment_pg
report['razor'] = razor
report['protein_idx'] = assignment_idx
return report
def perform_protein_grouping(data: pd.DataFrame, pept_dict: dict, fasta_dict: dict, **kwargs) -> pd.DataFrame:
"""
Wrapper function to perform protein grouping by razor approach
Args:
data (pd.DataFrame): psms table of scored and filtered search results from alphapept.
pept_dict (dict): A dictionary mapping peptide indices to the originating proteins as a list.
fasta_dict (dict): A dictionary with fasta sequences.
Returns:
pd.DataFrame: alphapept results table now including protein level information.
"""
data_sub = data[['sequence','score','decoy']]
data_sub_unique = data_sub.groupby(['sequence','decoy'], as_index=False).agg({"score": "max"})
targets = data_sub_unique[data_sub_unique.decoy == False]
targets = targets.reset_index(drop=True)
protein_targets = get_protein_groups(targets, pept_dict, fasta_dict, **kwargs)
protein_targets['decoy_protein'] = False
decoys = data_sub_unique[data_sub_unique.decoy == True]
decoys = decoys.reset_index(drop=True)
protein_decoys = get_protein_groups(decoys, pept_dict, fasta_dict, decoy=True, **kwargs)
protein_decoys['decoy_protein'] = True
protein_groups = protein_targets.append(protein_decoys)
protein_groups_app = protein_groups[['sequence','decoy','protein','protein_group','razor','protein_idx','decoy_protein','n_possible_proteins']]
protein_report = pd.merge(data,
protein_groups_app,
how = 'inner',
on = ['sequence','decoy'],
validate="many_to_one")
return protein_report
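# Minimal end-to-end sketch of the razor grouping wrapper (toy inputs, an
# assumption rather than real alphapept output): a PSM table with sequence,
# score and decoy flags, a peptide->protein lookup and a fasta dictionary.
def _example_protein_grouping():
    import pandas as pd
    psms = pd.DataFrame({
        'sequence': ['PEPTIDEA', 'PEPTIDEB', 'PEPTIDEC', 'PEPTIDED'],
        'score': [0.9, 0.8, 0.7, 0.2],
        'decoy': [False, False, False, True],
    })
    pept_dict = {'PEPTIDEA': [0], 'PEPTIDEB': [0, 1], 'PEPTIDEC': [1], 'PEPTIDED': [1]}
    fasta_dict = {0: {'name': 'PROT_A'}, 1: {'name': 'PROT_B'}}
    # The shared peptide 'PEPTIDEB' is resolved to a razor protein; the decoy
    # entry is grouped separately and its protein name is prefixed with 'REV__'.
    return perform_protein_grouping(psms, pept_dict, fasta_dict)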
# Cell
ion_dict = {}
ion_dict[0] = ''
ion_dict[1] = '-H20'
ion_dict[2] = '-NH3'
def get_ion(i: int, df: pd.DataFrame, ions: pd.DataFrame)-> (list, np.ndarray):
"""
Helper function to extract the ion-hits for a given DataFrame index.
This function extracts the hit type and the intensities.
E.g.: ['b1','y1'], np.array([10,20]).
Args:
i (int): Row index for the DataFrame
df (pd.DataFrame): DataFrame with PSMs
ions (pd.DataFrame): DataFrame with ion hits
Returns:
list: List with strings that describe the ion type.
np.ndarray: Array with intensity information
"""
start = df['ion_idx'].iloc[i]
end = df['n_ions'].iloc[i]+start
ion = [('b'+str(int(_))).replace('b-','y') for _ in ions.iloc[start:end]['ion_index']]
losses = [ion_dict[int(_)] for _ in ions.iloc[start:end]['ion_type']]
ion = [a+b for a,b in zip(ion, losses)]
ints = ions.iloc[start:end]['ion_int'].astype('int').values
return ion, ints
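# Toy illustration of the assumed layout: `df` carries an offset (`ion_idx`)
# and a count (`n_ions`) into a flat `ions` table, so PSM i owns the rows
# ion_idx .. ion_idx + n_ions - 1. Negative ion_index values become y-ions.
def _example_get_ion():
    import pandas as pd
    psms = pd.DataFrame({'ion_idx': [0], 'n_ions': [3]})
    ions = pd.DataFrame({'ion_index': [1, -2, 2],   # b1, y2, b2
                         'ion_type': [0, 0, 1],     # '', '', '-H20'
                         'ion_int': [100, 250, 80]})
    return get_ion(0, psms, ions)  # -> (['b1', 'y2', 'b2-H20'], array([100, 250, 80]))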
# Cell
def ecdf(data:np.ndarray)-> (np.ndarray, np.ndarray):
"""Compute ECDF.
Helper function to calculate the ECDF of a score distribution.
This is later used to normalize the score from an arbitrary range to [0,1].
Args:
        data (np.ndarray): Array containing the scores.
Returns:
        np.ndarray: Array containing the scores, sorted.
        np.ndarray: Normalized counts.
"""
x = np.sort(data)
n = x.size
y = np.arange(1, n+1) / n
return (x,y)
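# Sketch (an assumption about the intended use): combine `ecdf` with
# interpolation to map an arbitrary score range onto [0, 1].
def _example_normalize_scores():
    import numpy as np
    from scipy.interpolate import interp1d
    scores = np.array([2.0, 5.0, 3.5, 9.0, 7.2])
    x, y = ecdf(scores)
    to_unit = interp1d(x, y, bounds_error=False, fill_value=(0, 1))
    return to_unit(scores)  # each score replaced by its empirical quantile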
# Cell
import os
from multiprocessing import Pool
from scipy.interpolate import interp1d
from typing import Callable, Union
#This function has no unit test and is covered by the quick_test
def score_hdf(to_process: tuple, callback: Callable = None, parallel: bool=False) -> Union[bool, str]:
"""Apply scoring on an hdf file to be called from a parallel pool.
This function does not raise errors but returns the exception as a string.
Args:
        to_process: (int, dict): Tuple containing a file index and the settings.
callback: (Callable): Optional callback
parallel: (bool): Parallel flag (unused).
Returns:
        Union[bool, str]: True if no exception occurred; the exception as a string if things failed.
"""
logging.info('Calling score_hdf')
try:
index, settings = to_process
#This part collects all ms_data files that belong to one sample.
exp_name = sorted(settings['experiment']['fraction_dict'].keys())[index]
shortnames = settings['experiment']['fraction_dict'].get(exp_name)
file_paths = settings['experiment']['file_paths']
relevant_files = []
for shortname in shortnames:
for file_path in file_paths:
if shortname in file_path:
relevant_files.append(file_path)
break
ms_file_names = [os.path.splitext(x)[0]+".ms_data.hdf" for x in relevant_files]
skip = False
all_dfs = []
ms_file2idx = {}
idx_start = 0
for ms_filename in ms_file_names:
ms_file_ = alphapept.io.MS_Data_File(ms_filename, is_overwritable=True)
try:
df = ms_file_.read(dataset_name='second_search')
logging.info('Found second search psms for scoring.')
except KeyError:
try:
df = ms_file_.read(dataset_name='first_search')
logging.info('No second search psms for scoring found. Using first search.')
except KeyError:
df = | pd.DataFrame() | pandas.DataFrame |
import os
import re
import json
import abc
import warnings
from typing import MutableMapping, List, Union
from functools import reduce
from enum import Enum
import pandas as pd
import numpy as np
from scipy import sparse
import loompy as lp
from loomxpy import __DEBUG__
from loomxpy._specifications import (
ProjectionMethod,
LoomXMetadataEmbedding,
LoomXMetadataClustering,
LoomXMetadataCluster,
LoomXMetadataClusterMarkerMetric,
)
from loomxpy._s7 import S7
from loomxpy._errors import BadDTypeException
from loomxpy._hooks import WithInitHook
from loomxpy._matrix import DataMatrix
from loomxpy.utils import df_to_named_matrix, compress_encode
def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + "\n"
warnings.formatwarning = custom_formatwarning
##########################################
# MODES #
##########################################
class ModeType(Enum):
NONE = "_"
RNA = "rna"
class Mode(S7):
def __init__(self, mode_type: ModeType, data_matrix: DataMatrix):
"""
constructor for Mode
"""
self._mode_type = mode_type
# Data Matrix
self._data_matrix = data_matrix
# Global
self._global_attrs = GlobalAttributes(mode=self)
# Features
self._feature_attrs = FeatureAttributes(mode=self)
self._fa_annotations = FeatureAnnotationAttributes(mode=self)
self._fa_metrics = FeatureMetricAttributes(mode=self)
# Observations
self._observation_attrs = ObservationAttributes(mode=self)
self._oa_annotations = ObservationAnnotationAttributes(mode=self)
self._oa_metrics = ObservationMetricAttributes(mode=self)
self._oa_embeddings = ObservationEmbeddingAttributes(mode=self)
self._oa_clusterings = ObservationClusteringAttributes(mode=self)
@property
def X(self):
return self._data_matrix
@property
def g(self):
return self._global_attrs
@property
def f(self):
return self._feature_attrs
@property
def o(self):
return self._observation_attrs
def export(
self,
filename: str,
output_format: str,
title: str = None,
genome: str = None,
compress_metadata: bool = False,
cluster_marker_metrics: List[LoomXMetadataClusterMarkerMetric] = [
{
"accessor": "avg_logFC",
"name": "Avg. logFC",
"description": f"Average log fold change from Wilcoxon test",
"threshold": 0,
"threshold_method": "lte_or_gte", # lte, lt, gte, gt, lte_or_gte, lte_and_gte
},
{
"accessor": "pval",
"name": "Adjusted P-Value",
"description": f"Adjusted P-Value from Wilcoxon test",
"threshold": 0.05,
"threshold_method": "lte", # lte, lt, gte, gt, lte_or_gte, lte_and_gte
},
],
):
"""
Export this LoomX object to Loom file
Parameters
---------
cluster_marker_metrics: dict, optional
List of dict (ClusterMarkerMetric) containing metadata of each metric available for the cluster markers.
Expects each metric to be of type float.
Return
------
None
"""
if output_format == "scope_v1":
#
_feature_names = self._data_matrix._feature_names
# Init
_row_attrs: MutableMapping = {}
_col_attrs: MutableMapping = {}
_global_attrs: MutableMapping = {
"title": os.path.splitext(os.path.basename(filename))[0]
if title is None
else title,
"MetaData": {
"annotations": [],
"metrics": [],
"embeddings": [],
"clusterings": [],
},
"Genome": genome,
}
# Add row attributes (in Loom specifications)
for _attr_key, _attr in self._feature_attrs:
_row_attrs[_attr_key] = _attr.values
# Add columns attributes (in Loom specifications)
_default_embedding = None
_embeddings_X = pd.DataFrame(index=self._data_matrix._observation_names)
_embeddings_Y = pd.DataFrame(index=self._data_matrix._observation_names)
_clusterings = pd.DataFrame(index=self._data_matrix._observation_names)
for _attr_key, _attr in self._observation_attrs:
if _attr.attr_type.value == AttributeType.ANNOTATION.value:
# Categorical not valid, ndarray is required
_col_attrs[_attr_key] = np.asarray(_attr.values)
_global_attrs["MetaData"]["annotations"].append(
{
"name": _attr_key,
"values": list(
map(
lambda x: x.item()
if type(x).__module__ == "numpy"
else x,
sorted(
np.unique(_attr.values),
reverse=False,
),
)
),
}
)
if _attr.attr_type.value == AttributeType.METRIC.value:
_col_attrs[_attr_key] = np.asarray(_attr.values)
_global_attrs["MetaData"]["metrics"].append({"name": _attr_key})
if _attr.attr_type.value == AttributeType.EMBEDDING.value:
_attr: EmbeddingAttribute
_data = _attr.data.iloc[:, 0:2]
_data.columns = ["_X", "_Y"]
                    # Number of embeddings (don't count the default embedding since this will be used to determine the id of the embedding)
_num_embeddings = len(
list(
filter(
lambda x: int(x["id"]) != -1,
_global_attrs["MetaData"]["embeddings"],
)
)
)
_embedding_id = (
_attr.id
if _attr.id is not None
else (
-1
if _attr._default
else 0
if _num_embeddings == 0
else _num_embeddings + 1
)
)
_embeddings_X = pd.merge(
_embeddings_X,
_data["_X"]
.to_frame()
.rename(columns={"_X": str(_embedding_id)})
.astype("float32"),
left_index=True,
right_index=True,
)
_embeddings_Y = pd.merge(
_embeddings_Y,
_data["_Y"]
.to_frame()
.rename(columns={"_Y": str(_embedding_id)})
.astype("float32"),
left_index=True,
right_index=True,
)
_global_attrs["MetaData"]["embeddings"].append(
{
"id": str(
_embedding_id
), # TODO: type not consistent with clusterings
"name": _attr.name,
}
)
if _attr.default:
_default_embedding = _data
if _attr.attr_type.value == AttributeType.CLUSTERING.value:
_attr: ClusteringAttribute
if _attr.name is None:
raise Exception(
f"The clustering with key '{_attr.key}' does not have a name. This is required when exporting to SCope."
)
# Clustering
_col_name = (
_attr.data.columns[0]
if isinstance(_attr, pd.DataFrame)
else _attr.name
)
_num_clusterings = len(_global_attrs["MetaData"]["clusterings"])
_clustering_id = (
0 if _num_clusterings == 0 else _num_clusterings + 1
)
_clustering_data = (
_attr.data.rename(columns={_col_name: str(_clustering_id)})
if isinstance(_attr.data, pd.DataFrame) # pd.DataFrame
else _attr.data.rename(str(_clustering_id)) # pd.Series
)
_clusterings = pd.merge(
_clusterings,
_clustering_data,
left_index=True,
right_index=True,
)
_clustering_md = LoomXMetadataClustering.from_dict(
{
"id": _clustering_id,
**_attr.metadata.to_dict(),
}
).to_dict()
# Markers
                    # Dictionary of DataFrames (value) containing the values of each metric (key) across features (rows) and clusters (columns)
_cluster_markers_dict = {}
if cluster_marker_metrics:
has_cluster_markers = [
cluster.markers is not None
for cluster in _attr._metadata.clusters
]
if not all(has_cluster_markers):
continue
# Init DataFrame mask of genes representing markers
cluster_markers = pd.DataFrame(
index=_feature_names,
columns=[str(x.id) for x in _attr._metadata.clusters],
).fillna(0, inplace=False)
                        # Init DataFrame containing metric values
_cluster_marker_metric: LoomXMetadataClusterMarkerMetric
for _cluster_marker_metric in cluster_marker_metrics:
_cluster_markers_dict[
_cluster_marker_metric["accessor"]
] = pd.DataFrame(
index=_feature_names,
columns=[str(x.id) for x in _attr._metadata.clusters],
).fillna(
0, inplace=False
)
_cluster: LoomXMetadataCluster
for _cluster in _attr._metadata.clusters:
_features_df = pd.Series(
_cluster.markers.index.values,
index=_cluster.markers.index.values,
)
# Dictionary of Series (value) containing the values of the different metric (key) for the current cluster
_cluster_marker_metric_values_dict = {}
# Dictionary of Series (value) containing a boolean mask of the features that pass the filter criteria for the different metrics (key)
_cluster_marker_metric_masks_dict = {}
_cluster_marker_metric: LoomXMetadataClusterMarkerMetric
for _cluster_marker_metric in cluster_marker_metrics:
# Check if metric exists in markers table
if (
_cluster_marker_metric["accessor"]
not in _cluster.markers.columns
):
raise Exception(
f"The cluster_marker_metrics argument was not properly defined. Missing {_cluster_marker_metric['accessor']} metric in the markers table. Available columns in markers table are f{''.join(_cluster.markers.columns)}."
)
cluster_marker_metric_values = pd.Series(
_cluster.markers[
_cluster_marker_metric["accessor"]
].values,
index=_cluster.markers.index.values,
).astype(float)
if | pd.isnull(cluster_marker_metric_values) | pandas.isnull |
# Author: <NAME>
# Date: 11/08/2018
# Git-Hub: Data-is-Life
import numpy as np
from ast import literal_eval
from pandas import DataFrame, to_datetime, to_timedelta, to_numeric
'''All functions used to clean up game log from Chess.com'''
def custom_round(x, base=20):
    '''Round x to the nearest multiple of base'''
return int(base * round(float(x) / base))
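# Quick illustration (hypothetical values, not from the original script):
# custom_round buckets a number to the nearest multiple of `base`.
def _example_custom_round():
    assert custom_round(1492) == 1500      # nearest multiple of 20 (default base)
    assert custom_round(7, base=5) == 5    # nearest multiple of 5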
def initial_chess_data(filename):
'''First function:
Input:
filename = Game log from Chess.com
    Removes any unnecessary lines from the game log file
Returns:
icd_t = All game information as a text'''
with open(filename, 'r+') as file:
icd_l = file.readlines()
icd_t = " ".join(num for num in icd_l if len(num) > 4)
return icd_t
def chess_data_cleanup(chess_text):
'''Second function:
Input:
chess_text = All game information as a text
Creates a df where one row is for game information, the following row
is moves in that game.
Returns:
df = dataframe with game information and moves'''
chess_text = chess_text.replace('[', '')
chess_text = chess_text.replace(']', '')
chess_text = chess_text.replace('\n', ' ')
chess_text = chess_text.replace(' ', ' ').replace(' ', ' ')
chess_text = chess_text.replace('... ', 'b":"').replace('. ', 'w":"')
chess_text = chess_text.replace('", ', '", "').replace(' {%clk ', '^')
chess_text = chess_text.replace(' {%clk', '^')
chess_text = chess_text.replace('}', '",').replace('", ', '", "')
chess_text = chess_text.replace(' Site "Chess.com" D', ', D')
chess_text = chess_text.replace('Event ', '}~{"Event":')
chess_text = chess_text.replace('", Date ', '", "Date": ')
chess_text = chess_text.replace('" Result ', '", "Result": ')
chess_text = chess_text.replace('" Round ', '", "Round": ')
chess_text = chess_text.replace('" White ', '", "White": ')
chess_text = chess_text.replace('" Black ', '", "Black": ')
chess_text = chess_text.replace('" WhiteElo ', '", "WhiteElo": ')
chess_text = chess_text.replace('" TimeControl ', '", "TimeControl": ')
chess_text = chess_text.replace('" EndTime ', '", "EndTime": ')
chess_text = chess_text.replace('" BlackElo ', '", "BlackElo": ')
chess_text = chess_text.replace('" Termination ', '", "Termination": ')
chess_text = chess_text.replace(' PST', '')
chess_text = chess_text.replace(' PDT', '')
chess_text = chess_text.replace(' ', ' ').replace(' ', ' ')
chess_text = chess_text.replace(' ', ' ')
chess_text = chess_text.replace('" 1w":[', '"}~{"1w":[')
chess_text = chess_text.replace('" 1w":"', '"}~{"1w":"')
chess_text = chess_text.replace(', "1/2-1/2 }~{', '}~{')
chess_text = chess_text.replace(', "1-0 }~{', '}~{')
chess_text = chess_text.replace(', "0-1 }~{', '}~{')
chess_text = chess_text.replace(', "1-0 ', '}').replace(', "}', '}')
chess_text = chess_text.replace(', "1-0', '}').replace(', "0-1', '}')
# Using '~' as a separator
cl = ''.join([num for num in chess_text]).split("~")
# Named the only column "a", so it is easier in the next function
df = DataFrame(cl, columns=['a'])
# If length of any string is less than 3, it is not needed
df = df[df['a'].str.len() > 3]
return df
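# Hypothetical usage sketch (the file name is an assumption, and the return
# order of data_cleaning_1 is taken from its docstring below): the cleaning
# steps are meant to be chained, raw log -> text blob -> rows -> two frames.
def _example_chess_pipeline(filename='chess_com_games.pgn'):
    text = initial_chess_data(filename)
    raw_df = chess_data_cleanup(text)
    moves_df, info_df = data_cleaning_1(raw_df)
    return moves_df, info_df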
def data_cleaning_1(df):
'''Third function:
Input:
df = df with all information
Creates two dfs. First df is all games information. Second df is for all
the moves in those games.
Output:
m_df = moves df with all the moves
d_df = information df with all the game information'''
c_df = DataFrame(data=list(df['a'].apply(literal_eval)))
c_df['Date'].fillna(method='ffill', inplace=True)
c_df['EndTime'].fillna(method='ffill', inplace=True)
# Convert all the dates and time to dates and times
c_df.loc[:, 'date_time'] = to_datetime(
c_df['Date'] + ' ' + c_df['EndTime'])
c_df.loc[:, 'Date'] = to_datetime(c_df['Date'])
c_df.loc[:, 'EndTime'] = to_timedelta(c_df['EndTime'])
# Split moves to a new df drop columns not needed
m_df = c_df[c_df['White'].isnull()].copy()
m_df.sort_values('date_time', inplace=True)
m_df.reset_index(inplace=True)
m_df.drop(columns=[
'index', 'Date', 'White', 'Black', 'Result', 'WhiteElo',
'BlackElo', 'TimeControl', 'EndTime', 'Termination', 'date_time',
'Round', 'Event'], inplace=True)
# Split game information to a new df
d_df = c_df[c_df['1w'].isnull()].copy()
d_df = d_df[['Date', 'White', 'Black', 'Result', 'WhiteElo', 'BlackElo',
'TimeControl', 'EndTime', 'Termination', 'date_time']]
d_df.sort_values('date_time', inplace=True)
d_df.reset_index(inplace=True)
d_df.drop(columns=['index', 'date_time'], inplace=True)
# Rename all columns to lower case and insert "_" to split words
d_df.rename(columns={
'Date': 'date', 'White': 'white', 'Black': 'black',
'Result': 'result', 'WhiteElo': 'white_elo', 'BlackElo': 'black_elo',
'TimeControl': 'game_time', 'EndTime': 'end_time',
'Termination': 'termination'}, inplace=True)
d_df.loc[:, 'num_moves'] = m_df.count(axis=1)
d_df.loc[:, 'white_elo'] = | to_numeric(d_df['white_elo']) | pandas.to_numeric |
"""
Loading results, formatting and adding columns
result is the raw result metric computed from predictions at the end the benchmark. For classification problems, it is usually auc for binomial classification and logloss for multinomial classification.
score ensures a standard comparison between tasks: higher is always better.
norm_score is a normalization of score on a [0, 1] scale, with {{zero_one_refs[0]}} score as 0 and {{zero_one_refs[1]}} score as 1.
imp_result and imp_score for imputed results/scores. Given a task and a framework:
if all folds results/scores are missing, then no imputation occurs, and the result is nan for each fold.
if only some folds results/scores are missing, then the missing result is imputed by the {{impute_missing_with}} result for this fold.
"""
import numpy as np
import pandas as pd
import report.config as config
from .metadata import load_dataset_metadata
from .util import Namespace, display
def load_results(files):
return pd.concat([pd.read_csv(file) for file in files], ignore_index=True)
def task_prop(row, metadata, prop):
return getattr(metadata.get(row.task), prop)
def impute_result(row, results_df, res_col='result', imp_framework=None, imp_value=None):
if pd.notna(row[res_col]):
return row[res_col]
    # if all folds failed or are missing, don't impute
if pd.isna(results_df.loc[(results_df.task==row.task)
&(results_df.framework==row.framework)][res_col]).all():
return np.nan
if imp_framework is not None:
# impute with ref framework corresponding value
return (results_df.loc[(results_df.framework==imp_framework)
&(results_df.task==row.task)
&(results_df.fold==row.fold)][res_col]
.item())
return imp_value
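# Toy illustration (framework/task names and values are made up): a missing
# fold result is imputed with the matching fold of a reference framework.
def _example_impute_result():
    import numpy as np
    import pandas as pd
    res = pd.DataFrame({
        'framework': ['constantpredictor', 'constantpredictor', 'autosklearn', 'autosklearn'],
        'task': ['iris', 'iris', 'iris', 'iris'],
        'fold': [0, 1, 0, 1],
        'result': [0.33, 0.31, 0.95, np.nan],
    })
    row = res.iloc[3]
    return impute_result(row, res, imp_framework='constantpredictor')  # -> 0.31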
def imputed(row):
return | pd.isna(row.result) | pandas.isna |
"""
Python tools and algorithms gathered throughout development projects and tutorials.
Sections:
1. File Read/Write/Convert/Save Operations
2. Pandas Utils
3. Path Operations for File/Folder/System
4. Algorithms for Hierarchical Structures
5. Utility functions for xlrd library and read_spir function
"""
import os
import re
from tkinter import Tk, filedialog, messagebox
import pandas as pd
import xlrd as xl
from advanced_tools.IO_path_utils import write_lol_to_csv
from advanced_tools.pandas_utils import (_convert_empty_cells, _find_pattern,
_get_cell_range,
_get_horizontal_range,
_get_sheet_dimension,
convert_to_hyperlink, get_filepaths,
get_hierarchy_as_list)
##############################################################################################
### SPIR Codification Program and Utilities
##############################################################################################
def codify_spir(
path_to_spir='ENI_SPIR_Ref_form.xlsx', raise_error=True, tag_cell=(4, 2), mm_cell=(10, 29),
):
"""
    Read SPIR forms for ENI Goliat Project or other companies.
First tag cell is manual input while last_tag_cell, first/last MM cell are found based
on script. Scripts are stored in utils.py
Keyword Arguments:
path_to_spir {str} -- Full path to SPIR file (default: {'ENI_SPIR_Ref_form.xlsx'})
tag_cell {tuple} -- First Tag Cell (default: {(4, 2)})
mm_cell {tuple} -- First Material Cell, this is used for overwriting if find_mm_column
            function does not return the right value (default: {(10, 29)})
        raise_error {bool} -- When False, raised or printed errors are silenced so that
            error handling can be done in a wrapper when processing multiple SPIRs (default: {True})
"""
wb = xl.open_workbook(path_to_spir)
print(f"Codification started for: {os.path.split(path_to_spir)[1]}")
# From this point and on it is Platform Dependent ------------>
try:
spir_sheet_name = [
i for i in wb.sheet_names() if re.match(r"^spir$", i.strip(), re.IGNORECASE)][0]
spir_sheet = wb.sheet_by_name(spir_sheet_name)
_convert_empty_cells(spir_sheet)
except IndexError:
raise (NameError("There is no SPIR spreadsheet for found in the excel file"))
try:
cover_page = [i for i in wb.sheet_names() if re.match("front|cover", i, re.IGNORECASE)][0]
coversheet = wb.sheet_by_name(cover_page)
_convert_empty_cells(coversheet)
spir_name = list(set(_find_pattern(coversheet, pattern=r".+-MC-.+")))[0]
except IndexError as err:
spir_name = os.path.split(path_to_spir)[1]
if raise_error:
print(f"{os.path.split(path_to_spir)[1]} : {err}")
# Set reference cells, xlrd uses zero index row and column reference
# xlrd works like range(x,y) function for col_values so increase last value by 1.
ftc = tag_cell # First tag cell coordinate
# ltc = (4, 4) # Last tag cell coordinate, kept for overwriting purposes
# fmc = (10, 26) # First material cell coordinate, kept for overwriting purposes
# lmc = (42, 26) # Last material cell coordinate, kept for overwriting purposes
# Calculate number of spare parts in the SPIR form
fmc = _find_mm_column(spir_sheet, first_mm=mm_cell)
number_of_spares = len(spir_sheet.col_values(colx=fmc[1], start_rowx=fmc[0]))
ltc = _get_horizontal_range(spir_sheet, row=ftc[0], start_col=ftc[1])['last_cell']
lmc = (fmc[0] + number_of_spares - 1, fmc[1]) # Last material cell coordinate
fqc = (fmc[0], ftc[1]) # First quantity cell
lqc = (lmc[0], ltc[1]) # Last quantity cell
# From this point and on it is Platform Independent ------------>
# Read tag numbers as a simple list, row values works like range function so +1 on column
tags = spir_sheet.row_values(rowx=ftc[0], start_colx=ftc[1], end_colx=ltc[1] + 1)
# Create Tag-Spare quantity matrix table ($C$7:~)
# Return two level nested list i.e. (row > columns)
qty_tbl_rng = _get_cell_range(spir_sheet, fqc[0], fqc[1], lqc[0], lqc[1])
qty_tbl = list(map(lambda x: list(map(lambda y: y.value, x)), qty_tbl_rng))
# Key Algorithm 1
# Create tag_table matrix using tag-spare quantity range (qty_tbl) ("C7:~")
# Return three level nested list i.e. (table-wrapper > row > columns)
tag_tbl = []
ctr_row1 = 0
while ctr_row1 < number_of_spares:
ctr_col1 = 0
temp_col_list = []
while ctr_col1 < len(tags):
if qty_tbl[ctr_row1][ctr_col1] is not None:
temp_col_list.append(tags[ctr_col1])
else:
temp_col_list.append(None)
ctr_col1 += 1
tag_tbl.append(temp_col_list)
ctr_row1 += 1
    # Filter None from tables, None.__ne__ to keep other falsy values such as 0, [], {} etc.
tag_tbl = list(map(lambda x: list(filter(None.__ne__, x)), tag_tbl))
qty_tbl = list(map(lambda x: list(filter(None.__ne__, x)), qty_tbl))
# Create material number list (simple list)
mat_tbl_rng = _get_cell_range(spir_sheet, fmc[0], fmc[1], lmc[0], lmc[1])
mat_tbl_rng_value = [cell.value for row in mat_tbl_rng for cell in row]
mat_tbl_rng_value = [i.strip().strip("'") if i is not None else i for i in mat_tbl_rng_value]
# Replace trailng quote at the end of MM number
pattern_mm = re.compile(r"[0-9]{15,20}", re.UNICODE)
try:
# First fill na values in mat_tbl with 999999... and then regex match the material
mat_tbl = [i if i is not None else "99999999999999999999" for i in mat_tbl_rng_value]
mat_tbl = list(map(lambda x: re.search(pattern_mm, x).group(0), mat_tbl))
except (TypeError, AttributeError) as err:
mat_tbl = [i if i is not None else "99999999999999999999" for i in mat_tbl_rng_value]
if raise_error:
print("Error while looking for material number regex match: ", err)
# print("Some material number has wrong syntax, needs to be checked")
    # Validate length of tag, qty and material lists
if len(tag_tbl) == len(mat_tbl) == len(qty_tbl):
max_row_ctr = len(tag_tbl)
else:
# Python 3.6 new feature 'f string' is used
raise (IndexError(
f"""
Inconsistent table!
len(tag_tbl)==len(mat_tbl)==len(qty_tbl) condition is not confirmed
Length of Tag table: {len(tag_tbl)}
Length of Qty table: {len(qty_tbl)}
Length of Mat table: {len(mat_tbl)}
"""
))
# Key Algorithm 2
# Replace any char other than hyphen around tag.
# Split tag numbers written in same cell using ':;\n' separator.
pattern = re.compile(r'[a-zA-Z0-9-]+', re.UNICODE)
tag_tbl = list(map(lambda x: list(map(lambda y: re.findall(pattern, y), x)), tag_tbl))
# Key Algorithm 3
# Zip Tag number with material number and specified quantity as list of tuples of 3
zipped_data = []
ctr_row2 = 0
while ctr_row2 < max_row_ctr:
ctr_col2 = 0
for i in tag_tbl[ctr_row2]:
if len(i) == 1:
zipped_data.append(
(i[0], qty_tbl[ctr_row2][ctr_col2], mat_tbl[ctr_row2], spir_name)
)
ctr_col2 += 1
else:
for j in i:
zipped_data.append(
(j, qty_tbl[ctr_row2][ctr_col2], mat_tbl[ctr_row2], spir_name)
)
ctr_col2 += 1
ctr_row2 += 1
output_folder = os.path.join(os.path.split(path_to_spir)[0], "CodifiedFilesResults")
if os.path.isdir(output_folder):
pass
else:
os.makedirs(output_folder)
tag_mat_qty_output = os.path.join(output_folder, "Tag_Mat_Qty.csv")
spare_detail_output = os.path.join(output_folder, "Sparepart_Details.csv")
write_lol_to_csv(
output_csv=tag_mat_qty_output,
headers=['Tag', 'Quantity', 'EniMM', 'SpirNo'], data=zipped_data, seperator=";")
# Read from Spare part unit type to last column as a dataframe with util function
spir_detail_export = _create_mm_table(
sheet_obj=spir_sheet, srow=fmc[0], scol=fmc[1] - 19, erow=lmc[0], ecol=lmc[1] + 6
)
spir_detail_export['SpirNo'] = spir_name
write_lol_to_csv(
output_csv=spare_detail_output,
headers=spir_detail_export.columns.tolist(),
data=spir_detail_export.values.tolist(), seperator=";")
os.rename(path_to_spir, os.path.join(output_folder, os.path.split(path_to_spir)[1]))
def codify_multiple_spir(tag_cell=(4, 2), mm_cell=(10, 29)):
window = Tk()
window.wm_withdraw()
folder = filedialog.askdirectory(title='Please choose SPIR folder to codify')
# filenames is obtained with os.scandir, because subfolder contains output files.
fnames = [
i.path for i in os.scandir(folder)
if os.path.splitext(i.path)[1].lower() in ['.xls', '.xlsx', '.xlsm'] and "$" not in i.path
]
if os.path.isfile(os.path.join(folder, '__Quality Report for Updated SPIR(s)__.xlsx')):
pass
else:
# quality_assurance_check(folder)
messagebox.showinfo(
title="SPIR Quality Assurance",
message="Consider checking SPIR qualities with the aid of quality_assurance_check()",
detail=""
)
for i in fnames:
try:
codify_spir(path_to_spir=i, tag_cell=tag_cell, mm_cell=mm_cell, raise_error=False)
except Exception as err:
spir_errors = os.path.join(folder, 'SPIR_ERRORs.csv')
with open(spir_errors, "a", encoding='utf-8') as report:
report.write(os.path.split(i)[1]+";"+str(err) + "\n")
continue
messagebox.showinfo(
title="Complete",
message="Done! For possible errors check 'Quality Report' and 'Unstructured_SPIRs.txt'",
detail=""
)
def _find_mm_column(sheet_obj, pattern=r"^[0-9]{15,20}", first_mm=(None, None)):
"""
Find MM columns with the help of regex pattern
Arguments:
sheet_obj {xlrd worksheet object}
Keyword Arguments:
pattern {regexp} -- (default: {r"^[0-9]{15,20}"})
first_mm {tuple} -- Fallback value for first material number cell in case of unsuccessful
parsing (default: {None})
Returns:
tuple -- Tuple of cell cordinates
"""
row_col_list = []
for r in range(0, sheet_obj.nrows):
for c in range(0, sheet_obj.ncols):
if re.search(pattern, str(sheet_obj.cell_value(r, c))):
row_col_list.append((r, c))
seen = set()
dups = set()
for r, c in row_col_list:
if c in seen:
dups.add(c)
seen.add(c)
try:
column = max(dups)
row = min([r for r, c in row_col_list if c == column])
return (row, column)
except (TypeError, ValueError):
print("Issue: MM number can't be fetched by find_mm_column method")
return first_mm
finally:
pass
def _create_mm_table(sheet_obj, srow, scol, erow, ecol):
"""Get MM table, by putting mm number as index.
Arguments:
sheet_obj {xlrd worksheet object}
srow {int} -- Start row for file
scol {int} -- Start column for file
erow {int} -- End row for file
ecol {int} -- End column for file
Returns:
pandas Dataframe
"""
table_columns = [
"SpareUnitCode", "SparePartDescription", "LongText", "DetailDocumentNo",
"DetailDocumentItemRef", "Material", "SupplierPartNo", "ManufacturerPartNo",
"ManufacturerName", "SupplierRecommCommQty", "EngineeringRecommCommQty", "OrderedCommQty",
"SupplierRecommOperationalQty", "EngineeringRecommOperationalQty", "OrderedOperationalQty",
"SupplierRecommCapitalQty", "EngineeringRecommCapitalnQty", "OrderedCapitalQty",
"MeasureUnit", "EniMM", "AchillesCode", "BatchManagement", "SerialNo", "UnitPriceNOK",
"OperatinalSpareDeliveryTimeInMonth", "PreviouslyDelivered"
]
# Read from Spare part unit type to last column
range_for_df = _get_cell_range(
sheet_obj, start_row=srow, start_col=scol, end_row=erow, end_col=ecol
)
# Convert range to xlrd values
range_for_df = list(map(lambda x: list(map(lambda y: y.value, x)), range_for_df))
# Read as Dataframe
df = pd.DataFrame(range_for_df, columns=table_columns)
# Pandas method for extracting regex match from column
df['EniMM'] = df['EniMM'].str.extract("([0-9]{15,20})", expand=False)
df['EniMM'] = df['EniMM'].fillna('99999999999999999999')
return df
def quality_assurance_check(path_to_folder=r"./ENI_test_spirs", use_relative_path=True):
"""Use Utility functions to validate quality of SPIRs.
Keyword Arguments:
path_to_folder {path} -- Path to SPIR folder (default: {r"./ENI_test_spirs"})
"""
fnames = get_filepaths(path_to_folder, file_type=['.xls', '.xlsx', '.xlsm'])
fnames = [i for i in fnames if "Original" not in i]
report_list = []
for file in fnames:
wb = xl.open_workbook(file)
try:
spir_name = [
i for i in wb.sheet_names() if re.match(r"^spir$", i.strip(), re.IGNORECASE)][0]
spir_sheet = wb.sheet_by_name(spir_name)
spirname = _get_sheet_dimension(spir_sheet)['spirname']
max_row_col = _get_sheet_dimension(
spir_sheet)['maxrow'], _get_sheet_dimension(spir_sheet)['maxcol']
material_row_col = _find_mm_column(spir_sheet)
except IndexError:
spirname = 'NoSpirSheet'
max_row_col = ('NoSpirSheet', 'NoSpirSheet')
material_row_col = ('NoSpirSheet', 'NoSpirSheet')
report_header = [
"FileName", "SpirSheet", "LastCellRow", "LastCellCol", "MaterialRow", "MaterialCol",
"Link"
]
report_list.append(
[
os.path.split(file)[1], spirname, max_row_col[0], max_row_col[1],
material_row_col[0], material_row_col[1], file
]
)
df = | pd.DataFrame(data=report_list, columns=report_header) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This script handles OSRM map-matching errors.
Then, the distances between consecutive matches are computed
along the road network.
From these distances, the speeds between consecutive matches are derived.
Finally, a speed is assigned to each segment.
"""
__authors__ = ("<NAME>")
__contact__ = ("<EMAIL>")
__date__ = "05/03/19"
__version__ = "47"
# importer les lib
import sys
sys.path.append("C:\SYNC\SYNCulb\Memoire\DB\Python\Fonctions")
import json
import psycopg2
import numpy as np
import pandas as pd
import geopandas as gpd
import time
import sqlalchemy
from shapely.geometry import Point, LineString
from psycopg2.extensions import register_adapter, AsIs
# Import my own functions.
import correct_roundabout as roundabout
import same_from_to as sft
import backward
import overlay
import azimut
import edge_intersect_by_match as eibm
import distance_following_network as dfn
import get_distance_to_next_match as get_dtnm
import assign_edge_speed as aes
def wkb_hexer(line):
    # Generate WKB hex strings
return line.wkb_hex
def process_time():
    # Record and display the processing time
gdf_all_trips['python_computation_time'] = computation_time = (
(time.time()) - start_time)
print('Traitement :', computation_time, 'sec')
def re_assign_edge_id(gdf_edges):
    # Re-assign the edge_id values after processing
first_edge_id = gdf_edges['edge_id'].iloc[0]
a = 0
for i in gdf_edges.index:
gdf_edges.loc[i, 'edge_id'] = first_edge_id + a
a += 1
return gdf_edges
def addapt_numpy_float64(numpy_float64):
    # Convert numpy float64 to a plain Python float.
return AsIs(numpy_float64)
def addapt_numpy_int64(numpy_int64):
    # Convert numpy int64 to a plain Python int.
return AsIs(numpy_int64)
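# Sketch (an assumption about intent -- the original registration calls, if any,
# sit outside this excerpt): tell psycopg2 how to adapt numpy scalars so they
# can be passed directly as SQL query parameters.
register_adapter(np.float64, addapt_numpy_float64)
register_adapter(np.int64, addapt_numpy_int64)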
# Display and id-assignment variables.
number_of_trip = total_edge_id = 0
# List of trips to process.
trips_id = []
# Confidence below which the trip will not be processed.
confidence_treshold = 0.7
# Error margin (in thousandths) between the trip distance and the
# computed distance_to_next_match values.
error_range = 5
# Bearing change (in degrees) above which the match will be
# removed.
max_bearing = 160
# Table containing the points to process.
points_table = 'citygis.points'
points_name = points_table.replace('citygis.', '')
# Table containing the interventions
interventions_table = 'citygis.interventions'
# Table that will contain the trace segments.
osrm_edges_table = 'citygis.osrm_edges'
osrm_edges_name = osrm_edges_table.replace('citygis.', '')
# Table that will contain the trips.
osrm_trips_table = 'citygis.osrm_trips'
osrm_trips_name = osrm_trips_table.replace('citygis.', '')
# Temporary tables.
temp_table_1 = 'citygis.temp_table_1'
temp_name_1 = temp_table_1.replace('citygis.', '')
temp_table_2 = 'citygis.temp_table_2'
temp_name_2 = temp_table_2.replace('citygis.', '')
# Connect to the DB.
try:
connection = psycopg2.connect(
"dbname='postgres' user='postgres' host='127.0.0.1' port = '5432' password=''")
engine = sqlalchemy.create_engine(
"postgresql://**************")
cursor = connection.cursor()
except BaseException:
print ("Unable to connect to the database!")
# Get the list of trips to process
with open('first24000.txt', 'r') as inputfile:
trips_id = inputfile.readlines()
# Convert the type from str to int
trips_id = list(map(int, trips_id))
# Get the list of trips already processed
try:
sql = """
SELECT DISTINCT trip_id
FROM {}
""".format(temp_table_1)
df_already_done = pd.read_sql_query(sql, connection)
except Exception:
pass
for trip in trips_id:
    # Track the processing time.
start_time = time.time()
    # List used to update the points table.
osrm_answer = []
    # Lists used to update the osm_edges table.
osm_point, osm_node_id, osm_node_from, osm_node_to, list_edge_id = [
], [], [], [], []
    # Counter to display
number_of_trip += 1
    # Create gdf_all_trips to store general information
    # about the trip.
gdf_all_trips = gpd.GeoDataFrame(
columns=[
'trip_id',
'geojson',
'confidence',
'osrm_msg',
'geometry',
'total_trip_length',
'total_dtnm_length',
'delta',
'is_equality',
'osrm_computation_time',
'python_computation_time',
'confidence_treshold',
'error_range',
'backward',
'u_turn',
'roundabout',
'illegal_turn'])
    # Create gdf_trip, which will contain the data on the points
    # and the matches.
gdf_trip = gpd.GeoDataFrame(
columns=[
'point_id',
'time',
'timestamp',
'long_match',
'lat_match',
'geometry',
'bearing',
'bearing_delta',
'point_is_intersected',
'edge_id_intersected',
'order_of_match_on_same_edge',
'edge_id_who_is_intersected',
'second_edge_id_who_is_intersected',
'dist_from_edge_start',
'total_next_edge_length_whitout_match',
'distance_to_next_match',
'speed_to_next_match'])
    # Create gdf_edges, which will contain the OSM segments.
gdf_edges = gpd.GeoDataFrame(
columns=[
'edge_id',
'trip_id',
'osm_node_from',
'osm_node_to',
'long_from',
'lat_from',
'long_to',
'lat_to',
'geom_from',
'geom_to',
'edgeline',
'edge_length',
'edge_is_intesected',
'nb_point_who_intersect',
'edge_speed',
'u_turn',
'same_from_to'])
    # Assign a projection
gdf_edges.crs = {'init': 'epsg:31370'}
    # Change the default dtypes.
gdf_all_trips[['trip_id',
'total_trip_length',
'total_dtnm_length',
'confidence_treshold']].astype(int)
gdf_all_trips[['confidence',
'python_computation_time',
'delta']].astype(np.float64)
gdf_all_trips[['is_equality',
'backward',
'roundabout',
'illegal_turn']].astype(bool)
    # Print to the console.
print('\n')
print('N de trip:', number_of_trip)
print('trips_id:', trip)
try:
        # If the trip has already been processed in the past:
if trip in df_already_done.values:
            # Skip to the next trip
print('Trip deja traite')
continue
except Exception:
pass
    # Get the geometry of the intervention location based on
    # intervention_id (to make visualisation in QGIS easier).
sql = """
SELECT DISTINCT point
FROM {} inter
JOIN {} pt ON inter.id = pt.intervention_id
WHERE pt.trips_id = {}
""".format(interventions_table, points_table, trip)
cursor.execute(sql)
    # Inject the trip_id and the geometry into the gdf. Assign
    # a projection to the geometry.
gdf_all_trips['trip_id'] = | pd.Series(trip) | pandas.Series |
# Working with groupby in Pandas
import pandas as pd
import numpy as np
ipl_data = {'Team': ['Riders', 'Riders', 'Devils', 'Devils', 'Kings',
'kings', 'Kings', 'Kings', 'Riders', 'Royals', 'Royals', 'Riders'],
'Rank': [1, 2, 2, 3, 3,4 ,1 ,1,2 , 4,1,2],
'Year': [2014,2015,2014,2015,2014,2015,2016,2017,2016,2014,2015,2017],
'Points':[876,789,863,673,741,812,756,788,694,701,804,690]}
df = | pd.DataFrame(ipl_data) | pandas.DataFrame |
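# A minimal groupby illustration to go with the title above (the particular
# aggregation is an assumed example): total points and number of games per team.
team_points = df.groupby('Team')['Points'].agg(['sum', 'count'])
print(team_points)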
# -*- coding: utf-8 -*-
"""
CF-FM call segmentation accuracy
================================
This page will illustrate the accuracy with which `itsfm` can segment CF-FM parts of a
CF-FM call. To see what a CF-FM call looks like check out the bat-call example
in the 'Basic Examples' page.
The synthetic data has already been generated and run with the :code:`segment_and_measure`
function, and now we'll compare the accuracy with which it has all happened.
A CF-FM bat call typically has three parts to it, 1) an 'up' FM, where the
frequency of the call increases, 2) a 'CF' part, where the frequency is
stable, and then 3) a 'down' FM, where the frequency drops. The synthetic
data is basically a set of CF-FM calls with a combination of upFM, downFM
and CF part durations, bandwidths,etc.
Here we will only be seeing if the durations of each of the segment parts have been picked
up properly or not. We will *not* be performing any accuracy assessments on
the exact parameters (eg. peak frequency, rms, etc) because it is assumed that
if the call parts can be identified by their durations then the measurements will
in turn be as expected.
There is no silence in the synthetic calls, and no noise too. This is the
situation which should provide the highest accuracy.
What happened before
~~~~~~~~~~~~~~~~~~~~
To see more on the details of the generation and running of the synthetic data
see the modules `CF/FM call segmentation` and `Generating the CF-FM synthetic calls`
"""
import itsfm
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize'] = 10000
import numpy as np
import pandas as pd
import seaborn as sns
import tqdm
obtained = pd.read_csv('obtained_pwvd_horseshoe_sim.csv')
synthesised = pd.read_csv('horseshoe_test_parameters.csv')
# %%
# Let's look at the obtained regions and their durations
obtained
# %%
# There are some calls with *multiple* segments detected.
# This multiplicity of segments typically results from false positive detections,
# where the CF-FM ratio jumps above 0 spuriously for a few samples. Let's take a look
# at some of these situations.
def identify_valid_segmentations(df):
'''
Identifies if a segmentation output has valid (numeric)
entries for cf1, fm1, fm2, and NaN for all other columns.
Parameters
----------
df : pd.DataFrame
with at least the following column names, 'cf1','fm1','fm2'
Returns
-------
valid_segmentation: bool.
True, if the segmentation is valid.
'''
all_columns = df.columns
target_columns = ['cf1','fm1','fm2']
rest_columns = set(all_columns)-set(target_columns)
rest_columns = rest_columns - set(['call_number'])
valid_cf1fm1fm2 = lambda row, target_columns: np.all([ ~np.isnan(row[each]) for each in target_columns])
all_otherrows_nan = lambda row, rest_columns: np.all([ np.isnan(row[each]) for each in rest_columns])
all_valid_rows = np.zeros(df.shape[0],dtype=bool)
for i, row in df.iterrows():
all_valid_rows[i] = np.all([valid_cf1fm1fm2(row, target_columns),
all_otherrows_nan(row, rest_columns)])
return all_valid_rows
valid_pwvd_segmentations = identify_valid_segmentations(obtained)
print(f'{sum(valid_pwvd_segmentations)/valid_pwvd_segmentations.size} of all calls could be segmented correctly')
# %%
# We can see the output has each CF/FM region labelled by the order in which
# they're found. Let's re-label these to match the names of the synthesised
# call parameter dataframe. 'upfm' is fm1, 'downfm' is fm2.
valid_obtained = obtained[valid_pwvd_segmentations]
valid_obtained.columns = ['call_number','cf_duration',
'upfm_duration', 'downfm_duration', 'other']
# %%
# Let's look at the synthetic call parameters. There's a bunch of parameters
# that're not interesting for this accuracy exercise and so let's remove them
synthesised
synthesised.columns
synth_regions = synthesised.loc[valid_pwvd_segmentations,['cf_duration', 'upfm_duration','downfm_duration']]
synth_regions['other'] = np.nan
synth_regions['call_number'] = valid_obtained['call_number']
# %%
# Comparing the synthetic and the obtained results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# We have the two datasets formatted properly, now let's compare the
# accuracy of `itsfm`.
pwvd_accuracy = valid_obtained/synth_regions
pwvd_accuracy['call_number'] = valid_obtained['call_number']
# %%
# Overall accuracy of segmentation:
pwvd_accuracy_reformat = pwvd_accuracy.melt(id_vars=['call_number'],
var_name='Region type',
value_name='Accuracy')
pwvd_accuracy_reformat = pwvd_accuracy_reformat[pwvd_accuracy_reformat['Region type']!='other']
plt.figure()
ax = sns.boxplot(x='Region type', y = 'Accuracy',
data=pwvd_accuracy_reformat)
ax = sns.swarmplot(x='Region type', y = 'Accuracy',
data=pwvd_accuracy_reformat,
alpha=0.5)
# %%
# Peak-percentage method accuracy
# -------------------------------
# Now let's take a look at the peak percentage method's accuracy
obtained_pkpct = pd.read_csv('obtained_pkpct_horseshoe_sim.csv')
obtained_pkpct.head()
calls_w_3segs = identify_valid_segmentations(obtained_pkpct)
print(f'{np.sum(calls_w_3segs)/calls_w_3segs.size} % of calls have 3 segments')
# %%
# 6% of calls don't have 3 components - let's remove these poorly segmented calls and
# quantify their segmentation accuracy.
pkpct_well_segmented = obtained_pkpct.loc[calls_w_3segs,:]
pkpct_well_segmented = pkpct_well_segmented.drop(['cf2','fm3','fm4'],axis=1)
pkpct_well_segmented.columns = ['call_number','cf_duration',
'upfm_duration', 'downfm_duration', 'other']
synth_regions_pkpct = synthesised.loc[calls_w_3segs,['cf_duration', 'upfm_duration','downfm_duration']]
synth_regions_pkpct['other'] = np.nan
synth_regions_pkpct['call_number'] = pkpct_well_segmented['call_number']
pkpct_accuracy = pkpct_well_segmented/synth_regions_pkpct
pkpct_accuracy['call_number'] = pkpct_well_segmented['call_number']
# Overall accuracy of segmentation:
pkpct_accuracy_reformat = pkpct_accuracy.melt(id_vars=['call_number'],
var_name='Region type',
value_name='Accuracy')
pkpct_accuracy_reformat = pkpct_accuracy_reformat[pkpct_accuracy_reformat['Region type']!='other']
plt.figure()
ax = sns.violinplot(x='Region type', y = 'Accuracy',
data=pkpct_accuracy_reformat)
ax = sns.swarmplot(x='Region type', y = 'Accuracy',
data=pkpct_accuracy_reformat,
alpha=0.5)
# %%
# Putting it all together: PWVD vs peak percentage
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pwvd_accuracy = pwvd_accuracy_reformat.copy()
pwvd_accuracy['method'] = 'pwvd'
pkpct_accuracy = pkpct_accuracy_reformat.copy()
pkpct_accuracy['method'] = 'pkpct'
both_accuracy = pd.concat([pwvd_accuracy, pkpct_accuracy])
both_accuracy['combined_id'] = both_accuracy['Region type']+both_accuracy['method']
grouped_accuracy = both_accuracy.groupby(['Region type','method'])
plt.figure(figsize=(8,6))
ax = sns.swarmplot(x='Region type', y = 'Accuracy',
data=both_accuracy, hue='method',hue_order=["pwvd", "pkpct"],
dodge=True,alpha=0.5, s=3)
ax2 = sns.violinplot(x='Region type', y = 'Accuracy',
data=both_accuracy, hue='method',hue_order=["pwvd", "pkpct"],
dodge=True,alpha=0.5, s=2.5)
ax2.legend_.remove()
handles, labels = ax2.get_legend_handles_labels() # thanks Ffisegydd@ https://stackoverflow.com/a/35539098
l = plt.legend(handles[0:2], ['PWVD','Peak percentage'], loc=2, fontsize=11,
borderaxespad=0., frameon=False)
plt.xticks([0,1,2],['CF','iFM','tFM'], fontsize=11)
plt.xlabel('Call component',fontsize=12);plt.ylabel('Accuracy of segmentation, $\\frac{obtained}{actual}$',fontsize=12);
plt.yticks(fontsize=11)
plt.ylim(0,1.5)
plt.tight_layout()
plt.savefig('pwvd-pkpct-comparison.png')
# %%
# What are the 95%ile limits of the accuracy?
accuracy_ranges = grouped_accuracy.apply(lambda X: np.nanpercentile(X['Accuracy'],[2.5,97.5]))
accuracy_ranges
# %%
# Troubleshooting the 'bad' fixes - what went wrong?
# ---------------------------------------------------
# Some bad PWVD identifications
# ~~~~~~~~~~~~~~~~~~~~~~~~
# As we can see there are a few regions where the accuracy is very low, let's
# investigate which of these calls are doing badly.
duration_accuracy = valid_obtained/synth_regions
poor_msmts = duration_accuracy[duration_accuracy['cf_duration']<0.5].index
# %%
# Now, let's troubleshoot this particular set of poor measurements fully.
simcall_params = pd.read_csv('horseshoe_test_parameters.csv')
obtained_params = pd.read_csv('obtained_pwvd_horseshoe_sim.csv')
obtained_params.loc[poor_msmts,:]
# %%
# There are two CF regions being recognised, one of them is just extremely short.
# Where is this coming from? Let's take a look at the actual frequency tracking output,
# by re-running the ```itsfm``` routine once more:
import h5py
f = h5py.File('horseshoe_test.hdf5', 'r')
fs = float(f['fs'][:])
parameters = {}
parameters['segment_method'] = 'pwvd'
parameters['window_size'] = int(fs*0.001)
parameters['fmrate_threshold'] = 2.0
parameters['max_acc'] = 10
parameters['extrap_window'] = 75*10**-6
raw_audio = {}
for call_num in tqdm.tqdm(poor_msmts.to_list()):
synthetic_call = f[str(call_num)][:]
raw_audio[str(call_num)] = synthetic_call
output = itsfm.segment_and_measure_call(synthetic_call, fs, **parameters)
seg_output, call_parts, measurements= output
# # save the long format output into a wide format output to
# # allow comparison
# sub = measurements[['region_id', 'duration']]
# sub['call_number'] = call_num
# region_durations = sub.pivot(index='call_number',
# columns='region_id', values='duration')
# obtained.append(region_durations)
call_num = str(poor_msmts[0])
plt.figure()
plt.subplot(211)
plt.specgram(raw_audio[call_num], Fs=fs)
plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
seg_output[2]['raw_fp'])
plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
seg_output[2]['fitted_fp'])
plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
seg_output[0]*4000,'w')
plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
seg_output[1]*4000,'k')
plt.subplot(212)
plt.plot(raw_audio[call_num])
plt.figure()
plt.subplot(311)
plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
seg_output[2]['raw_fp'])
plt.subplot(312)
plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
seg_output[2]['fmrate'])
#plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
# seg_output[0]*5,'k',label='CF')
#plt.plot(np.linspace(0,raw_audio[call_num].size/fs,raw_audio[call_num].size),
# seg_output[1]*5,'r', label='FM')
plt.hlines(2, 0, raw_audio[call_num].size/fs, linestyle='dotted', alpha=0.5,
label='2kHz/ms fm rate')
plt.legend()
plt.subplot(313)
plt.plot(raw_audio[call_num])
# %%
# Making some corrections to the PWVD output
# ------------------------------------------
# Here, we can see that the 'error' is that the FM rate is very slightly below the
# 2 kHz/ms FM rate, and thus appears as a false CF region. This slight drop in
# FM rate is also because of edge effects. The frequency profile correction methods
# in place were able to recognise the odd spike in frequency profile and interpolate
# between two regions with reliable frequency profiles. This interpolation thus lead
# to a slight drop in the FM rate.
# %%
# Considering that the CF measurement is actually there, but labelled as CF2,
# let's correct this labelling error and then see the final accuracy. We will not
# attempt to compensate for this error by adjusting the iFM duration here.
corrected_obtained = obtained_params.copy()
for each in poor_msmts:
corrected_obtained.loc[each,'cf1'] = corrected_obtained.loc[each,'cf2']
corrected_obtained.loc[each,'other'] = np.nan
corrected_obtained = corrected_obtained.loc[:,corrected_obtained.columns!='cf2']
corrected_obtained.columns = ['call_number','cf_duration',
'upfm_duration', 'downfm_duration', 'other']
corrected_accuracy = corrected_obtained/synth_regions
corrected_accuracy['call_number'] = corrected_obtained['call_number']
corrected_accuracy_reformat = corrected_accuracy.melt(id_vars=['call_number'],
var_name='Region type',
value_name='Accuracy')
corrected_accuracy_reformat = corrected_accuracy_reformat.loc[corrected_accuracy_reformat['Region type']!='other',:]
plt.figure()
ax = sns.boxplot(x='Region type', y = 'Accuracy',
data=corrected_accuracy_reformat)
ax = sns.swarmplot(x='Region type', y = 'Accuracy',
data=corrected_accuracy_reformat,
alpha=0.5)
# %%
# Figuring out what happened with the peak-percentage segmentation
# ----------------------------------------------------------------
#
calls_w_3segs = identify_valid_segmentations(obtained_pkpct)
poor_pkpct = obtained_pkpct[~calls_w_3segs]
synthesised['upfm_bandwidth'] = synthesised['cf_peak_frequency']-synthesised['upfm_terminal_frequency']
synthesised['downfm_bandwidth'] = synthesised['cf_peak_frequency']-synthesised['downfm_terminal_frequency']
for each in ['upfm','downfm']:
values, counts = np.unique(synthesised[~calls_w_3segs][each+'_bandwidth'], return_counts=True)
print(values, counts)
# %%
# It seems like bandwidth has a role, and somehow only the iFM components are more affected than the tFM components - why is this?
pkpctg_parameters = {}
pkpctg_parameters['segment_method'] = 'peak_percentage'
pkpctg_parameters['peak_percentage'] = 0.99
pkpctg_parameters['window_size'] = 125
pkpctg_parameters['double_pass'] = True
for call_num in tqdm.tqdm(poor_pkpct['call_number'].tolist()[-2:]):
synthetic_call = f[str(call_num)][:]
raw_audio[str(call_num)] = synthetic_call
output = itsfm.segment_and_measure_call(synthetic_call, fs, **pkpctg_parameters)
seg_output, call_parts, measurements= output
out_vis = itsfm.itsFMInspector(output, synthetic_call, fs)
out_vis.visualise_cffm_segmentation()
out_vis.visualise_pkpctage_profiles()
# %%
# It'd be good to compare the accuracy with a bunch of other potentially relevant parameters.
# %%
# Which call parameters correspond to lower accuracy in general?
# Let's compare the poorly segmented calls (arbitrarily defined as below 0.8 relative accuracy)
# to those that are more accurate
pkpct_low_accuracy = pkpct_accuracy_reformat[pkpct_accuracy_reformat['Accuracy']<0.8]
pkpct_lowacc_calls = np.unique(pkpct_low_accuracy['call_number'])
lowacc_callprops = synthesised[synthesised['Unnamed: 0'].isin(pkpct_lowacc_calls)]
lowacc_callprops['highacc'] = False
highacc_callprops = synthesised[~synthesised['Unnamed: 0'].isin(pkpct_lowacc_calls)]
highacc_callprops['highacc'] = True
highlow_callprops = | pd.concat([lowacc_callprops, highacc_callprops]) | pandas.concat |
import os
import sys
DIR_TASK = os.path.basename(os.getcwd())
DIR_LIB = os.path.abspath(os.path.join(os.path.dirname(__file__),"../"))
DIR_TASK = os.path.dirname(os.path.abspath(__file__))
import json, csv, time, string, itertools, copy, yaml
import pyodbc
import numpy as np
import pandas as pd
import datetime as dt
config = yaml.load( stream = file( DIR_TASK + '\\config.yml', 'r'))
#yaml.dump( config, file( DIR_TASK + '\\config.yml', 'w') )
sys.path.append( DIR_LIB )
from lib.router import Router
router = Router( )
# --------------------------------------------------------------------------
#STEP: get LCG values from DB
cnxn = pyodbc.connect(r'Driver={SQL Server};Server=.\SQLEXPRESS;Database=qlik_medical;Trusted_Connection=yes;')
cursor = cnxn.cursor()
cursor.execute("SELECT * FROM A_POSTCODE")
queryResult = cursor.fetchall()
#com: result as dictionary
# queryDict = {}
# for item in queryResult:
# queryDict[ item[1] ] = item[0]
#com: result as dataframe
queryData = [ list(x) for x in queryResult]
queryDescriptor = cursor.description
columNames = list( map(lambda x: x[0], queryDescriptor) )
queryDf = pd.DataFrame( queryData, columns= columNames )
#STEP: modify version?
configVersion = config['version']
config['version'] = round( float(configVersion) + .1, 1 ) if config['options']['increment-version'] == True else configVersion
#STEP: Collect all source-files as a single dataframe
def sourceToDf( _year, _month ):
#params: files_2018, april
inputFilePath = router.getRoute( config['source']['route'] ) \
+ config['source']['dir'] \
+ config['source'][_year][_month]
return pd.read_csv( filepath_or_buffer=inputFilePath, sep=',', quoting=0 ) #,encoding="utf-8-sig"
dfs2018 = []
yearFileKey = 'files_2018'
for iFileKey in config['source'][ yearFileKey ]:
dfBuffer = sourceToDf( yearFileKey, iFileKey )
dfBuffer['month'] = iFileKey
dfs2018.append( dfBuffer )
dfBuf = pd.concat( dfs2018 )
#com: Clean columns
dfBuf['Registered_Patients'] = dfBuf['Registered_Patients'].fillna( dfBuf['Registered Patients'] )
dfBuf['PracticeName'] = dfBuf['PracticeName'].fillna( dfBuf['Practice Name'] )
dfBuf['PracticeName'] = dfBuf['PracticeName'].str.strip()
dfBuf = dfBuf.dropna(subset=['PracticeName'])
dfBuf['PracNo'] = dfBuf['PracNo'].fillna( dfBuf['Practice No'] )
dfBuf["PracNo"] = pd.to_numeric( dfBuf["PracNo"], downcast='integer')
dfBuf = dfBuf.drop(['Practice No','Registered Patients', 'Practice Name', 'Unnamed: 8', 'Unnamed: 9'], axis=1)
dfBuf["Registered_Patients"] = pd.to_numeric( dfBuf["Registered_Patients"], downcast='integer')
dfBuf['year'] = 2018
dfsOld = []
yearFileKey = 'files_old'
for iFileKey in config['source'][ yearFileKey ]:
dfBuffer = sourceToDf( yearFileKey, iFileKey )
dfBuffer['month'] = iFileKey
dfsOld.append( dfBuffer )
def sanitize(data):
return str(data).replace(',','')
panBufO = pd.concat( dfsOld )
#com: Clean columns
panBufO = panBufO.rename(columns={ 'Registered Patients':'Registered_Patients' })
panBufO = panBufO.dropna(subset=['PracticeName'])
panBufO["PracNo"] = pd.to_numeric( panBufO["PracNo"], downcast='integer')
panBufO = panBufO.dropna(subset=['Registered_Patients'])
panBufO["Registered_Patients"] = panBufO["Registered_Patients"].apply(sanitize)
panBufO["Registered_Patients"] = | pd.to_numeric(panBufO['Registered_Patients'], downcast='integer') | pandas.to_numeric |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import sys
import numpy as np
import pandas.compat as compat
from pandas.compat import lrange, range, u
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, Series, date_range, option_context,
period_range, timedelta_range)
from pandas.core.base import StringMixin
from pandas.core.index import MultiIndex
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData):
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
assert repr(s) == expected
def test_name_printing(self):
# Test small Series.
s = Series([0, 1, 2])
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
# Test big Series (diff code path).
s = Series(lrange(0, 1000))
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
s = Series(index=date_range('20010101', '20020101'), name='test')
assert "Name: test" in repr(s)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series( | tm.randn(1000) | pandas.util.testing.randn |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
from typing import Dict
import pandas as pd
import spacy
from gensim.models import CoherenceModel, Phrases, nmf
from gensim.models.phrases import Phraser
from gensim.utils import simple_preprocess
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
def compute_coherence_values(
corpus,
id2word,
texts,
limit,
start=2,
step=3,
random_state=42,
chunk_size=500,
):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
    corpus : Gensim corpus
    id2word : Gensim dictionary
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_dict : Dict of NMF topic models
coherence_values : Coherence values corresponding to the NMF model with
respective number of topics
"""
coherence_values = []
model_dict = {}
for num_topics in range(start, limit + 1, step):
print(f" > Applying NMR for k={num_topics:0d}...", end="")
model = nmf.Nmf(
corpus=corpus,
id2word=id2word,
num_topics=num_topics,
chunksize=chunk_size, # no. of docs to be used per training chunk
passes=10,
kappa=1.0,
minimum_probability=0.01,
w_max_iter=200,
w_stop_condition=0.0001,
h_max_iter=50,
h_stop_condition=0.001,
eval_every=10,
normalize=True,
random_state=random_state,
)
model_dict[num_topics] = model
print("computing coherence score...", end="")
coherence_model = CoherenceModel(
model=model, texts=texts, dictionary=id2word, coherence="c_v"
)
model_coherence_value = coherence_model.get_coherence()
print(f"found coherence={model_coherence_value:.4f}")
coherence_values.append(model_coherence_value)
return model_dict, coherence_values
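# --- Illustrative usage sketch (not part of the original pipeline) ---
# The toy texts, dictionary and corpus below are assumptions made purely to
# show the expected call signature of compute_coherence_values; real,
# preprocessed corpora are needed for meaningful coherence scores. Wrapped in
# a helper so nothing runs at import time.
def _example_coherence_scan():
    from gensim.corpora import Dictionary

    toy_texts = [
        ["topic", "model", "coherence"],
        ["nmf", "gensim", "corpus"],
        ["topic", "corpus", "dictionary"],
    ]
    toy_id2word = Dictionary(toy_texts)
    toy_corpus = [toy_id2word.doc2bow(text) for text in toy_texts]
    # Scan k = 2, 3, 4 and return the fitted models with their c_v scores.
    return compute_coherence_values(
        corpus=toy_corpus,
        id2word=toy_id2word,
        texts=toy_texts,
        start=2,
        limit=4,
        step=1,
    )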
def plot_coherence_scores(coherence_vals, start, stop, step, fig_size=(8, 4)):
_, ax = plt.subplots(figsize=fig_size)
x = range(start, stop + 1, step)
ax.plot(x, coherence_vals)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
max_coherence = max(coherence_vals)
max_coherence_num_topics = coherence_vals.index(max_coherence)
best_k = start + max_coherence_num_topics
ax.annotate(
f"{best_k:0d} topics",
xy=(best_k, max_coherence),
xytext=(best_k, max_coherence),
textcoords="offset points",
fontsize=16,
# arrowprops=dict(facecolor="black", shrink=0.05),
bbox=dict(boxstyle="round,pad=0.3", fc=(0.8, 0.9, 0.9), ec="b", lw=2),
)
ax.set_title(
"Coherence Score versus no. of topics", loc="left", fontweight="bold"
)
def get_bigrams_trigrams(
data_words, min_count_of_words=5, phrase_score_threshold=100
):
# Build the bigram and trigram models
bigram = Phrases(
data_words,
min_count=min_count_of_words,
threshold=phrase_score_threshold,
) # higher threshold fewer phrases.
trigram = Phrases(bigram[data_words], threshold=phrase_score_threshold)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_model = Phraser(bigram)
trigram_model = Phraser(trigram)
return bigram_model, trigram_model
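# --- Illustrative usage sketch (assumption, not from the original pipeline) ---
# The toy sentences and the very low min_count/threshold values below exist
# only to demonstrate the call and how the fitted Phraser objects are applied
# to token lists; real corpora should keep the stricter defaults.
def _example_bigrams_trigrams():
    toy_data_words = [
        ["new", "york", "city"],
        ["new", "york", "times"],
        ["new", "york", "city", "council"],
    ]
    bigram_model, trigram_model = get_bigrams_trigrams(
        toy_data_words, min_count_of_words=1, phrase_score_threshold=1
    )
    # Bigrams are applied first, then trigrams on top of the bigrammed tokens.
    return [trigram_model[bigram_model[doc]] for doc in toy_data_words]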
def format_topics_sentences(model, corpus, df_source, topic_mapper_dict):
# Init output
sent_topics_df = | pd.DataFrame() | pandas.DataFrame |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = | DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"]) | pandas.DataFrame |
import pandas as pd
def parse_date(series):
"""
Separates date object into separate days and months columns
and returns dataframe
    :param series: pd.Series
:return: pd.DataFrame
"""
if type(series) != pd.Series:
        raise TypeError(str(series) + ' is not of type pd.Series')
date = pd.to_datetime(series)
days = {0: 'Mon', 1: 'Tues', 2: 'Weds', 3: 'Thurs', 4: 'Fri', 5: 'Sat',
6: 'Sun'}
df = | pd.DataFrame() | pandas.DataFrame |
"""
This script was made to analyse the relation between novelty and depts.
Resonance comes from the 5-lda scripts (novelty, transience, resonance).
VERY IMPORTANT: In papers where two or more coauthors are from the same
university, we repeated the line for calculation purposes.
Example: paper 'x' with coauthors from: unb, unb and ufpa.
    --> we counted unb twice!
IMPORTANT: The citation database was partly created outside of this Python
project.
We googled each paper and saved the number of citations in an Excel file that
is now located in the "dados" folder.
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
root = os.path.abspath(os.curdir)
local_final = r'dados/data_final'
# =============================================================================
# First treatment of databases
# =============================================================================
kld = pd.read_pickle(f'{root}/{local_final}/data_KLDv4(30_gensim).pkl')
data = pd.read_pickle(f'{root}/{local_final}/data_final_v2.pkl')
data['docs'] = data.index
data['ano'] = data.docs.str.split('_')
data.loc[:, 'ano'] = data.ano.map(lambda x: x[1])
data = data.drop('docs', 1)
xls = pd.ExcelFile(f'{root}/dados/outside/citations.xlsx')
citations_base = | pd.read_excel(xls, 'base', index_col='doc_num') | pandas.read_excel |
import pandas as pd
import numpy as np
from dplypy.dplyframe import DplyFrame
from dplypy.pipeline import drop_na
def test_drop_na():
pandas_df = pd.DataFrame(
{
"col1": ["A", "B", "C"],
"col2": [np.nan, "D", "E"],
"col3": [pd.NaT, "F", pd.NaT],
}
)
df = DplyFrame(pandas_df)
# Default
output1 = df + drop_na()
expected1 = pandas_df.dropna()
pd.testing.assert_frame_equal(output1.pandas_df, expected1)
# axes
output2 = df + drop_na(axis=1)
expected2 = pandas_df.dropna(axis=1)
pd.testing.assert_frame_equal(output2.pandas_df, expected2)
output3 = df + drop_na(axis="index")
expected3 = pandas_df.dropna(axis="index")
pd.testing.assert_frame_equal(output3.pandas_df, expected3)
output4 = df + drop_na(axis="columns")
expected4 = pandas_df.dropna(axis="columns")
pd.testing.assert_frame_equal(output4.pandas_df, expected4)
# how
output5 = df + drop_na(how="all")
expected5 = pandas_df.dropna(how="all")
| pd.testing.assert_frame_equal(output5.pandas_df, expected5) | pandas.testing.assert_frame_equal |
import pandas as pd
from flask import Blueprint, request, jsonify
from __init__ import db, cache
from ml.recommendation import train_rating_model_with_svd, get_n_popular_movies, \
get_n_rating_movies, predict_rating_with_svd, get_n_recommended_movies_for_user, predict_rating_with_nn, \
get_n_trending_movies, get_n_similar_movies, calc_tfidf_matrix
recommender = Blueprint('recommender', __name__)
DEFAULT_N_TOP = 10
@recommender.route('/trend/now', methods=['GET'])
@cache.cached(timeout=300, query_string=True)
def get_top_trending_movies():
query = {}
genres = request.args.get('genres')
top = int(request.args.get('top', DEFAULT_N_TOP))
if genres is not None:
query = {'genres': {'$regex': '{}'.format(genres), '$options': 'i'}}
movies = list(db.tmdb_movies.find(query, {'_id': False}))
data = pd.DataFrame(movies)
if data.size == 0:
return jsonify([])
return jsonify(get_n_trending_movies(data, top).to_dict(orient='records'))
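# --- Illustrative client-side sketch (assumption) ---
# How this endpoint might be queried once the blueprint is registered; the
# base URL and any URL prefix depend entirely on how the app is deployed.
def _example_trending_request(base_url='http://localhost:5000'):
    import requests

    response = requests.get(base_url + '/trend/now',
                            params={'genres': 'comedy', 'top': 5})
    response.raise_for_status()
    return response.json()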
@recommender.route('/trend/popular', methods=['GET'])
@cache.cached(timeout=300, query_string=True)
def get_top_popular_movies():
query = {}
genres = request.args.get('genres')
top = int(request.args.get('top', DEFAULT_N_TOP))
if genres is not None:
query = {'genres': {'$regex': '{}'.format(genres), '$options': 'i'}}
movies = list(db.tmdb_movies.find(query, {'_id': False}))
data = pd.DataFrame(movies)
if data.size == 0:
return jsonify([])
return jsonify(get_n_popular_movies(data, top).to_dict(orient='records'))
@recommender.route('/trend/rating', methods=['GET'])
@cache.cached(timeout=300, query_string=True)
def get_top_rating_movies():
query = {}
genres = request.args.get('genres')
top = int(request.args.get('top', DEFAULT_N_TOP))
if genres is not None:
query = {'genres': {'$regex': '{}'.format(genres), '$options': 'i'}}
movies = list(db.tmdb_movies.find(query, {'_id': False}))
data = | pd.DataFrame(movies) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import pandas as pd
from numpy import nan, isnan, arange, dtype, zeros
from pandapower.auxiliary import pandapowerNet, get_free_id, _preserve_dtypes
from pandapower.results import reset_results
from pandapower.std_types import add_basic_std_types, load_std_type
from pandapower import __version__
def create_empty_network(name="", f_hz=50., sn_kva=1e3):
"""
This function initializes the pandapower datastructure.
OPTIONAL:
**f_hz** (float, 50.) - power system frequency in hertz
**name** (string, None) - name for the network
**sn_kva** (float, 1e3) - reference apparent power for per unit system
OUTPUT:
**net** (attrdict) - PANDAPOWER attrdict with empty tables:
EXAMPLE:
net = create_empty_network()
"""
net = pandapowerNet({
# structure data
"bus": [('name', dtype(object)),
('vn_kv', 'f8'),
('type', dtype(object)),
('zone', dtype(object)),
('in_service', 'bool'), ],
"load": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("q_kvar", "f8"),
("const_z_percent", "f8"),
("const_i_percent", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"sgen": [("name", dtype(object)),
("bus", "i8"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"gen": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("vm_pu", "f8"),
("sn_kva", "f8"),
("min_q_kvar", "f8"),
("max_q_kvar", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"switch": [("bus", "i8"),
("element", "i8"),
("et", dtype(object)),
("type", dtype(object)),
("closed", "bool"),
("name", dtype(object))],
"shunt": [("bus", "u4"),
("name", dtype(object)),
("q_kvar", "f8"),
("p_kw", "f8"),
("vn_kv", "f8"),
("step", "u4"),
("max_step", "u4"),
("in_service", "bool")],
"ext_grid": [("name", dtype(object)),
("bus", "u4"),
("vm_pu", "f8"),
("va_degree", "f8"),
("in_service", 'bool')],
"line": [("name", dtype(object)),
("std_type", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("length_km", "f8"),
("r_ohm_per_km", "f8"),
("x_ohm_per_km", "f8"),
("c_nf_per_km", "f8"),
("max_i_ka", "f8"),
("df", "f8"),
("parallel", "u4"),
("type", dtype(object)),
("in_service", 'bool')],
"trafo": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("lv_bus", "u4"),
("sn_kva", "f8"),
("vn_hv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_percent", "f8"),
("vscr_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_st_degree", "f8"),
("tp_pos", "i4"),
("parallel", "u4"),
("df", "f8"),
("in_service", 'bool')],
"trafo3w": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("mv_bus", "u4"),
("lv_bus", "u4"),
("sn_hv_kva", "u8"),
("sn_mv_kva", "u8"),
("sn_lv_kva", "u8"),
("vn_hv_kv", "f8"),
("vn_mv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_hv_percent", "f8"),
("vsc_mv_percent", "f8"),
("vsc_lv_percent", "f8"),
("vscr_hv_percent", "f8"),
("vscr_mv_percent", "f8"),
("vscr_lv_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_mv_degree", "f8"),
("shift_lv_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"impedance": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("rft_pu", "f8"),
("xft_pu", "f8"),
("rtf_pu", "f8"),
("xtf_pu", "f8"),
("sn_kva", "f8"),
("in_service", 'bool')],
"dcline": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("p_kw", "f8"),
("loss_percent", 'f8'),
("loss_kw", 'f8'),
("vm_from_pu", "f8"),
("vm_to_pu", "f8"),
("max_p_kw", "f8"),
("min_q_from_kvar", "f8"),
("min_q_to_kvar", "f8"),
("max_q_from_kvar", "f8"),
("max_q_to_kvar", "f8"),
("in_service", 'bool')],
"ward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("in_service", "bool")],
"xward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("r_ohm", "f8"),
("x_ohm", "f8"),
("vm_pu", "f8"),
("in_service", "bool")],
"measurement": [("name", dtype(object)),
("type", dtype(object)),
("element_type", dtype(object)),
("value", "f8"),
("std_dev", "f8"),
("bus", "u4"),
("element", dtype(object))],
"piecewise_linear_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("p", dtype(object)),
("f", dtype(object))],
"polynomial_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("c", dtype(object))],
# geodata
"line_geodata": [("coords", dtype(object))],
"bus_geodata": [("x", "f8"), ("y", "f8")],
# result tables
"_empty_res_bus": [("vm_pu", "f8"),
("va_degree", "f8"),
("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_ext_grid": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_line": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8"),
("i_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo3w": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_mv_kw", "f8"),
("q_mv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_mv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_load": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_sgen": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_gen": [("p_kw", "f8"),
("q_kvar", "f8"),
("va_degree", "f8"),
("vm_pu", "f8")],
"_empty_res_shunt": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_impedance": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8")],
"_empty_res_dcline": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("vm_from_pu", "f8"),
("va_from_degree", "f8"),
("vm_to_pu", "f8"),
("va_to_degree", "f8")],
"_empty_res_ward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_xward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
# internal
"_ppc": None,
"_is_elements": None,
"_pd2ppc_lookups": {"bus": None,
"ext_grid": None,
"gen": None},
"version": float(__version__[:3]),
"converged": False,
"name": name,
"f_hz": f_hz,
"sn_kva": sn_kva
})
for s in net:
if isinstance(net[s], list):
net[s] = pd.DataFrame(zeros(0, dtype=net[s]), index=[])
add_basic_std_types(net)
reset_results(net)
net['user_pf_options'] = dict()
return net
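# --- Illustrative usage sketch (values are assumptions, not part of the module) ---
# Create an empty 50 Hz network and confirm the element tables start out empty.
def _example_empty_network():
    net = create_empty_network(name="example_net", f_hz=50., sn_kva=1e3)
    assert len(net.bus) == 0 and len(net.line) == 0
    return net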
def create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b",
zone=None, in_service=True, max_vm_pu=nan,
min_vm_pu=nan, **kwargs):
"""create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b", \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds one bus in table net["bus"].
Busses are the nodes of the network that all other elements connect to.
INPUT:
**net** (pandapowerNet) - The pandapower network in which the element is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available. If None, the \
index one higher than the highest already existing index is selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
**type** (string, default "b") - Type of the bus. "n" - auxilary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index and index in net["bus"].index:
raise UserWarning("A bus with index %s already exists" % index)
if index is None:
index = get_free_id(net["bus"])
# store dtypes
dtypes = net.bus.dtypes
net.bus.loc[index, ["name", "vn_kv", "type", "zone", "in_service"]] = \
[name, vn_kv, type, zone, bool(in_service)]
# and preserve dtypes
_preserve_dtypes(net.bus, dtypes)
if geodata is not None:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[index, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
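# --- Illustrative usage sketch (values are assumptions, not part of the module) ---
# A 20 kV busbar with plotting coordinates and OPF voltage limits.
def _example_create_bus(net):
    return create_bus(net, vn_kv=20., name="bus1", type="b",
                      geodata=(0., 0.), min_vm_pu=0.95, max_vm_pu=1.05)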
def create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None,
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan):
"""create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None, \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds several buses in table net["bus"] at once.
Busses are the nodal points of the network that all other elements connect to.
Input:
**net** (pandapowerNet) - The pandapower network in which the element is created
**nr_buses** (int) - The number of buses that is created
OPTIONAL:
**name** (string, default None) - the name for this bus
        **index** (int, default None) - Force specified IDs if available. If None, the indices \
higher than the highest already existing index are selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
**type** (string, default "b") - Type of the bus. "n" - auxilary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
        **index** (int) - The unique indices of the created elements
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index:
for idx in index:
if idx in net.bus.index:
raise UserWarning("A bus with index %s already exists" % index)
else:
bid = get_free_id(net["bus"])
index = arange(bid, bid + nr_buses, 1)
# TODO: not needed when concating anyways?
# store dtypes
# dtypes = net.bus.dtypes
dd = pd.DataFrame(index=index, columns=net.bus.columns)
dd["vn_kv"] = vn_kv
dd["type"] = type
dd["zone"] = zone
dd["in_service"] = in_service
dd["name"] = name
net["bus"] = pd.concat([net["bus"], dd], axis=0).reindex_axis(net["bus"].columns, axis=1)
# and preserve dtypes
# _preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[bid, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
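# --- Illustrative usage sketch (values are assumptions, not part of the module) ---
# Create three 10 kV buses in a single call; the returned array holds the new
# bus indices.
def _example_create_buses(net):
    return create_buses(net, nr_buses=3, vn_kv=10., name="mv_bus")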
def create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan,
name=None, scaling=1., index=None,
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan):
"""create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan, \
name=None, scaling=1., index=None, \
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan, max_q_kvar=nan, \
min_q_kvar=nan, controllable=nan)
Adds one load in table net["load"].
All loads are modelled in the consumer system, meaning load is positive and generation is
negative active power. Please pay attention to the correct signing of the reactive power as
well.
INPUT:
**net** - The net within this load should be created
**bus** (int) - The bus id to which the load is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the load
- postive value -> load
- negative value -> generation
**q_kvar** (float, default 0) - The reactive power of the load
**const_z_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant impedance load at rated voltage
**const_i_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant current load at rated voltage
**sn_kva** (float, default None) - Nominal power of the load
**name** (string, default None) - The name for this load
        **scaling** (float, default 1.) - An OPTIONAL custom scaling factor
**type** (string, None) - type variable to classify the load
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
**in_service** (boolean) - True for in_service or False for out of service
        **max_p_kw** (float, default NaN) - Maximum active power load - necessary for controllable \
            loads in OPF
        **min_p_kw** (float, default NaN) - Minimum active power load - necessary for controllable \
            loads in OPF
        **max_q_kvar** (float, default NaN) - Maximum reactive power load - necessary for \
            controllable loads in OPF
**min_q_kvar** (float, default NaN) - Minimum reactive power load - necessary for \
controllable loads in OPF
**controllable** (boolean, default NaN) - States, whether a load is controllable or not. \
Only respected for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_load(net, bus=0, p_kw=10., q_kvar=2.)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["load"])
if index in net["load"].index:
raise UserWarning("A load with the id %s already exists" % id)
# store dtypes
dtypes = net.load.dtypes
net.load.loc[index, ["name", "bus", "p_kw", "const_z_percent", "const_i_percent", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, const_z_percent, const_i_percent, scaling, q_kvar, sn_kva,
bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.load, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.load.columns:
net.load.loc[:, "min_p_kw"] = pd.Series()
net.load.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.load.columns:
net.load.loc[:, "max_p_kw"] = pd.Series()
net.load.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.load.columns:
net.load.loc[:, "min_q_kvar"] = pd.Series()
net.load.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.load.columns:
net.load.loc[:, "max_q_kvar"] = pd.Series()
net.load.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.load.columns:
net.load.loc[:, "controllable"] = pd.Series()
net.load.loc[index, "controllable"] = bool(controllable)
else:
if "controllable" in net.load.columns:
net.load.loc[index, "controllable"] = False
return index
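# --- Illustrative usage sketch (values are assumptions, not part of the module) ---
# In this consumer-oriented convention a positive p_kw is consumption and a
# negative p_kw models generation attached as a load.
def _example_create_load(net, bus):
    consumption_idx = create_load(net, bus=bus, p_kw=10., q_kvar=2.)
    generation_idx = create_load(net, bus=bus, p_kw=-5., name="generation_as_load")
    return consumption_idx, generation_idx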
def create_load_from_cosphi(net, bus, sn_kva, cos_phi, mode, **kwargs):
"""
Creates a load element from rated power and power factor cos(phi).
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the load is connected
**sn_kva** (float) - rated power of the load
**cos_phi** (float) - power factor cos_phi
**mode** (str) - "ind" for inductive or "cap" for capacitive behaviour
**kwargs are passed on to the create_load function
OUTPUT:
**index** (int) - The unique ID of the created load
All elements are modeled from a consumer point of view. Active power will therefore always be
positive, reactive power will be negative for inductive behaviour and positive for capacitive
behaviour.
"""
from pandapower.toolbox import pq_from_cosphi
p_kw, q_kvar = pq_from_cosphi(sn_kva, cos_phi, qmode=mode, pmode="load")
return create_load(net, bus, sn_kva=sn_kva, p_kw=p_kw, q_kvar=q_kvar, **kwargs)
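# --- Illustrative usage sketch (values are assumptions, not part of the module) ---
# A 400 kVA load with cos(phi) = 0.95 and inductive behaviour; the resulting
# p_kw/q_kvar signs follow the consumer convention described in the docstring
# above.
def _example_load_from_cosphi(net, bus):
    return create_load_from_cosphi(net, bus=bus, sn_kva=400., cos_phi=0.95,
                                   mode="ind", name="cosphi_load")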
def create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=nan, name=None, index=None,
scaling=1., type=None, in_service=True, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan, k=nan, rx=nan):
"""create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=nan, name=None, index=None, \
scaling=1., type=None, in_service=True, max_p_kw=nan, min_p_kw=nan, \
max_q_kvar=nan, min_q_kvar=nan, controllable=nan, k=nan, rx=nan)
Adds one static generator in table net["sgen"].
Static generators are modelled as negative PQ loads. This element is used to model generators
with a constant active and reactive power feed-in. If you want to model a voltage controlled
generator, use the generator element instead.
All elements in the grid are modelled in the consumer system, including generators!
If you want to model the generation of power, you have to assign a negative active power
to the generator. Please pay attention to the correct signing of the
reactive power as well.
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the static generator is connected
**p_kw** (float) - The real power of the static generator (negative for generation!)
OPTIONAL:
**q_kvar** (float, default 0) - The reactive power of the sgen
**sn_kva** (float, default None) - Nominal power of the sgen
**name** (string, default None) - The name for this sgen
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
        **scaling** (float, 1.) - An OPTIONAL custom scaling factor
**type** (string, None) - type variable to classify the static generator
**in_service** (boolean) - True for in_service or False for out of service
**max_p_kw** (float, NaN) - Maximum active power injection - necessary for \
controllable sgens in OPF
**min_p_kw** (float, NaN) - Minimum active power injection - necessary for \
controllable sgens in OPF
**max_q_kvar** (float, NaN) - Maximum reactive power injection - necessary for \
controllable sgens in OPF
**min_q_kvar** (float, NaN) - Minimum reactive power injection - necessary for \
controllable sgens in OPF
**controllable** (bool, NaN) - Whether this generator is controllable by the optimal
powerflow
**k** (float, NaN) - Ratio of nominal current to short circuit current
**rx** (float, NaN) - R/X ratio for short circuit impedance. Only relevant if type is specified as motor so that sgen is treated as asynchronous motor
OUTPUT:
**index** (int) - The unique ID of the created sgen
EXAMPLE:
create_sgen(net, 1, p_kw = -120)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["sgen"])
if index in net["sgen"].index:
raise UserWarning("A static generator with the id %s already exists" % index)
# store dtypes
dtypes = net.sgen.dtypes
net.sgen.loc[index, ["name", "bus", "p_kw", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, scaling, q_kvar, sn_kva, bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.sgen, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "min_p_kw"] = pd.Series()
net.sgen.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "max_p_kw"] = pd.Series()
net.sgen.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "min_q_kvar"] = pd.Series()
net.sgen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "max_q_kvar"] = | pd.Series() | pandas.Series |
'''Defines a pipeline step which merges training data.
'''
import os
from pandas.errors import EmptyDataError
import pandas as pd
import progressbar as pb
from src.step import Step
class Merge(Step):
'''Defines a pipeline step which merges training data.
'''
def __init__(self):
'''Initializes a new instance of the Merge object.
'''
super(Merge, self).__init__()
self.input = {
'app': 'data/raw/anything',
'game': 'data/raw/games',
'movie': 'data/raw/movie',
'music': 'data/raw/music',
'tv': 'data/raw/tv',
}
self.output = {
'path': 'data/interim/combined.csv',
}
def run(self):
'''Runs the pipeline step.
'''
consolidated = | pd.DataFrame() | pandas.DataFrame |
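        # NOTE: the body of run() is truncated in this excerpt. The block below is a
        # hedged sketch of the likely merge logic, inferred from the imports and the
        # input/output paths declared above; the 'category' column name is an assumption.
        for category, directory in self.input.items():
            files = [os.path.join(directory, name) for name in os.listdir(directory)]
            for path in pb.progressbar(files):
                try:
                    frame = pd.read_csv(path)
                except EmptyDataError:
                    continue  # skip empty or unreadable csv files
                frame['category'] = category
                consolidated = pd.concat([consolidated, frame], ignore_index=True)
        consolidated.to_csv(self.output['path'], index=False)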
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------------------------
# Name : Image Patches Generator Script
# Date : April 13, 2021
# Author: <NAME>, PhD
#
# Description: This program is based on Open Slide Patch Manager (OPM) and
#              slices an image into patches.
#              The generated patch properties are defined in a YAML file.
#              In addition, two CSV files are generated: the first lists
#              patch file names and XY coordinates; the second contains only
#              XY coordinates. The latter CSV file is meant to be used as an
#              input to other OPM executions, ensuring the exact same XY patch
#              coordinates are applied to other images.
#              Added discrete error checking, stdout and stderr datalogger,
#              verbose comments, summary report, and improved variable names
#
# Usage Example:
# python patch_miner.py\
# --Input_Image_Path /media/jlagraz/MyBookWD/InputImages/TCGA-32-2494-01A-01-TS1.24dfccf0-f73c-4926-833c-059d934bc72f_overlay.tiff
# --Configuration_Yaml_File /home/jlagraz/Documents/Normalization/BaSSaN-Update/OpmTiling/ConfigutationFiles/overlap_0.yml
# --Patches_Output_Path /media/jlagraz/MyBookWD/Results
# --Input_XY_Coordinates_Csv_File /media/jlagraz/MyBookWD/Result/XYPatchCoordinates.csv
#
#------------------------------------------------------------------------------------------------
import sys
import yaml
import logging
import argparse
import warnings
import functools
import openslide
import numpy as np
import pandas as pd
#
from pathlib import Path
from PIL import Image
from functools import partial
from datetime import datetime
from opm.patch_manager import PatchManager
from opm.utils import tissue_mask, alpha_channel_check, patch_size_check
#
Image.MAX_IMAGE_PIXELS = None
warnings.simplefilter("ignore")
__author__ = '<NAME>, PhD'
__status__ = 'Prototype'
__email__ = '<EMAIL>'
__credits__ = ['<NAME>','<NAME>','<NAME>','<NAME>, PhD']
__license__ = "GPL"
__version__ = "1.0"
# Global variables
X_DIMENSION = 0
Y_DIMENSION = 1
#-----------------------------------------------------------------
# Name: Defining logger
# Author: <NAME>, PhD
# Date: 06/12/2020
# Description: Logger definitions including decorator
# https://dev.to/mandrewcito/a-tiny-python-log-decorator-1o5m
#-----------------------------------------------------------------
FORMATTER = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
LEVEL_NAME = logging.getLevelName('DEBUG')
DESCRIPTION_MESSAGE = \
    'This program is based on Open Slide Patch Manager (OPM) and ' + '\n'\
    'slices an image into patches. ' + '\n'\
    'The generated patch properties are defined in a YAML file. ' + '\n'\
    'In addition, two CSV files are generated: the first lists ' + '\n'\
    'patch file names and XY coordinates; the second contains only ' + '\n'\
    'XY coordinates. The latter CSV file is meant to be used as an ' + '\n'\
    'input to other OPM executions, ensuring the exact same XY patch ' + '\n'\
    'coordinates are applied to other images. ' + '\n'\
    'Added discrete error checking, stdout and stderr datalogger, ' + '\n'\
    'verbose comments, summary report, and improved variable names'
#-----------------------------------------------------------------
# Name: Logger Definitions
# Author: <NAME>, PhD
# Date: 06/12/2020
# Description:
# Input:
# Output:
#-----------------------------------------------------------------
def GetConsoleHandler(TargetOutput):
# Create console handler and set level to debug
ConsoleHandler = logging.StreamHandler(TargetOutput)
# add formatter to Console Logger
ConsoleHandler.setFormatter(FORMATTER)
return ConsoleHandler
#-----------------------------------------------------------------
def GetLogger(LoggerName,TargetOutput):
logger = logging.getLogger(LoggerName)
logger.setLevel(LEVEL_NAME) # better to have too much log than not enough
LogHandler = GetConsoleHandler(TargetOutput)
logger.addHandler(LogHandler)
# with this pattern, it's rarely necessary to propagate the error up to parent
logger.propagate = False
return logger,LogHandler
#-----------------------------------------------------------------
class LogDecorator(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
def __call__(self, fn):
@functools.wraps(fn)
def decorated(*args, **kwargs):
try:
self.logger.info("{0} - {1} - {2}".format(fn.__name__, args, kwargs))
result = fn(*args, **kwargs)
self.logger.info(result)
return result
except Exception as ex:
self.logger.debug("Exception!!!! {0}".format(ex), exc_info=True)
raise ex
return decorated
#------------------------------------------------------------------------------------------------
# Function Name: Get Arguments
# Author: <NAME>, PhD
# Date: 03/12/2020
# Description: Define input arguments using flags
# Input: input image path, Configuration Yaml File, Patches Output Path,Input XY Coordinates CSV File
# Output: Argument list
#------------------------------------------------------------------------------------------------
def GetArguments():
    parser = argparse.ArgumentParser(description=DESCRIPTION_MESSAGE)
parser.add_argument('-i', '--Input_Image_Path', required=True, help='Image to path to slice')
parser.add_argument('-c', '--Configuration_Yaml_File', required=True, help='config.yml for running OPM')
parser.add_argument('-o', '--Patches_Output_Path', required=False, help='output path for the patches')
parser.add_argument('-icsv', '--Input_XY_Coordinates_Csv_File', required=False, help='CSV with x,y coordinates of patches to mine')
args = parser.parse_args()
return args
#------------------------------------------------------------------------------------------------
# Function Name: get folder size
# Author: <NAME>, PhD
# Date: 03/12/2020
# Description: Calculates directory size in GB
# Input: Directory
# Output: Directory size and number of patches in the directory
# https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
#------------------------------------------------------------------------------------------------
def get_folder_size(folder):
Gigabytes = 1024**3
TotalSum = sum(file.stat().st_size for file in Path(folder).rglob('*'))
SumGB = TotalSum / Gigabytes
FolderSize = '{0:.3f}GB'.format(SumGB)
NumberOfPatches = len(list(Path(folder).rglob('*')))
return FolderSize,NumberOfPatches
#------------------------------------------------------------------------------------------------
# Function Name: Terminate
# Author: <NAME>, PhD
# Date: 04/14/2020
# Description: Summarizes run
# Input: Yaml configuration file details and OPM manage object
# Output: None
#------------------------------------------------------------------------------------------------
def Terminate(ConfigurationFile,manager):
global InputArguments
RootDirectory = str(Path(InputArguments.Patches_Output_Path) /manager.slide_folder)
FolderSize,NumberOfPatches = get_folder_size(RootDirectory)
StdOutLogger.info('----------------------------------------------------')
StdOutLogger.info('Summary ')
StdOutLogger.info('----------------------------------------------------')
StdOutLogger.info('Image Name : {} '.format(Path(manager.path).name))
StdOutLogger.info('Patches Total Size : {} '.format(FolderSize))
StdOutLogger.info('Number of Patches : {} '.format(NumberOfPatches))
StdOutLogger.info('Patches Directory : {} '.format(manager.slide_folder))
StdOutLogger.info('Patch Size : {}x{}'.format(ConfigurationFile['patch_size'][X_DIMENSION],ConfigurationFile['patch_size'][Y_DIMENSION]))
StdOutLogger.info('Patch Saved : {} '.format(ConfigurationFile['save_patches']))
StdOutLogger.info('Patching Type : {} '.format(ConfigurationFile['read_type']))
StdOutLogger.info('Patch White Color : {} '.format(ConfigurationFile['white_color']))
StdOutLogger.info('Patch Scale : {} '.format(ConfigurationFile['scale']))
StdOutLogger.info('Overlap Factor : {} '.format(ConfigurationFile['overlap_factor']))
StdOutLogger.info('Config YML File Name : {} '.format(Path(InputArguments.Configuration_Yaml_File).name))
StdOutLogger.info('Output Directory : {} '.format(InputArguments.Patches_Output_Path))
StdOutLogger.info('----------------------------------------------------')
# ------------------------------------------------------------------------------------------------
# Function Name: Close Handles
# Author: <NAME>, PhD
# Date: 04/12/2020
# Description: Closes logger handle
# Input: none
# Output: none
# ------------------------------------------------------------------------------------------------
def CloseHandles():
StdOutLogger.info('Closing Log handles')
StdOutLogger.info('Close stream handle')
LogHandler.close()
StdOutLogger.info('Remove stream handle from logger')
StdOutLogger.removeHandler(LogHandler)
StdOutLogger.info('Shutdown logger upon app exit')
logging.shutdown()
#------------------------------------------------------------------------------------------------
# Function Name: Creates a directory
# Author: <NAME>, PhD
# Date: 04/12/2020
# Description: Created a directory
# Input: path
# Output: none
#------------------------------------------------------------------------------------------------
def CreateDirectory(OutputPath):
try:
StdOutLogger.info('Creating directory:\n{}'.format(OutputPath))
Path(OutputPath).mkdir(parents=True, exist_ok=True)
except:
StdOutLogger.info('Could not created directory:\n{}'.format(OutputPath))
raise IOError()
#------------------------------------------------------------------------------------------------
# Function Name: Initialize
# Author: <NAME>, PhD
# Date: 04/14/2020
# Description: Sets up run
# Input: None
# Output: Patches directory and Yaml configuration file details
#------------------------------------------------------------------------------------------------
def Initialize():
global InputArguments
StdOutLogger.info('Define Input Arguments')
InputArguments = GetArguments()
if not Path(InputArguments.Input_Image_Path).exists():
raise IOError('Could not find the image:\n{}'.format(InputArguments.Patches_Output_Path))
if not Path(InputArguments.Configuration_Yaml_File).exists():
raise IOError('Could not find the config file:\n{}'.format(InputArguments.Configuration_Yaml_File))
if InputArguments.Patches_Output_Path is None:
PatchesOutputDirectory = ""
else:
CreateDirectory(InputArguments.Patches_Output_Path)
PatchesOutputDirectory = '{}/'.format(Path(InputArguments.Patches_Output_Path))
try:
StdOutLogger.info('Load config file')
ConfigurationFile = yaml.load(open(InputArguments.Configuration_Yaml_File), Loader=yaml.FullLoader)
except:
raise IOError('Exception Yaml Load failed')
return PatchesOutputDirectory,ConfigurationFile
#------------------------------------------------------------------------------------------------
# Function Name: Parse Image List
# Author: <NAME>, PhD
# Date: 04/14/2020
# Description: Fetch image list
# Input: File name
# Output: List
#------------------------------------------------------------------------------------------------
def ParseImageList(ListFileName):
global InputArguments
ImagesPathsList = list()
StdOutLogger.info('Opening image list: {} '.format(Path(ListFileName).name))
try:
# Pandas assumes CSV file has column titles
ImagesPathsList = pd.read_csv(ListFileName).squeeze().tolist()
except:
raise IOError('Exception triggered!!!')
return ImagesPathsList
#------------------------------------------------------------------------------------------------
# Function Name: Parse CSV Patches Files
# Author: <NAME>, PhD
# Date: 04/12/2020
# Description: Parses file names listed in CSV file
# Input: File path and File Name
# Output: XY Coordinates File List
#------------------------------------------------------------------------------------------------
def ParseCsvPatchesFiles(FilePath,FileName):
global InputArguments
XYCoordinatesDataframe = pd.DataFrame()
OutputXYCoordinatesFileList = list()
FilePathColumnName = 'Csv_File_Path'
X_CoordinateColumnName = 'X'
Y_CoordinateColumnName = 'Y'
DataframeColumnNames = [FilePathColumnName,X_CoordinateColumnName,Y_CoordinateColumnName]
StdOutLogger.info('****************************************')
StdOutLogger.info('Reading CSV File: {}'.format(Path(FilePath).stem))
XYCoordinatesDataframe = | pd.read_csv(FilePath) | pandas.read_csv |
import glob
import datetime
import os
import pandas as pd
import numpy as np
import re
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# pyinstaller --onefile --noconsole --icon GetCSV.ico Arca_GetCSVConverter_2-0-0.py
#for MMW 18-6 spreadsheets
probCol = False
#infer desktop
desktopPath = os.path.expanduser("~/Desktop/")
filelist=['']
probRecords = []
probColls = []
#filename = r'arms_modsonly_May9.csv'
col_names = ["IslandoraContentModel","BCRDHSimpleObjectPID",'imageLink','filename','directory','childKey','title', 'alternativeTitle', 'creator1', 'creator2','creator3']
col_names += ['corporateCreator1','corporateCreator2','contributor1','contributor2','corporateContributor1','publisher_original','publisher_location']
col_names += ['dateCreated','description','extent','topicalSubject1','topicalSubject2','topicalSubject3','topicalSubject4','topicalSubject5']
col_names += ['geographicSubject1','coordinates','personalSubject1','personalSubject2','corporateSubject1','corporateSubject2', 'dateIssued_start']
col_names += ['dateIssued_end','dateRange', 'frequency','genre','genreAuthority','type','internetMediaType','language1','language2','notes']
col_names += ['accessIdentifier','localIdentifier','ISBN','classification','URI']
col_names += ['source','rights','creativeCommons_URI','rightsStatement_URI','relatedItem_title','relatedItem_PID','recordCreationDate','recordOrigin']
pattern1 = r'^[A-Z][a-z]{2}-\d{2}$' #%b-%Y date (e.g. Jun-17)
pattern2 = r'^\d{2}-\d{2}-[1-2]\d{3}$'
contentModels = {
r"info:fedora/islandora:sp_large_image_cmodel": "Large Image",
r"info:fedora/islandora:sp_basic_image": "Basic Image",
r"info:fedora/islandora:bookCModel": "Book",
r"info:fedora/islandora:newspaperIssueCModel":"Newspaper - issue",
r"info:fedora/islandora:newspaperPageCModel":"Newspaper",
r"info:fedora/islandora:sp_PDF":"PDF",
r"info:fedora/islandora:sp-audioCModel":"Audio",
r"info:fedora/islandora:sp_videoCModel":"Video",
r"info:fedora/islandora:sp_compoundCModel":"Compound",
r"info:fedora/ir:citationCModel":"Citation"
}
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path1
lbl1['text'] = ""
csvname = filedialog.askopenfilename(initialdir = desktopPath,title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if ".csv" not in csvname:
lbl1['text'] = "**Please choose a file with a .csv extension!"
else:
filelist[0] = csvname
lbl1['text'] = csvname
def splitMultiHdgs(hdgs):
if pd.notna(hdgs):
hdgs = hdgs.replace("\\,",";")
hdgs = hdgs.split(",")
newhdgs = []
for hdg in hdgs:
newhdg = hdg.replace(";", ",")
newhdgs.append(newhdg)
return newhdgs
else:
return None
def getMultiVals(item, string, df, pd):
hdgs = df.filter(like=string).columns
for hdg in hdgs:
vals = df.at[item.Index,hdg]
if pd.notna(vals):
vals = splitMultiHdgs(vals)
return vals
return None
def convert_date(dt_str, letter_date):
"""
Converts an invalid formatted date into a proper date for ARCA Mods
Correct format: Y-m-d
Fixes:
Incorrect format: m-d-Y
Incorrect format (letter date): m-d e.g. Jun-17
:param dt_str: the date string
:param letter_date: whether the string is a letter date. Letter date is something like Jun-17
:return: the correctly formatted date
"""
if letter_date:
rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to yymm string format
rev_date_pts = rev_date.split("-")
year_num = int(rev_date_pts[0])
if year_num > 1999:
year_num = year_num - 100
year_str = str(year_num)
rev_date_pts[0] = year_str
revised = "-".join(rev_date_pts)
else:
revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(
'%Y-%m-%d') # convert date to YY-mm string format
return revised
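# Illustrative examples (added, not part of the original script). Note that two-digit
# years above 1999 are shifted back one century, matching the archive's assumption:
#   >>> convert_date('Jun-17', True)
#   '1917-06'
#   >>> convert_date('25-12-1998', False)
#   '1998-12-25'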
def sortValues(lst):
    # build a filtered copy instead of mutating lst while iterating over it
    lst = [item for item in lst if pd.notna(item)]
lst = set(lst)
lst = list(lst)
return lst
def dropNullCols(df):
nullcols = []
for col in df.columns:
notNull = df[col].notna().sum()
if notNull < 1:
nullcols.append(col)
return nullcols
def convert():
probCol = False
df2 = pd.DataFrame(columns = col_names)
df2.append(pd.Series(), ignore_index=True)
f=filelist[0]
# if not os.path.exists(savePath): #if folder does not exist
# os.makedirs(savePath)
try:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_7')
except UnicodeDecodeError:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_8')
nullcols = dropNullCols(df)
df.drop(nullcols, axis=1, inplace=True)
i = 1
for item in df.itertuples():
#PID
df2.at[i, 'BCRDHSimpleObjectPID'] = item.PID
if 'mods_subject_name_personal_namePart_ms' in df.columns:
pNames = item.mods_subject_name_personal_namePart_ms
#ContentModel
cModel = item.RELS_EXT_hasModel_uri_s
df2.at[i,"IslandoraContentModel"] =contentModels[cModel]
#Local Identifier
if 'mods_identifier_local_ms' in df.columns:
localID = item.mods_identifier_local_ms
if pd.notna(localID) and localID != "None":
df2.at[i,'localIdentifier'] = localID
#Access Identifer
if 'mods_identifier_access_ms' in df.columns:
accessID = item.mods_identifier_access_ms
if pd.notna(accessID):
df2.at[i,'accessIdentifier'] = accessID
#Image Link
# Link to Image
PIDparts = item.PID.split(":")
repo = PIDparts[0] #repository code
num = PIDparts[1] #auto-generated accession number
imageLink = "https://bcrdh.ca/islandora/object/" + repo + "%3A" + num
df2.at[i, 'imageLink'] = imageLink
#Title
if 'mods_titleInfo_title_ms' in df.columns:
title = item.mods_titleInfo_title_ms
if pd.notna(title):
df2.at[i,'title'] = title.replace("\,",",")
#Alternative Title
if "mods_titleInfo_alternative_title_ms" in df.columns:
altTitle = item.mods_titleInfo_alternative_title_ms
if pd.notna(altTitle):
df2.at[i, 'alternativeTitle'] = altTitle.replace("\,",",")
#Date
if "mods_originInfo_dateIssued_ms" in df.columns:
dt = item.mods_originInfo_dateIssued_ms
if pd.notna(dt):
if (re.match(pattern1, dt)): #letter date, i.e. Jun-17
dt = convert_date(dt, True)
elif (re.match(pattern2, dt)): #reverse date
dt = convert_date(dt, False)
df2.at[i,'dateCreated'] = dt
#Date Issued Start
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms' in df.columns:
startDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms
if pd.notna(startDt):
df2.at[i,'dateIssued_start'] = startDt
#Date Issued End
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms' in df.columns:
endDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms
if | pd.notna(endDt) | pandas.notna |
"""see https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/"""
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
values = [x for x in range(10)]
data = series_to_supervised(values, 2)
print(data)
# load dataset
#dataset = read_csv('./data/minitaur_log_latency_0.01.csv')
#dataset = read_csv('./data/minitaur_log_latency_0.003.csv')
dataset = | read_csv('./data/minitaur_log_latency_0.006.csv') | pandas.read_csv |
import json
import re
from datetime import datetime, date
from time import sleep
import pandas as pd
import pymongo
import pytz
import requests
from loguru import logger
from pyecharts.charts import Line
from pyecharts.charts import ThemeRiver
from pyecharts.charts import EffectScatter
from pyecharts.charts import Boxplot
from pyecharts.charts import Bar
import pyecharts.options as opts
from pyecharts.globals import ThemeType
import ofanalysis.const as const
def get_numeric_df_by_column(target_df: pd.DataFrame, target_column_list: list = None, ignore_column_list: list = None):
'''
    Convert the columns of target_df that need conversion from string columns to numeric columns.
    Non-numeric characters in a cell are stripped; values that cannot be converted become NaN.
    :param target_df:
    :param target_column_list: [column1,column2,...], columns to convert
    :param ignore_column_list: [column1,column2,...], columns to leave unconverted
:return:
'''
df = target_df.copy()
column_list = list(target_df.columns)
if target_column_list is not None:
column_list = target_column_list
if ignore_column_list is not None:
for item in ignore_column_list:
if item not in column_list:
continue
column_list.remove(item)
for column in column_list:
# s = df[column].str.extract(r'(-?[0-9]*([\.][0-9]+)?)', expand=True)[0]
s = df[column].str.extract(r'(-?\d+(\.\d+)?)', expand=True)[0]
df[column] = pd.to_numeric(s, errors='coerce')
return df
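# Illustrative example (added, not part of the original module):
#   >>> df = pd.DataFrame({'pct': ['1.5%', '-2.3%'], 'name': ['a', 'b']})
#   >>> get_numeric_df_by_column(df, target_column_list=['pct'])['pct'].tolist()
#   [1.5, -2.3]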
def extract_float_from_str(s: str):
'''
    Extract all numbers contained in the given string.
:param s:
:return:
'''
result_list = re.findall(r'-?\d+\.?\d*', s)
return list(map(float, result_list))
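# Illustrative example (added, not part of the original module):
#   >>> extract_float_from_str('up 3.5%, down -2')
#   [3.5, -2.0]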
def convert_float_for_dataframe_columns(target_df, columns, number=2, thousands=True):
"""
    Apply convert_float_format to all numbers in the specified [columns] of the given dataframe.
    :param target_df:
    :param columns: list-> [column1, column2]
    :param number: number of decimal places to keep
:param thousands:
:return:
"""
for column in columns:
target_df[column] = target_df[column].apply(
convert_float_format, args=(number, thousands,))
return target_df
# Convert a number: keep n decimal places; optionally use a thousands separator
def convert_float_format(target, number=2, thousands=True):
if isinstance(target, str):
target = float(target.replace(',', ''))
first_step = round(target, number)
second_step = format(first_step, ',') if thousands else first_step
return second_step
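# Illustrative examples (added, not part of the original module):
#   >>> convert_float_format('1234.567')
#   '1,234.57'
#   >>> convert_float_format(1234.567, number=1, thousands=False)
#   1234.6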
def request_post_json(api_url: str, headers: dict, request_param: dict) -> dict:
'''
    Send a POST request with an automatic retry mechanism; parse the JSON response and return it as a dict.
    :param request_param: dict
    :param headers: available in const, or passed in
    :param api_url:
    :return: dict
'''
request_data = json.dumps(request_param)
    for _ in range(const.RETRY_TIMES):  # retry mechanism
try:
response = requests.post(api_url,
headers=headers,
data=request_data)
if response.status_code != 200:
                logger.info('Response status code is not 200!')
raise Exception
except:
sleep(2)
else:
break
return response.json()
def db_save_dict_to_mongodb(mongo_db_name: str, col_name: str, target_dict):
c = pymongo.MongoClient(const.MONGODB_LINK)
db = c[mongo_db_name]
db_col = db[col_name]
if not isinstance(target_dict, list):
target_dict = [target_dict]
if len(target_dict) == 0:
        logger.warning('The data to be stored in db is empty; nothing to save!')
return
item = db_col.insert_many(target_dict)
return item.inserted_ids
def db_get_dict_from_mongodb(mongo_db_name: str, col_name: str,
query_dict: dict = {}, field_dict: dict = {}):
'''
:param mongo_db_name:
:param col_name:
:param query_dict:
:param field_dict: {'column1':1, 'column2':1}
:return:
'''
c = pymongo.MongoClient(
host=const.MONGODB_LINK,
tz_aware=True,
tzinfo=pytz.timezone('Asia/Shanghai')
)
db = c[mongo_db_name]
db_col = db[col_name]
field_dict['_id'] = 0
result_dict_list = [x for x in db_col.find(query_dict, field_dict)]
return result_dict_list
def db_get_distinct_from_mongodb(mongo_db_name: str, col_name: str, field: str, query_dict: dict = {}):
c = pymongo.MongoClient(
host=const.MONGODB_LINK,
tz_aware=True,
tzinfo=pytz.timezone('Asia/Shanghai')
)
db = c[mongo_db_name]
db_col = db[col_name]
result_list = db_col.distinct(field, query=query_dict)
return result_list
def db_del_dict_from_mongodb(mongo_db_name: str, col_name: str, query_dict: dict):
c = pymongo.MongoClient(const.MONGODB_LINK)
db = c[mongo_db_name]
db_col = db[col_name]
x = db_col.delete_many(query_dict)
return x.deleted_count
def get_trade_cal_from_ts(ts_pro_token, start_date: str = '20000101', end_date: str = None):
if end_date is None:
end_date = date.today().strftime('%Y%m%d')
df_trade_cal = ts_pro_token.trade_cal(**{
"exchange": "SSE",
"cal_date": "",
"start_date": start_date,
"end_date": end_date,
"is_open": 1,
"limit": "",
"offset": ""
}, fields=[
"cal_date",
"pretrade_date"
])
return df_trade_cal['cal_date']
def get_q_end(target_date):
'''
    Return the last day of the quarter that contains the given date.
    :param target_date: date string such as 20211201
    :return: date string such as 20211231
'''
quarter = pd.Period(target_date, 'Q').quarter
if quarter == 1:
return datetime(pd.to_datetime(target_date).year, 3, 31).strftime('%Y%m%d')
elif quarter == 2:
return datetime(pd.to_datetime(target_date).year, 6, 30).strftime('%Y%m%d')
elif quarter == 3:
return datetime(pd.to_datetime(target_date).year, 9, 30).strftime('%Y%m%d')
else:
return datetime(pd.to_datetime(target_date).year, 12, 31).strftime('%Y%m%d')
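# Illustrative examples (added, not part of the original module):
#   >>> get_q_end('20210215')
#   '20210331'
#   >>> get_q_end('20211201')
#   '20211231'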
def get_pyechart_boxplot_obj(target_df: pd.DataFrame, title: str = ''):
"""通过给定的df,生成pyechart的boxplot对象并返回
Args:
target_df (pd.DataFrame): _description_
title (str, optional): _description_. Defaults to ''.
Returns:
_type_: _description_
"""
x_data = list(target_df.columns)
boxplot = Boxplot(init_opts=opts.InitOpts(
width='100%', height="700px", theme=ThemeType.CHALK))
boxplot.add_xaxis([''])
for name, value_series in target_df.iteritems():
boxplot.add_yaxis(name, boxplot.prepare_data(
[list(value_series.dropna())]))
boxplot.set_global_opts(
title_opts=opts.TitleOpts(title=title),
legend_opts=opts.LegendOpts(pos_top='5%')
)
return boxplot
def get_pyechart_scatterplots_obj(target_series: pd.Series, title: str = ''):
"""通过给定的Series,生成pyechart的scatterplots对象并返回
Args:
target_series (pd.Series): 其中series的index会作为x轴坐标,series的值作为y轴坐标
title (str, optional): _description_. Defaults to ''.
"""
scatter = EffectScatter(
init_opts=opts.InitOpts(
width='100%', height="700px", theme=ThemeType.CHALK)
)
scatter.add_xaxis(list(target_series.index))
scatter.add_yaxis("", list(target_series))
scatter.set_global_opts(
title_opts=opts.TitleOpts(title=title),
legend_opts=opts.LegendOpts(pos_top='5%')
)
return scatter
def get_pyechart_river_obj(target_df: pd.DataFrame, title: str = '', exclude_sum_weight: int = 10):
"""通过给定的df,生成pyechart的river对象并返回
Args:
target_df (pd.DataFrame): _description_
title (str, optional): _description_. Defaults to ''.
exclude_sum_weight (int, optional): 为了更好的显示,将某一X上值汇总小于这个权值的系列排除在外. Defaults to 10.
"""
df = target_df.fillna(0)
x_series = [index for index, value in (
df.sum() > exclude_sum_weight).iteritems() if value]
y_data = []
for name, column in df.iteritems():
if name in x_series:
l = [[x, y, name] for x, y in column.iteritems()]
y_data.extend(l)
river = ThemeRiver(init_opts=opts.InitOpts(width="100%", height="800px", theme=ThemeType.CHALK)).add(
series_name=x_series,
data=y_data,
singleaxis_opts=opts.SingleAxisOpts(
pos_top="50", pos_bottom="50", type_="time"),
).set_global_opts(
tooltip_opts=opts.TooltipOpts(
trigger="axis", axis_pointer_type="line"),
title_opts=opts.TitleOpts(title=title),
legend_opts=opts.LegendOpts(pos_top='5%'))
return river
def get_pyechart_databin_bar_obj(target_series: pd.Series, interval:int, title:str = ''):
"""通过给定的Series,生成分箱数据,填充pyechart的bar对象并返回
Args:
target_series (pd.Series): _description_
interval (int): 分箱间隔,可以调整
title (str, optional): _description_. Defaults to ''.
Returns:
_type_: _description_
"""
series_desc = target_series.describe()
bins = []
max = series_desc.loc['max']
min = series_desc.loc['min']
sub_max_min = max - min
for i in range(interval):
increment = i * (1 / interval)
bins.append(float('%.2f' % (sub_max_min * increment + min)))
bins.append(max)
score_cat = | pd.cut(target_series, bins) | pandas.cut |
import csv
import datetime
import logging
import os
import time
import eel
import pandas as pd
from .data import get_users_name
def resd(path, start, finish, number):
path_res = os.path.join(path, 'data', 'res.csv')
s = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M')
f = datetime.datetime.strptime(finish, '%Y-%m-%d %H:%M')
if not s>=f:
df = pd.read_csv(path_res,header=None, names=['start','finish','number'])
df['start']=pd.to_datetime(df['start'])
df['finish']=pd.to_datetime(df['finish'])
ans1 = df[~((s < df['start']) & (f <= df['start']))]
ans2 = df[~((s >= df['finish']) & (f > df['finish']))]
if (len(ans1)==0 or len(ans2) == 0):
#df.append()
#df.to_csv(path_res,header=False, index=False)
with open(path_res, 'a', newline="", encoding='utf_8_sig') as f:
writer = csv.writer(f, delimiter=",")
writer.writerow([start, finish, number])
            eel.nok('Reservation completed.')
eel.clean()
else:
            eel.nok('Reservation could not be made.')
else:
        eel.nok('Reservation could not be made.')
def tdd(path):
path_res = os.path.join(path, 'data', 'res.csv')
df = pd.read_csv(path_res,header=None, names=['start','finish','number'])
df['start']=pd.to_datetime(df['start'])
df['finish']=pd.to_datetime(df['finish'])
df = df[df['start']>=datetime.datetime.now()]
f = lambda x: f"{x[0].strftime('%m/%d %H時')} ~ {x[1].strftime('%m/%d %H時')} {x[2]}"
new_df_1 = df.apply(f, axis=1)
return new_df_1.to_list() if len(new_df_1)!=0 else []
def getdated(path, y, m, d):
d_0 = datetime.datetime(y, m, d)
d_1 = d_0 + datetime.timedelta(days=1)
path_res = os.path.join(path, 'data', 'res.csv')
df = pd.read_csv(path_res,header=None, names=['start','finish','number'])
df['start']= | pd.to_datetime(df['start']) | pandas.to_datetime |
#This file implements the upHRP algorithm, the MinVar portfolio and the InvLambda portfolio.
#Code for classical HRP is based on <NAME>. (2018). Advances in Financial
#Machine Learning. Wiley. The code has been modified to create uplifted portfolio
#strategies based on FRM adjacency matrices, and it is adapted in order to be used with
#Python 3 and the data set.
#<NAME>
#@date: 20201010
#"""
#[0] Import library
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#[0]Upload input data, Financial Institutions of the 6 Emerging Markets and adjacency matrix
FIs_prices = pd.read_excel("Financial Institutions Price Series.xlsx")
FRM_EM_Adjacency_matrix= | pd.read_csv("adj_matix_20200630_050.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
from singlecelldata import SingleCell
from sklearn.metrics import pairwise_distances
from scipy.stats import spearmanr
from .feats_utils import FeatureNormalize
### Pairwise Distances
def spearmans(X):
"""
X is a (n x d) matrix where rows are samples
Returns D which is a (n x n) matrix of distances between samples
"""
D, _ = spearmanr(X, axis = 1)
D = 1 - D
return D
def pearsons(X):
"""
X is a (n x d) matrix where rows are samples
Returns D which is a (n x n) matrix of distances between samples
"""
D = pairwise_distances(X, metric = "correlation")
return D
def euclidean(X):
"""
X is a (n x d) matrix where rows are samples
Returns D which is a (n x n) matrix of distances between samples
"""
D = pairwise_distances(X, metric = "euclidean", n_jobs = -1)
return D
### Kernels
def linear_kernel(X):
"""
X is a (n x d) matrix where rows are samples
Returns K which is a (n x n) kernel matrix
"""
K = np.dot(X, X.T)
return K
### Utility Functions
# Converts Distances to Kernels
def dist_to_kernel(D):
gamma = 1.0 / np.amax(D)
S = np.exp(-D * gamma)
return S
# Computes Gram Matrix from Kernel
def GramMatrix(K):
N_one = np.ones(K.shape) / K.shape[0]
K_bar = K - np.dot(N_one, K) - np.dot(K, N_one) + np.dot(np.dot(N_one, K), N_one)
return K_bar
def PCA(sc, n_comp = 1, dist_or_kernel = 'linear'):
"""
Computes and stores the Principal Components of the gene expression data stored in
the SingleCell object.
Parameters
----------
sc : SingleCell
The SingleCell object containing gene expression data.
n_comp : int, optional
The number of Principal Components to compute. Default 1.
dist_or_kernel : str, optional
The distance metric or the kernel to compute. If a distance metric is passed,
it computes the pairwise distance between the cells and then converts the distance
metrics to kernels. If a kernel is passed, it computes the kernel. Valid values
are 'linear' (default) for linear kernel, 'spearmans' for Spearmans distance,
'euclidean' for Euclidean distance and 'pearsons' for Pearsons distance.
Returns
-------
sc : SingleCell
The SingleCell object containing the dimensionnality reduced gene expression data. The
reduced dimensionality is n_comp. The gene names are removed and the features in the
reduced space are named 'PC1', 'PC2' and so on.
"""
sc = FeatureNormalize(sc, 'mean')
X = sc.getCounts()
if (dist_or_kernel == 'linear'):
K = GramMatrix(linear_kernel(X.T))
elif (dist_or_kernel == 'spearmans'):
K = GramMatrix(dist_to_kernel(spearmans(X.T)))
elif (dist_or_kernel == 'euclidean'):
K = GramMatrix(dist_to_kernel(euclidean(X.T)))
elif (dist_or_kernel == 'pearsons'):
K = GramMatrix(dist_to_kernel(pearsons(X.T)))
E_vec, E_val, _ = np.linalg.svd(K)
#print(E_val)
#print(E_vec[:,0])
# Sort Eigenvector
idx = np.argsort(E_val, kind = 'mergesort')
idx = np.flip(idx)
E_val = E_val[idx]
E_vec = E_vec[:, idx]
# Remove zero eigenvectors
idx2 = E_val > 0
E_val = E_val[idx2]
E_vec = E_vec[:, idx2]
# print("Maximum components possible = ", E_val.size)
# Scale eigenvectors so that np.dot(D[:,0].T, D[:, 0]) * E[0] = 1
E_val = np.reshape(E_val, [1, E_val.size])
# E_vec = E_vec / np.linalg.norm(E_vec, axis = 0, keepdims = True)
E_vec = E_vec / np.sqrt(E_val)
X_red = np.dot(E_vec[:, 0:n_comp].T, K)
data = | pd.DataFrame(X_red) | pandas.DataFrame |
#
# Copyright (c) 2017-18 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
DLWP utilities.
"""
import pickle
import random
import tempfile
from importlib import import_module
from copy import copy
import numpy as np
import pandas as pd
import keras.models
from keras.utils import multi_gpu_model
# ==================================================================================================================== #
# General utility functions
# ==================================================================================================================== #
def make_keras_picklable():
"""
Thanks to http://zachmoshe.com/2017/04/03/pickling-keras-models.html
"""
def __getstate__(self):
model_str = ""
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
keras.models.save_model(self, fd.name, overwrite=True)
model_str = fd.read()
d = {'model_str': model_str}
return d
def __setstate__(self, state):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
fd.write(state['model_str'])
fd.flush()
model = keras.models.load_model(fd.name)
self.__dict__ = model.__dict__
cls = keras.models.Model
cls.__getstate__ = __getstate__
cls.__setstate__ = __setstate__
def get_object(module_class):
"""
Given a string with a module class name, it imports and returns the class.
This function (c) <NAME>, weeWX; modified by <NAME>.
"""
# Split the path into its parts
parts = module_class.split('.')
# Get the top level module
module = parts[0] # '.'.join(parts[:-1])
# Import the top level module
mod = __import__(module)
# Recursively work down from the top level module to the class name.
# Be prepared to catch an exception if something cannot be found.
try:
for part in parts[1:]:
module = '.'.join([module, part])
# Import each successive module
__import__(module)
mod = getattr(mod, part)
except ImportError as e:
# Can't find a recursive module. Give a more informative error message:
raise ImportError("'%s' raised when searching for %s" % (str(e), module))
except AttributeError:
# Can't find the last attribute. Give a more informative error message:
raise AttributeError("Module '%s' has no attribute '%s' when searching for '%s'" %
(mod.__name__, part, module_class))
return mod
def get_from_class(module_name, class_name):
"""
Given a module name and a class name, return an object corresponding to the class retrieved as in
`from module_class import class_name`.
:param module_name: str: name of module (may have . attributes)
:param class_name: str: name of class
:return: object pointer to class
"""
mod = __import__(module_name, fromlist=[class_name])
class_obj = getattr(mod, class_name)
return class_obj
def get_classes(module_name):
"""
From a given module name, return a dictionary {class_name: class_object} of its classes.
:param module_name: str: name of module to import
:return: dict: {class_name: class_object} pairs in the module
"""
module = import_module(module_name)
classes = {}
for key in dir(module):
if isinstance(getattr(module, key), type):
classes[key] = get_from_class(module_name, key)
return classes
def save_model(model, file_name, history=None):
"""
Saves a class instance with a 'model' attribute to disk. Creates two files: one pickle file containing no model
saved as ${file_name}.pkl and one for the model saved as ${file_name}.keras. Use the `load_model()` method to load
a model saved with this method.
:param model: model instance (with a 'model' attribute) to save
:param file_name: str: base name of save files
:param history: history from Keras fitting, or None
:return:
"""
# Save the model structure and weights
if hasattr(model, 'base_model'):
model.base_model.save('%s.keras' % file_name)
else:
model.model.save('%s.keras' % file_name)
# Create a picklable copy of the DLWP model object excluding the keras model
model_copy = copy(model)
model_copy.model = None
if hasattr(model, 'base_model'):
model_copy.base_model = None
# Save the pickled DLWP object
with open('%s.pkl' % file_name, 'wb') as f:
pickle.dump(model_copy, f, protocol=pickle.HIGHEST_PROTOCOL)
# Save the history, if requested
if history is not None:
with open('%s.history' % file_name, 'wb') as f:
pickle.dump(history.history, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_model(file_name, history=False, custom_objects=None, gpus=1):
"""
Loads a model saved to disk with the `save_model()` method.
:param file_name: str: base name of save files
:param history: bool: if True, loads the history file along with the model
:param custom_objects: dict: any custom functions or classes to be included when Keras loads the model. There is
no need to add objects in DLWP.custom as those are added automatically.
:param gpus: int: load the model onto this number of GPUs
:return: model [, dict]: loaded object [, dictionary of training history]
"""
# Load the pickled DLWP object
with open('%s.pkl' % file_name, 'rb') as f:
model = pickle.load(f)
# Load the saved keras model weights
custom_objects = custom_objects or {}
custom_objects.update(get_classes('DLWP.custom'))
loaded_model = keras.models.load_model('%s.keras' % file_name, custom_objects=custom_objects, compile=True)
# If multiple GPUs are requested, copy the model to the GPUs
if gpus > 1:
import tensorflow as tf
with tf.device('/cpu:0'):
model.base_model = keras.models.clone_model(loaded_model)
model.base_model.set_weights(loaded_model.get_weights())
model.model = multi_gpu_model(model.base_model, gpus=gpus)
model.gpus = gpus
else:
model.base_model = loaded_model
model.model = model.base_model
# Also load the history file, if requested
if history:
with open('%s.history' % file_name, 'rb') as f:
h = pickle.load(f)
return model, h
else:
return model
def save_torch_model(model, file_name, history=None):
"""
Saves a DLWPTorchNN model to disk. Creates two files: one pickle file containing the DLWPTorchNN wrapper, saved as
${file_name}.pkl, and one for the model saved as ${file_name}.torch. Use the `load_torch_model()` method to load
a model saved with this method.
:param model: DLWPTorchNN or other torch model to save
:param file_name: str: base name of save files
:param history: history of model to save; optional
:return:
"""
import torch
torch.save(model.model, '%s.torch' % file_name)
model_copy = copy(model)
model_copy.model = None
with open('%s.pkl' % file_name, 'wb') as f:
pickle.dump(model_copy, f, protocol=pickle.HIGHEST_PROTOCOL)
if history is not None:
with open('%s.history' % file_name, 'wb') as f:
pickle.dump(history, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_torch_model(file_name, history=False):
"""
Loads a DLWPTorchNN or other model using Torch saved to disk with the `save_torch_model()` method.
:param file_name: str: base name of save files
:param history: bool: if True, loads the history file along with the model
    :return: model [, dict]: loaded object [, dictionary of training history]
    """
"""
import torch
with open('%s.pkl' % file_name, 'rb') as f:
model = pickle.load(f)
model.model = torch.load('%s.torch' % file_name)
model.model.eval()
if history:
with open('%s.history' % file_name, 'rb') as f:
h = pickle.load(f)
return model, h
else:
return model
def delete_nan_samples(predictors, targets, large_fill_value=False, threshold=None):
"""
Delete any samples from the predictor and target numpy arrays and return new, reduced versions.
:param predictors: ndarray, shape [num_samples,...]: predictor data
:param targets: ndarray, shape [num_samples,...]: target data
:param large_fill_value: bool: if True, treats very large values (>= 1e20) as NaNs
:param threshold: float 0-1: if not None, then removes any samples with a fraction of NaN larger than this
:return: predictors, targets: ndarrays with samples removed
"""
if threshold is not None and not (0 <= threshold <= 1):
raise ValueError("'threshold' must be between 0 and 1")
if large_fill_value:
predictors[(predictors >= 1.e20) | (predictors <= -1.e20)] = np.nan
targets[(targets >= 1.e20) | (targets <= -1.e20)] = np.nan
p_shape = predictors.shape
t_shape = targets.shape
predictors = predictors.reshape((p_shape[0], -1))
targets = targets.reshape((t_shape[0], -1))
if threshold is None:
p_ind = list(np.where(np.isnan(predictors))[0])
t_ind = list(np.where(np.isnan(targets))[0])
else:
p_ind = list(np.where(np.mean(np.isnan(predictors), axis=1) >= threshold)[0])
t_ind = list(np.where(np.mean(np.isnan(targets), axis=1) >= threshold)[0])
bad_ind = list(set(p_ind + t_ind))
predictors = np.delete(predictors, bad_ind, axis=0)
targets = np.delete(targets, bad_ind, axis=0)
new_p_shape = (predictors.shape[0],) + p_shape[1:]
new_t_shape = (targets.shape[0],) + t_shape[1:]
return predictors.reshape(new_p_shape), targets.reshape(new_t_shape)
def train_test_split_ind(n_sample, test_size, method='random'):
"""
Return indices splitting n_samples into train and test index lists.
:param n_sample: int: number of samples
:param test_size: int: number of samples in test set
:param method: str: 'first' ('last') to take first (last) t samples as test, or 'random'
:return: (list, list): list of train indices, list of test indices
"""
if method == 'first':
test_set = list(range(0, test_size))
train_set = list(range(test_size, n_sample))
elif method == 'last':
test_set = list(range(n_sample - test_size, n_sample))
train_set = list(range(0, n_sample - test_size))
elif method == 'random':
train_set = list(range(n_sample))
test_set = []
for j in range(test_size):
i = random.choice(train_set)
test_set.append(i)
train_set.remove(i)
test_set.sort()
else:
raise ValueError("'method' must be 'first', 'last', or 'random'")
return train_set, test_set
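# Illustrative example (added, not part of the original module): hold out the last
# two of ten samples.
#   >>> train_test_split_ind(10, 2, method='last')
#   ([0, 1, 2, 3, 4, 5, 6, 7], [8, 9])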
def day_of_year(date):
year_start = pd.Timestamp(date.year, 1, 1)
return (date - year_start).total_seconds() / 3600. / 24.
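# Illustrative example (added, not part of the original module): fractional days elapsed
# since midnight on 1 January, so 1 January 00:00 maps to 0.0.
#   >>> day_of_year(pd.Timestamp('2019-07-02 06:00'))
#   181.25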
def insolation(dates, lat, lon, S=1.):
"""
Calculate the approximate solar insolation for given dates
:param dates: 1d array: datetime or Timestamp
:param lat: 1d or 2d array of latitudes
:param lon: 1d or 2d array of longitudes (0-360º). If 2d, must match the shape of lat.
:param S: float: scaling factor (solar constant)
:return: 3d array: insolation (date, lat, lon)
"""
try:
assert len(lat.shape) == len(lon.shape)
except AssertionError:
raise ValueError("'lat' and 'lon' must either both be 1d or both be 2d'")
if len(lat.shape) == 2:
try:
assert lat.shape == lon.shape
except AssertionError:
raise ValueError("shape mismatch between lat (%s) and lon (%s)" % (lat.shape, lon.shape))
if len(lat.shape) == 1:
lon, lat = np.meshgrid(lon, lat)
# Constants for year 1995 (standard)
eps = 23.4441 * np.pi / 180.
ecc = 0.016715
om = 282.7 * np.pi / 180.
beta = np.sqrt(1 - ecc ** 2.)
# Get the day of year. Ignore leap days.
days = | pd.Series(dates) | pandas.Series |
from sklearn import preprocessing
from sklearn.metrics import classification_report, ConfusionMatrixDisplay
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
# Declare file paths and constants.
#
PORT = 5050
BUFFER_RADIUS = 50
SERVER = "10.183.119.142"
DATA_PATH = 'data/tracks'
DATA_MOD_PATH = Path("data/mod")
DATA_RESULTS = Path("data/results")
FILE_ALL_DATA = Path("data/results/all_data.csv")
FILE_ALL_TRACKS = Path("data/results/all_tracks.csv")
FILE_ALL_MODALITIES = Path("data/results/all_modalities.csv")
# FILE_ALL_DATA = Path("/Users/christoph/Dropbox/MDA_Projekt/Dashboard_presentation/all_data.csv")
# FILE_ALL_TRACKS = Path("/Users/christoph/Dropbox/MDA_Projekt/Dashboard_presentation/all_tracks.csv")
# FILE_ALL_MODALITIES = Path("/Users/christoph/Dropbox/MDA_Projekt/Dashboard_presentation/all_modalities.csv")
FILE_TYPE = ('.csv', '.pkl')
SAVE_PATH_MODEL = Path("data/model/rf_model.joblib")
SAVE_PATH_MATRIX = Path("data/model/") # Name and file type gets declared later (looping)
INPUT_FEATURES = ["track_distance", "mean_speed", "var_speed", "median_speed", "p85_speed", "stoprate",
"p85_accel", "var_accel"]
# Import Dataframes from .csv file
def get_dataframes():
all_data_df = pd.read_csv(FILE_ALL_DATA)
tracks_df = | pd.read_csv(FILE_ALL_TRACKS) | pandas.read_csv |
import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.colors import LogNorm
from pylossmap import BLMData
# from tensorflow.keras.utils import Sequence
from tqdm.auto import tqdm
UFO_LABEL = 1
NON_UFO_LABEL = 0
def augment_mirror(data: np.ndarray) -> np.ndarray:
"""Augment the data with the mirrored data.
Args:
data: data to augment
Returns:
the data with the mirrored data appended to the data.
"""
return np.vstack([data, data[:, ::-1]])
def create_labels(
ufo: np.ndarray, non_ufo: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Create the label arrays.
Args:
ufo: ufo data
non_ufo: non ufo data
Returns:
The labels of the ufo and non ufo data.
"""
ufo_labels = np.array([UFO_LABEL] * len(ufo))[:, None]
non_ufo_labels = np.array([NON_UFO_LABEL] * len(non_ufo))[:, None]
return ufo_labels, non_ufo_labels
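# Illustrative example (added, not part of the original module): mirroring appends the
# column-reversed copy of each sample, and the label helpers return matching column vectors.
#   >>> augment_mirror(np.array([[1, 2, 3], [4, 5, 6]]))
#   array([[1, 2, 3],
#          [4, 5, 6],
#          [3, 2, 1],
#          [6, 5, 4]])
#   >>> ufo_labels, non_ufo_labels = create_labels(np.zeros((2, 3)), np.zeros((5, 3)))
#   >>> ufo_labels.shape, non_ufo_labels.shape
#   ((2, 1), (5, 1))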
def truncate_data(data: List[pd.DataFrame], target_length: int) -> np.ndarray:
"""Truncate the rows to a given length, centered.
Args:
data: iterable containing vector data to truncate
        target_length: the desired length of the vector containing the BLM signals
Returns:
Array containing the truncated data.
"""
truncated_rows = []
for row in data:
length = row.shape[1]
half_delta = (length - target_length) / 2
start_shift = int(np.floor(half_delta))
end_cutoff = int(np.ceil(half_delta))
row_chunk = row.iloc[0, start_shift:-end_cutoff]
truncated_rows.append(row_chunk.to_numpy())
truncated_rows = np.array(truncated_rows)
return truncated_rows
def create_peak_dataset(
ufo_meta: pd.DataFrame,
raw_data_dir: Path,
dcum_around: int = 24000,
target_length: int = 33,
prior_dt: int = 3,
post_dt: int = 3,
non_ufo_threshold: float = 1e-3,
include_meta: bool = True,
) -> Dict[str, np.ndarray]:
"""Create a ufo and non ufo peak dataset.
Args:
ufo_meta: metadata of the ufo events
raw_data_dir: directory containing the raw data
dcum_around: dcum range around the ufo
target_length: the desired length of the vector conatining the blm signals
prior_dt: how many seconds back to get the prior events
post_dt: how many seconds forward to get the post events
non_ufo_threshold: don't include non ufo samples when the max is above threshold
include_meta: include the metadata of the samples in the returned dictionary
Returns:
Dictionary containing the ufo and non ufo data and metadata.
"""
non_ufo_prior = []
non_ufo_prior_meta = []
peaks = []
peaks_meta = []
non_ufo_post = []
non_ufo_post_meta = []
for idx, ufo in tqdm(ufo_meta.iterrows(), total=len(ufo_meta)):
raw_fill_data = BLMData.load(raw_data_dir / f"{ufo.fill}.h5")
raw_fill_data.df = raw_fill_data.df.droplevel("mode")
raw_fill_data.df = raw_fill_data.df.iloc[~raw_fill_data.df.index.duplicated()]
raw_idx = raw_fill_data.df.index.get_loc(ufo.datetime, method="nearest") + 1
around_blms = raw_fill_data.meta[
(raw_fill_data.meta["dcum"] < ufo.dcum + dcum_around)
& (raw_fill_data.meta["dcum"] > ufo.dcum - dcum_around)
]
around_data = raw_fill_data.df[around_blms.index].iloc[raw_idx : raw_idx + 1]
if around_data.shape[1] < target_length:
print("skipping sample, not enough blms.")
continue
peaks.append(around_data)
if include_meta:
peaks_meta.append(ufo)
around_prior_data = raw_fill_data.df[around_blms.index].iloc[
raw_idx - prior_dt : raw_idx + 1 - prior_dt
]
around_post_data = raw_fill_data.df[around_blms.index].iloc[
raw_idx + post_dt : raw_idx + 1 + post_dt
]
print("===============")
print("prior max: ", around_prior_data.max().max())
print("prior min: ", around_prior_data.min().min())
print("prior shape: ", around_prior_data.shape)
if around_prior_data.max().max() > non_ufo_threshold:
print("High value, skipping")
print(idx, ufo)
elif around_prior_data.min().min() == 0:
print("found a zero min value, skipping")
print(idx, ufo)
else:
non_ufo_prior.append(around_prior_data)
if include_meta:
prior_meta = ufo.copy()
prior_meta["datetime"] = prior_meta["datetime"] - pd.Timedelta(
f"{prior_dt}s"
)
non_ufo_prior_meta.append(prior_meta)
print("post max: ", around_post_data.max().max())
print("post min: ", around_post_data.min().min())
print("post shape: ", around_post_data.shape)
if around_post_data.max().max() > non_ufo_threshold:
print("High value, skipping")
print(idx, ufo)
elif around_post_data.min().min() == 0:
print("found a zero min value, skipping")
print(idx, ufo)
else:
non_ufo_post.append(around_post_data)
if include_meta:
post_meta = ufo.copy()
post_meta["datetime"] = post_meta["datetime"] + pd.Timedelta(
f"{post_dt}s"
)
non_ufo_post_meta.append(post_meta)
out = {
"ufo": truncate_data(peaks, target_length=target_length),
"non_ufo_prior": truncate_data(non_ufo_prior, target_length=target_length),
"non_ufo_post": truncate_data(non_ufo_post, target_length=target_length),
}
if include_meta:
out["ufo_meta"] = | pd.DataFrame(peaks_meta) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Describe tissue types of input data
#
# **<NAME>, 2019**
#
# Load in previously identified tissue type counts and output a supplementary table.
# In[1]:
import os
import pandas as pd
# In[2]:
# Load TCGA data
file = os.path.join('data', 'tcga_sample_counts.tsv')
tcga_count_df = pd.read_table(file, sep='\t').rename({'cancertype': 'tissue'}, axis='columns')
tcga_count_df = tcga_count_df.assign(dataset="TCGA")
tcga_count_df.head()
# In[3]:
# Load GTEX data
file = os.path.join('data', 'gtex_sample_counts.tsv')
gtex_count_df = pd.read_table(file, sep='\t').rename({'tissuetype': 'tissue'}, axis='columns')
gtex_count_df = gtex_count_df.assign(dataset="GTEX")
gtex_count_df.head()
# In[4]:
# Load TARGET data
file = os.path.join('data', 'target_sample_counts.tsv')
target_count_df = pd.read_table(file, sep='\t').rename({'cancertype': 'tissue'}, axis='columns')
target_count_df = target_count_df.assign(dataset="TARGET")
target_count_df.head()
# In[5]:
# Combine all data to generate supplementary table
full_count_df = (
| pd.concat([tcga_count_df, gtex_count_df, target_count_df], axis='rows') | pandas.concat |
from math import floor
import pandas as pd
import numpy as np
import traceback
import pickle
class Model(object):
"""
Abstract base model class defining the model interface - a model is an object with a training method and a testing/forecasting ability given some input data
"""
def __init__(self):
self.__meta__ = "from ja_model library 2018"
self.__data__ = {'X_train' : | pd.DataFrame({}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import json
import os.path
import numpy as np
import PIL.Image
import pandas as pd
from lmnet.datasets.base import ObjectDetectionBase
from lmnet.utils.random import shuffle, train_test_split
class LmFlower(ObjectDetectionBase):
"""Leapmind flower dataset for object detection.
images: images numpy array. shape is [batch_size, height, width]
labels: gt_boxes numpy array. shape is [batch_size, num_max_boxes, 5(x, y, w, h, class_id)]
"""
classes = ["sunflower", "calla", "poppy (Iceland poppy)", "carnation", "cosmos"]
num_classes = len(classes)
available_subsets = ["train", "validation"]
extend_dir = "lm_flower"
@classmethod
def count_max_boxes(cls):
"""Count max boxes size over all subsets."""
num_max_boxes = 0
for subset in cls.available_subsets:
obj = cls(subset=subset)
gt_boxes_list = obj.annotations
subset_max = max([len(gt_boxes) for gt_boxes in gt_boxes_list])
if subset_max >= num_max_boxes:
num_max_boxes = subset_max
return num_max_boxes
def __init__(
self,
*args,
**kwargs
):
super().__init__(
*args,
**kwargs,
)
self.json = os.path.join(self.data_dir, "project_126_1507252774.json")
self.images_dir = os.path.join(self.data_dir, "images")
self._init_files_and_annotations()
@property
def num_per_epoch(self):
return len(self.files)
def _element(self):
"""Return an image, gt_boxes."""
index = self.current_element_index
self.current_element_index += 1
if self.current_element_index == self.num_per_epoch:
self.current_element_index = 0
self._shuffle()
files, gt_boxes_list = self.files, self.annotations
target_file = files[index]
gt_boxes = gt_boxes_list[index]
gt_boxes = np.array(gt_boxes)
image = PIL.Image.open(target_file)
image = np.array(image)
samples = {'image': image, 'gt_boxes': gt_boxes}
if callable(self.augmentor) and self.subset == "train":
samples = self.augmentor(**samples)
if callable(self.pre_processor):
samples = self.pre_processor(**samples)
image = samples['image']
gt_boxes = samples['gt_boxes']
return image, gt_boxes
def _files_and_annotations_from_json(self, json_file):
"""Return files and gt_boxes list."""
image_ids = self._image_ids(json_file)
image_files = [self._image_file_from_image_id(json_file, image_id) for image_id in image_ids]
gt_boxes_list = [self._gt_boxes_from_image_id(json_file, image_id) for image_id in image_ids]
return image_files, gt_boxes_list
def _image_file_from_image_id(self, json_file, image_id):
images = self._images_from_json(json_file)
file_name = images[images.id == image_id].file_name.tolist()[0]
return os.path.join(self.images_dir, file_name)
def _gt_boxes_from_image_id(self, json_file, image_id):
annotations = self._annotations_from_json(json_file)
category_ids = annotations[annotations.image_id == image_id].category_id.tolist()
categories = self._categories_from_json(json_file)
category_names = [
categories[categories.id == category_id].name.iloc[0]
for category_id in category_ids
]
labels = [self.classes.index(category) for category in category_names]
bboxes = annotations[annotations.image_id == image_id].bbox.tolist()
gt_boxes = []
for class_id, bbox in zip(labels, bboxes):
# ignore width 0 or height 0 box.
if bbox[2] == 0 or bbox[3] == 0:
continue
gt_boxes.append(bbox + [class_id])
return gt_boxes
@functools.lru_cache(maxsize=None)
def _load_json(self, json_file):
f = open(json_file)
data = json.load(f)
f.close()
return data
def _image_ids(self, json_file):
        images = self._images_from_json(json_file)
return images.id.tolist()
@functools.lru_cache(maxsize=None)
def _annotations_from_json(self, json_file):
data = self._load_json(json_file)
annotations = | pd.DataFrame(data["annotations"]) | pandas.DataFrame |
import pymongo
from pymongo import MongoClient
from tkinter import *
import time
import datetime
import random
from tkinter import messagebox
import numpy as np
import pandas as pd
from tkinter import simpledialog
#GLOBAL VALUES
d_c = []
x = pd.DataFrame()
y = pd.DataFrame()
X_train = pd.DataFrame()
X_test = pd.DataFrame()
y_train = pd.DataFrame()
y_test = pd.DataFrame()
X_poly = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 16:31:58 2021
@author: snoone
"""
import os
import glob
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
OUTDIR = "D:/Python_CDM_conversion/hourly/qff/cdm_out/observations_table"
os.chdir("D:/Python_CDM_conversion/hourly/qff/test")
extension = 'qff'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
##use a list of file names to run 5000 in parallel
#with open("D:/Python_CDM_conversion/hourly/qff/ls.txt", "r") as f:
# all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
##to start at beginning of files
for filename in all_filenames:
##to start at next file after last processed
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
df=pd.read_csv(filename, sep="|")
    ##set up master df to extract each variable
df["report_id"]=""
df["observation_id"]=""
df["data_policy_licence"]=""
df["date_time_meaning"]="1"
df["observation_duration"]="0"
df["latitude"]=df["Latitude"]
df["longitude"]=df["Longitude"]
df["crs"]=""
df["z_coordinate"]=""
df["z_coordinate_type"]=""
df["observation_height_above_station_surface"]=""
df["observed_variable"]=""
df["secondary_variable"]=""
df["observation_value"]=""
df["value_significance"]="12"
df["secondary_value"]=""
df["units"]=""
df["code_table"]=""
df["conversion_flag"]=""
df["location_method"]=""
df["location_precision"]=""
df["z_coordinate_method"]=""
df["bbox_min_longitude"]=""
df["bbox_max_longitude"]=""
df["bbox_min_latitude"]=""
df["bbox_max_latitude"]=""
df["spatial_representativeness"]=""
df["original_code_table"]=""
df["quality_flag"]=""
df["numerical_precision"]=""
df["sensor_id"]=""
df["sensor_automation_status"]=""
df["exposure_of_sensor"]=""
df["original_precision"]=""
df["original_units"]=""
df["original_code_table"]=""
df["original_value"]=""
df["conversion_method"]=""
df["processing_code"]=""
df["processing_level"]="0"
df["adjustment_id"]=""
df["traceability"]=""
df["advanced_qc"]=""
df["advanced_uncertainty"]=""
df["advanced_homogenisation"]=""
df["advanced_assimilation_feedback"]=""
df["source_id"]=""
df["source_record_id"]=""
df["primary_station_id"]=df["Station_ID"]
df["Timestamp2"] = df["Year"].map(str) + "-" + df["Month"].map(str)+ "-" + df["Day"].map(str)
df["Seconds"]="00"
df["offset"]="+00"
df["date_time"] = df["Timestamp2"].map(str)+ " " + df["Hour"].map(str)+":"+df["Minute"].map(str)+":"+df["Seconds"].map(str)
    df['date_time'] = pd.to_datetime(df['date_time'], format='%Y-%m-%d %H:%M:%S')
df['date_time'] = df['date_time'].astype('str')
df.date_time = df.date_time + '+00'
#=========================================================================================
##convert temperature changes for each variable
dft = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dft["observation_value"]=df["temperature"]+273.15
dft["source_id"]=df["temperature_Source_Code"]
dft["Seconds"]="00"
dft["quality_flag"]=df["temperature_QC_flag"]
dft["qc_method"]=dft["quality_flag"]
dft["conversion_flag"]="0"
dft["conversion_method"]="1"
dft["numerical_precision"]="0.01"
dft["original_precision"]="0.1"
dft["original_units"]="60"
dft["original_value"]=df["temperature"]
dft["observation_height_above_station_surface"]="2"
dft["units"]="5"
dft["observed_variable"]="85"
    ##set quality flag from df master for variable: fill all nan with Null then change all non-nan to 1 and Null to 0
dft.loc[dft['quality_flag'].notnull(), "quality_flag"] = 1
dft = dft.fillna("Null")
    dft.loc[dft.quality_flag == "Null", "quality_flag"] = 0
#change for each variable if required
    ##remove unwanted missing data rows
dft = dft.fillna("null")
dft = dft.replace({"null":"-99999"})
dft = dft[dft.observation_value != -99999]
#df = df.astype(str)
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['primary_station_id_2']=dft['primary_station_id'].astype(str)+'-'+dft['source_id'].astype(str)
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
#dft.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dft = dft.astype(str)
df2 = df2.astype(str)
dft= df2.merge(dft, on=['primary_station_id_2'])
dft['data_policy_licence'] = dft['data_policy_licence_x']
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft['observation_id']=dft['primary_station_id'].astype(str)+'-'+dft['record_number'].astype(str)+'-'+dft['date_time'].astype(str)
dft['observation_id'] = dft['observation_id'].str.replace(r' ', '-')
    ##remove unwanted last two characters
dft['observation_id'] = dft['observation_id'].str[:-6]
dft["observation_id"]=dft["observation_id"]+'-'+dft['observed_variable'].astype(str)+'-'+dft['value_significance'].astype(str)
dft["report_id"]=dft["observation_id"].str[:-6]
    ##select the final observations table columns
dft = dft[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
    dft.dropna(subset = ["observation_value"], inplace=True)
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
dft["observation_value"]= dft["observation_value"].round(2)
#dft.to_csv("isuest.csv", index=False, sep=",")
#=================================================================================
##convert dew point temperature changes for each variable
dfdpt= df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfdpt["observation_value"]=df["dew_point_temperature"]+273.15
dfdpt["source_id"]=df["dew_point_temperature_Source_Code"]
dfdpt["Seconds"]="00"
dfdpt["quality_flag"]=df["dew_point_temperature_QC_flag"]
dfdpt["conversion_flag"]="0"
dfdpt["conversion_method"]="1"
dfdpt["numerical_precision"]="0.01"
dfdpt["original_precision"]="0.1"
dfdpt["original_units"]="60"
dfdpt["original_value"]=df["dew_point_temperature"]
dfdpt["observation_height_above_station_surface"]="2"
dfdpt["units"]="5"
dfdpt["observed_variable"]="36"
    ##set quality flag from df master for variable: fill all nan with Null then change all non-nan to 1 and Null to 0
dfdpt.loc[dfdpt['quality_flag'].notnull(), "quality_flag"] = 1
dfdpt= dfdpt.fillna("Null")
    dfdpt.loc[dfdpt.quality_flag == "Null", "quality_flag"] = 0
    ##remove unwanted missing data rows
dfdpt= dfdpt.fillna("null")
dfdpt= dfdpt.replace({"null":"-99999"})
dfdpt= dfdpt[dfdpt.observation_value != -99999]
#df = df.astype(str)
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['primary_station_id_2']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['source_id'].astype(str)
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
#dfdpt.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfdpt= dfdpt.astype(str)
df2 = df2.astype(str)
dfdpt= df2.merge(dfdpt, on=['primary_station_id_2'])
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence_x']
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['observation_id']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['record_number'].astype(str)+'-'+dfdpt['date_time'].astype(str)
dfdpt['observation_id'] = dfdpt['observation_id'].str.replace(r' ', '-')
    ##remove unwanted last two characters
dfdpt['observation_id'] = dfdpt['observation_id'].str[:-6]
dfdpt["observation_id"]=dfdpt["observation_id"]+'-'+dfdpt['observed_variable'].astype(str)+'-'+dfdpt['value_significance'].astype(str)
dfdpt["report_id"]=dfdpt["observation_id"].str[:-6]
    ##select the final observations table columns
dfdpt= dfdpt[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
dfdpt.dropna(subset = ["observation_value"], inplace=True)
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
dfdpt["observation_value"]= dfdpt["observation_value"].round(2)
#====================================================================================
#convert station level to cdmlite
dfslp = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfslp["observation_value"]=df["station_level_pressure"]
dfslp["source_id"]=df["station_level_pressure_Source_Code"]
dfslp["Seconds"]="00"
dfslp["quality_flag"]=df["station_level_pressure_QC_flag"]
dfslp["conversion_flag"]="0"
dfslp["conversion_method"]="7"
dfslp["numerical_precision"]="10"
dfslp["original_precision"]="0.1"
dfslp["original_units"]="530"
dfslp["original_value"]=df["station_level_pressure"]
dfslp["observation_height_above_station_surface"]="2"
dfslp["units"]="32"
dfslp["observed_variable"]="57"
    ##set quality flag from df master for variable: fill all nan with Null then change all non-nan to 1 and Null to 0
dfslp.loc[dfslp['quality_flag'].notnull(), "quality_flag"] = 1
dfslp = dfslp.fillna("Null")
    dfslp.loc[dfslp.quality_flag == "Null", "quality_flag"] = 0
#change for each variable if required
    ##remove unwanted missing data rows
dfslp = dfslp.fillna("null")
dfslp = dfslp.replace({"null":"-99999"})
dfslp = dfslp[dfslp.observation_value != -99999]
#df = df.astype(str)
dfslp["source_id"] = pd.to_numeric(dfslp["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfslp['source_id'] = dfslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['primary_station_id_2']=dfslp['primary_station_id'].astype(str)+'-'+dfslp['source_id'].astype(str)
dfslp["observation_value"] = pd.to_numeric(dfslp["observation_value"],errors='coerce')
#dfslp.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfslp = dfslp.astype(str)
df2 = df2.astype(str)
dfslp= df2.merge(dfslp, on=['primary_station_id_2'])
dfslp['data_policy_licence'] = dfslp['data_policy_licence_x']
dfslp['data_policy_licence'] = dfslp['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['observation_id']=dfslp['primary_station_id'].astype(str)+'-'+dfslp['record_number'].astype(str)+'-'+dfslp['date_time'].astype(str)
dfslp['observation_id'] = dfslp['observation_id'].str.replace(r' ', '-')
    ##remove unwanted last two characters
dfslp['observation_id'] = dfslp['observation_id'].str[:-6]
dfslp["observation_id"]=dfslp["observation_id"]+'-'+dfslp['observed_variable'].astype(str)+'-'+dfslp['value_significance'].astype(str)
dfslp["report_id"]=dfslp["observation_id"].str[:-6]
    ##select the final observations table columns
dfslp = dfslp[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
    ##make sure no decimal places and round value to required decimal places
dfslp['observation_value'] = dfslp['observation_value'].map(float)
dfslp['observation_value'] = (dfslp['observation_value']*100)
dfslp['observation_value'] = dfslp['observation_value'].map(int)
dfslp['source_id'] = dfslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['data_policy_licence'] = dfslp['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp["source_id"] = | pd.to_numeric(dfslp["source_id"],errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# # Wasserstein Pareto Frontier Experiment on COMPAS Data Set
# ## Import Data
# The experiment used the COMPAS data set as in "Optimized Pre-Processing for Discrimination Prevention" by Calmon et al. for comparison purposes: https://github.com/fair-preprocessing/nips2017/tree/master/compas/experiment_data2
# In[1]:
import numpy as np
import pandas as pd
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.interpolate import interp1d
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score, auc, classification_report, roc_curve
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
from scipy.linalg import sqrtm
from matplotlib import gridspec
from matplotlib.patches import Rectangle
# import data
path =r'/Users/shizhouxu/Documents/LIBRARY/Python/Fair_L2_Supervised_Learning/experiment_data_compass/' # use your path
train_0 = pd.read_csv(path + "train_0.csv",index_col=None, header=0, usecols=range(1,6))
train_1 = pd.read_csv(path + "train_1.csv",index_col=None, header=0, usecols=range(1,6))
train_2 = pd.read_csv(path + "train_2.csv",index_col=None, header=0, usecols=range(1,6))
train_3 = pd.read_csv(path + "train_3.csv",index_col=None, header=0, usecols=range(1,6))
train_4 = pd.read_csv(path + "train_4.csv",index_col=None, header=0, usecols=range(1,6))
test_0 = pd.read_csv(path + "test_0.csv",index_col=None, header=0, usecols=range(1,6))
test_1 = pd.read_csv(path + "test_1.csv",index_col=None, header=0, usecols=range(1,6))
test_2 = pd.read_csv(path + "test_2.csv",index_col=None, header=0, usecols=range(1,6))
test_3 = pd.read_csv(path + "test_3.csv",index_col=None, header=0, usecols=range(1,6))
test_4 = pd.read_csv(path + "test_4.csv",index_col=None, header=0, usecols=range(1,6))
train_new_0 = pd.read_csv(path + "train_new_0.csv",index_col=None, header=0, usecols=range(1,6))
train_new_1 = pd.read_csv(path + "train_new_1.csv",index_col=None, header=0, usecols=range(1,6))
train_new_2 = pd.read_csv(path + "train_new_2.csv",index_col=None, header=0, usecols=range(1,6))
train_new_3 = pd.read_csv(path + "train_new_3.csv",index_col=None, header=0, usecols=range(1,6))
train_new_4 = pd.read_csv(path + "train_new_4.csv",index_col=None, header=0, usecols=range(1,6))
test_new_0 = pd.read_csv(path + "test_new_0.csv",index_col=None, header=0, usecols=range(1,6))
test_new_1 = pd.read_csv(path + "test_new_1.csv",index_col=None, header=0, usecols=range(1,6))
test_new_2 = pd.read_csv(path + "test_new_2.csv",index_col=None, header=0, usecols=range(1,6))
test_new_3 = pd.read_csv(path + "test_new_3.csv",index_col=None, header=0, usecols=range(1,6))
test_new_4 = pd.read_csv(path + "test_new_4.csv",index_col=None, header=0, usecols=range(1,6))
# all available data variables: features = ['race','age_cat','c_charge_degree','priors_count','is_recid']
features = ['race','age_cat','c_charge_degree','priors_count','is_recid']
# sensitive variable: Z_features = ['race']
Z_features = ['race']
# dependent variable: Y_features = ['is_recid']
Y_features = ['is_recid']
# independent variable: X_features = ['age_cat', 'c_charge_degree','priors_count']
X_features = ['age_cat', 'c_charge_degree','priors_count']
# combine the data by train/test category
TrainList=[train_0,train_1,train_2,train_3,train_4]
TestList=[test_0,test_1,test_2,test_3,test_4]
TrainNewList=[train_new_0,train_new_1,train_new_2,train_new_3,train_new_4]
TestNewList=[test_new_0,test_new_1,test_new_2,test_new_3,test_new_4]
# data set combined: df
ord_enc = OrdinalEncoder()
df = pd.concat([train_0,train_1,train_2,train_3,train_4,test_0,test_1,test_2,test_3,test_4])
# data set further excluding the sensitive variable: df_delete
df_delete = df.drop('race',axis = 1)
# sensitive variable Z: race
race = df['race']
# ## Compute the Wasserstein Pseudo-barycenter for X
# In[2]:
# independent variable: X
X = np.delete(np.array(pd.get_dummies(df[X_features])),[4],axis = 1)
# dependent variable: Y
Y = np.array(pd.get_dummies(df[Y_features]))
# mean of X and Y: X_mean, Y_mean
X_mean = np.mean(X,axis = 0)
Y_mean = np.mean(Y)
# covariance (matrix) of X and Y: X_cov, Y_cov
X_cov = np.cov(X.T)
Y_cov = np.cov(Y.T)
# marginal (conditional) variables by race group: X_A, Y_A, X_C, Y_C
X_A = X[race == 'African-American',:]
Y_A = Y[race == 'African-American']
X_C = X[race == 'Caucasian',:]
Y_C = Y[race == 'Caucasian']
# marginal means: X_A_mean, Y_A_mean, X_C_mean, Y_C_mean
X_A_mean = np.average(X_A, axis = 0)
Y_A_mean = np.average(Y_A)
X_C_mean = np.average(X_C, axis = 0)
Y_C_mean = np.average(Y_C)
# marginal covariances: X_A_cov, Y_A_cov, X_C_cov, Y_C_cov
X_A_cov = np.cov(X_A.T)
Y_A_cov = np.cov(Y_A.T)
X_C_cov = np.cov(X_C.T)
Y_C_cov = np.cov(Y_C.T)
# cross-covariance (matrix) between Y and X: yX_A_cov, yX_C_cov
yX_A_cov = np.cov(Y_A.T, X_A.T)[range(len(Y[0,:]),len(Y[0,:]) + len(X[0,:])),0]
yX_C_cov = np.cov(Y_C.T, X_C.T)[range(len(Y[0,:]),len(Y[0,:]) + len(X[0,:])),0]
# algorithm 1, step 1: iterative process to the independent barycenter covariance matrix with stop criterion error of 0.00000001
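# (For reference: the loop below is the standard fixed-point iteration for the covariance of the
# Wasserstein barycenter of Gaussians, X_bar = sum_k w_k * sqrtm(sqrtm(X_bar) @ Sigma_k @ sqrtm(X_bar)),
# with weights w_k given by the group proportions.)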
sample_size = len(X[:,0])
X_bar = np.random.rand(len(X[0,:]),len(X[0,:])) # random initialization for the covariance
eps = 10 # initialization for the stop variable
while eps > 0.00000001:
X_new = ((len(X_A[:,0])/sample_size) * sqrtm(sqrtm(X_bar)@X_A_cov@sqrtm(X_bar))) + ((len(X_C[:,0])/sample_size) * sqrtm(sqrtm(X_bar)@X_C_cov@sqrtm(X_bar)))
eps = np.linalg.norm(X_bar - X_new)
X_bar = X_new
# algorithm 1, step 2: the corresponding Brenier's maps for the marginals of X: T_X_A, T_X_C
T_X_A = np.linalg.inv(sqrtm(X_A_cov)) @ sqrtm( sqrtm(X_A_cov) @ X_bar @ sqrtm(X_A_cov) ) @ np.linalg.inv(sqrtm(X_A_cov))
T_X_C = np.linalg.inv(sqrtm(X_C_cov)) @ sqrtm( sqrtm(X_C_cov) @ X_bar @ sqrtm(X_C_cov) ) @ np.linalg.inv(sqrtm(X_C_cov))
# wasserstein pseudo-barycenter for X separated in train/test categories: X_TrainFairList, X_TestFairList
X_TrainFairList = []
X_TestFairList = []
for i in range(0,len(TrainList)):
train = np.delete(np.array(pd.get_dummies(TrainList[i][X_features])),[4],axis = 1)
test = np.delete(np.array(pd.get_dummies(TestList[i][X_features])),[4],axis = 1)
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
train_new = np.random.rand(train.shape[0],train.shape[1])
test_new = np.random.rand(test.shape[0],test.shape[1])
train_new[gender_train_i == 'African-American',:] = (train[gender_train_i == 'African-American',:] - X_A_mean) @ T_X_A.T + X_mean
train_new[gender_train_i == 'Caucasian',:] = (train[gender_train_i == 'Caucasian',:] - X_C_mean) @ T_X_C.T + X_mean
test_new[gender_test_i == 'African-American',:] = (test[gender_test_i == 'African-American',:] - X_A_mean) @ T_X_A.T + X_mean
test_new[gender_test_i == 'Caucasian',:] = (test[gender_test_i == 'Caucasian',:] - X_C_mean) @ T_X_C.T + X_mean
X_TrainFairList.append(train_new)
X_TestFairList.append(test_new)
# ## Compute the Wasserstein Pseudo-barycenter for E(Y|X)
# In[3]:
# wasserstein pseudo-barycenter for X: X_fair
X_fair = np.concatenate([X_TrainFairList[0],X_TrainFairList[1],X_TrainFairList[2],X_TrainFairList[3],X_TrainFairList[4],X_TestFairList[0],X_TestFairList[1],X_TestFairList[2],X_TestFairList[3],X_TestFairList[4]])
# marginal (conditional) X_fair: X_fair_A, X_fair_C
X_fair_A = X_fair[race == 'African-American',:]
X_fair_C = X_fair[race == 'Caucasian',:]
# marginal means for X_fair: X_fair_A_mean, X_fair_C_mean
X_fair_A_mean = np.average(X_fair_A, axis = 0)
X_fair_C_mean = np.average(X_fair_C, axis = 0)
# marginal covariances for X_fair: X_fair_A_cov, X_fair_C_cov
X_fair_A_cov = np.cov(X_fair_A.T)
X_fair_C_cov = np.cov(X_fair_C.T)
# cross-covariance between Y and X_fair: yX_fair_A_cov, yX_fair_C_cov
yX_fair_A_cov = np.cov(Y_A.T, X_fair_A.T)[range(1,8),0]
yX_fair_C_cov = np.cov(Y_C.T, X_fair_C.T)[range(1,8),0]
# covariance of marginal E(Y|X) in Gaussian case: yoX_A_cov, yoX_C_cov
# which is also the optimal linear estimation of covariance of E(Y|X) in general distribution case
yoX_A_cov = [email protected](X_fair_A_cov)@yX_fair_A_cov.T
yoX_C_cov = [email protected](X_fair_C_cov)@yX_fair_C_cov.T
# algorithm 2, step 1: iterative process to the dependent barycenter covariance matrix with stop criterion error of 0.00000000000000000001
Y_bar = np.random.rand()
eps = 10
while eps > 0.00000000000000000001:
Y_new = ((len(X_A[:,0])/sample_size) * np.sqrt(np.sqrt(Y_bar)*yoX_A_cov*np.sqrt(Y_bar))) + ((len(X_C[:,0])/sample_size) * np.sqrt(np.sqrt(Y_bar)*yoX_C_cov*np.sqrt(Y_bar)))
eps = Y_bar - Y_new
Y_bar = Y_new
# algorithm 2, step 2: the corresponding Brenier's map for marginals of E(y|X)
T_Y_A = (1/np.sqrt(yoX_A_cov)) * np.sqrt( np.sqrt(yoX_A_cov) * Y_bar * np.sqrt(yoX_A_cov) ) * (1/np.sqrt(yoX_A_cov))
T_Y_C = (1/np.sqrt(yoX_C_cov)) * np.sqrt( np.sqrt(yoX_C_cov) * Y_bar * np.sqrt(yoX_C_cov) ) * (1/np.sqrt(yoX_C_cov))
# wasserstein pseudo-barycenter for Y separated in train/test categories: Y_TrainFairList, Y_TestFairList
Y_TrainFairList = []
Y_TestFairList = []
for i in range(0,len(TrainList)):
train = np.array(pd.get_dummies(TrainList[i][Y_features]))
test = np.array(pd.get_dummies(TestList[i][Y_features]))
train_new = np.random.rand(len(train.T[0,:]))
test_new = np.random.rand(len(test.T[0,:]))
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
train_new[gender_train_i == 'African-American'] = ((train[gender_train_i == 'African-American'] - Y_A_mean) * T_Y_A.T + Y_mean).T[0,:]
train_new[gender_train_i == 'Caucasian'] = ((train[gender_train_i == 'Caucasian'] - Y_C_mean) * T_Y_C.T + Y_mean).T[0,:]
test_new[gender_test_i == 'African-American'] = ((test[gender_test_i == 'African-American'] - Y_A_mean) * T_Y_A.T + Y_mean).T[0,:]
test_new[gender_test_i == 'Caucasian'] = ((test[gender_test_i == 'Caucasian'] - Y_C_mean) * T_Y_C.T + Y_mean).T[0,:]
Y_TrainFairList.append(train_new)
Y_TestFairList.append(test_new)
# Algorithm 2, step 4: reshape the dependent pseudo-barycenter to binary variable for logit regression
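# (The unique values of the continuous pseudo-barycenter are min-max scaled to [0, 1] and used as
# Bernoulli probabilities, i.e. a randomized rounding of the fair targets back to binary labels.)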
fair_value = np.unique(Y_TrainFairList[0])
Y_prob = (fair_value - np.min(fair_value))/(np.max(fair_value) - np.min(fair_value))
for j in range(0,len(Y_TrainFairList)):
for i in range(0,len(fair_value)):
Y_TrainFairList[j][Y_TrainFairList[j] == fair_value[i]] = np.random.binomial(size = len(np.where(Y_TrainFairList[j] == fair_value[i])[0]),n = 1,p = Y_prob[i])
Y_TestFairList[j][Y_TestFairList[j] == fair_value[i]] = np.random.binomial(size = len(np.where(Y_TestFairList[j] == fair_value[i])[0]),n = 1,p = Y_prob[i])
# In[4]:
# random forest test for the fair representation of data (barycenter pair)
RFModelsAUC=[]
RFTestPreds=[]
test_disc=[]
for i in range(0,len(TrainList)):
rf=RandomForestClassifier()
rf.fit(X_TrainFairList[i],Y_TrainFairList[i])
proba=rf.predict_proba(X_TestFairList[i])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
RFModelsAUC.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc.append(maxdisc)
RFres_Pseudobary = (RFModelsAUC, RFTestPreds)
RFDisc_Pseudobary = test_disc
RFModelsAUC, RFDisc_Pseudobary
# In[5]:
# logistic regression test for the fair representation of data (barycenter pair)
LRModelsAUC=[]
LRTestPreds=[]
test_disc = []
for i in range(0,len(TrainList)):
lr=LogisticRegression()
lr.fit(X_TrainFairList[i],Y_TrainFairList[i])
proba=lr.predict_proba(X_TestFairList[i])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc.append(maxdisc)
LRres_Pseudobary = (LRModelsAUC, LRTestPreds)
LRDisc_Pseudobary = test_disc
LRModelsAUC, LRDisc_Pseudobary
# ## Estimate the geodesic path from the E(Y|X_z) to the barycenter of the marginal conditional expectations
# 1. Compute the geodesic paths from X to X^dash and from Y to Y^dash
# 2. Use a diagonal argument to estimate the geodesic path from the original E(Y|X) to E(Y^dash|X^dash) on both train and test data sets: X_train/test_path_list, Y_train/test_path_list
# In[6]:
# Algorithm 1, step 3: estimate of the independent variable (X) geodesic path using McCann interpolation
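# For reference, McCann's displacement interpolation between a group distribution and its image
# under an optimal affine map T is X_t = ((1 - t) * Id + t * T)(X) for t in [0, 1].
# Minimal illustrative helper showing that update for a single group (hypothetical, not called by
# the code below, which applies the same formula per race group, per fold and per time step):
def _mccann_step(x, T_map, group_mean, bary_mean, t):
    # convex combination of the identity and the (centred) Brenier map, re-centred at the barycenter mean
    return (1 - t) * x + t * ((x - group_mean) @ T_map.T + bary_mean)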
X_train_path_list = []
X_test_path_list = []
T = np.linspace(0,1,50) # discretize time variable T
Id = np.identity(7)
for i in range(0,len(TrainList)):
X_train_path = []
X_test_path = []
train = np.delete(np.array(pd.get_dummies(TrainList[i][X_features])),[4],axis = 1)
test = np.delete(np.array(pd.get_dummies(TestList[i][X_features])),[4],axis = 1)
gender_train_i = np.delete(np.array(pd.get_dummies(TrainList[i][Z_features])),[1],axis = 1).T[0,:]
gender_test_i = np.delete(np.array(pd.get_dummies(TestList[i][Z_features])),[1],axis = 1).T[0,:]
for t in range(0,len(T)):
train_new = np.random.rand(train.shape[0],train.shape[1])
test_new = np.random.rand(test.shape[0],test.shape[1])
tempt_train = train_new
tempt_test = test_new
tempt_train[gender_train_i == 1,:] = (1-T[t])*train[gender_train_i == 1,:] + T[t]*((train[gender_train_i == 1,:] - X_A_mean) @ T_X_A.T + X_mean)
tempt_train[gender_train_i == 0,:] = (1-T[t])*train[gender_train_i == 0,:] + T[t]*((train[gender_train_i == 0,:] - X_C_mean) @ T_X_C.T + X_mean)
tempt_test[gender_test_i == 1,:] = (1-T[t])*test[gender_test_i == 1,:] + T[t]*((test[gender_test_i == 1,:] - X_A_mean) @ T_X_A.T + X_mean)
tempt_test[gender_test_i == 0,:] = (1-T[t])*test[gender_test_i == 0,:] + T[t]*((test[gender_test_i == 0,:] - X_C_mean) @ T_X_C.T + X_mean)
        # Algorithm 1, step 4: merge the corresponding (wrt t) linear interpolation of the sensitive variable back into the McCann interpolation of the independent variable
X_train_path.append(np.concatenate((tempt_train, np.expand_dims(gender_train_i*(1-T[t]), axis=1)),axis = 1))
X_test_path.append(np.concatenate((tempt_test, np.expand_dims(gender_test_i*(1-T[t]), axis=1)),axis = 1))
X_train_path_list.append(X_train_path)
X_test_path_list.append(X_test_path)
# Algorithm 2, step 3: estimate of the dependnet (Y) geodesic path using McCann interpolation
Y_train_path_list = []
Y_test_path_list = []
T = np.linspace(0,1,50)
for i in range(0,len(TrainList)):
Y_train_path = []
Y_test_path = []
train = np.array(pd.get_dummies(TrainList[i][Y_features]))
test = np.array(pd.get_dummies(TestList[i][Y_features]))
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
for t in range(0,len(T)):
train_new = np.random.rand(len(train.T[0,:]))
test_new = np.random.rand(len(test.T[0,:]))
tempt_train = train_new
tempt_test = test_new
tempt_train[gender_train_i == 'African-American'] = ((1 - T[t] + T[t]*T_Y_A)*train[gender_train_i == 'African-American'] + T[t]*(Y_mean - T_Y_A*Y_A_mean)).T[0,:]
tempt_train[gender_train_i == 'Caucasian'] = ((1 - T[t] + T[t]*T_Y_C)*train[gender_train_i == 'Caucasian'] + T[t]*(Y_mean - T_Y_C*Y_C_mean)).T[0,:]
tempt_test[gender_test_i == 'African-American'] = ((1 - T[t] + T[t]*T_Y_A)*test[gender_test_i == 'African-American'] + T[t]*(Y_mean - T_Y_A*Y_A_mean)).T[0,:]
tempt_test[gender_test_i == 'Caucasian'] = ((1 - T[t] + T[t]*T_Y_C)*test[gender_test_i == 'Caucasian'] + T[t]*(Y_mean - T_Y_C*Y_C_mean)).T[0,:]
Y_train_path.append(tempt_train)
Y_test_path.append(tempt_test)
Y_train_path_list.append(Y_train_path)
Y_test_path_list.append(Y_test_path)
# Algorithm 2, step 4: reshape the dependent pseudo-barycenter to binary variable for logit regression
for t in range(0,len(T)):
for i in range(0,len(TrainList)):
fair_value = np.unique(Y_train_path_list[i][t])
Y_prob = (fair_value - np.min(fair_value))/(np.max(fair_value) - np.min(fair_value))
for j in range(0,len(fair_value)):
Y_train_path_list[i][t][Y_train_path_list[i][t] == fair_value[j]] = np.random.binomial(size = len(np.where(Y_train_path_list[i][t] == fair_value[j])[0]),n = 1,p = Y_prob[j])
Y_test_path_list[i][t][Y_test_path_list[i][t] == fair_value[j]] = np.random.binomial(size = len(np.where(Y_test_path_list[i][t] == fair_value[j])[0]),n = 1,p = Y_prob[j])
# ## Performance Tests and Comparisons
# In[7]:
# ROC AUC of random forest trained via the fair data representation interpolation: RFModelsAUC_path_list
RFModelsAUC_path_list =[]
# classification discrimination (definition 6.1) of standard random forest trained via the fair data representation interpolation: RFDisc_path_list
RFDisc_path_list =[]
for i in range(0,len(TrainList)):
ModelsAUC_path = []
test_disc_path=[]
for t in range(0,len(T)):
rf=RandomForestClassifier()
rf.fit(X_train_path_list[i][t],Y_train_path_list[i][t])
proba=rf.predict_proba(X_test_path_list[i][t])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
ModelsAUC_path.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc_path.append(maxdisc)
RFModelsAUC_path_list.append(ModelsAUC_path)
RFDisc_path_list.append(test_disc_path)
# ROC AUC of logistic regression trained via the fair data representation interpolation: LRModelsAUC_path_list
LRModelsAUC_path_list =[]
# classification discrimination (definition 6.1) of logistic regression trained via the fair data representation interpolation: LRDisc_path_list
LRDisc_path_list =[]
for i in range(0,len(TrainList)):
ModelsAUC_path = []
test_disc_path=[]
for t in range(0,len(T)):
lr=LogisticRegression()
lr.fit(X_train_path_list[i][t],Y_train_path_list[i][t])
proba=lr.predict_proba(X_test_path_list[i][t])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
ModelsAUC_path.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc_path.append(maxdisc)
LRModelsAUC_path_list.append(ModelsAUC_path)
LRDisc_path_list.append(test_disc_path)
# average auc and discrimination of the five fold cross validation: acc_RF, dis_RF, acc_LR, dis_LR
acc_RF = np.average(np.array(RFModelsAUC_path_list),axis = 0)
dis_RF = np.average(np.array(RFDisc_path_list),axis = 0)
acc_LR = np.average(np.array(LRModelsAUC_path_list),axis = 0)
dis_LR = np.average(np.array(LRDisc_path_list),axis = 0)
# In[8]:
dis_LR, acc_LR, dis_RF, acc_RF
# ## Comparison Methods and Corresponding Numerical Implementations
# In[9]:
interpolation_model = interp1d(dis_RF, acc_RF, kind = "linear")
# Plotting the Graph
X_RF=np.linspace(dis_RF.min(), dis_RF.max(), 500)
Y_RF=interpolation_model(X_RF)
plt.plot(X_RF, Y_RF)
plt.title("Pareto Frontier for Random Forest on COMPAS")
plt.xlabel("Disparity")
plt.ylabel("AUC")
plt.show()
# In[10]:
interpolation_model = interp1d(dis_LR, acc_LR, kind = "linear")
# Plotting the Graph
X_LR=np.linspace(dis_LR.min(), dis_LR.max(), 500)
Y_LR=interpolation_model(X_LR)
plt.plot(X_LR, Y_LR)
plt.title("Pareto Frontier for Logistic Regression on COMPAS")
plt.xlabel("Disparity")
plt.ylabel("AUC")
plt.show()
# In[11]:
# define the trained logistic regression model in the paper by Calmon
def RunLRClassifier(TrainList,TestList,TrainNewList,TestNewList,Z_features,X_features,Y_features):
LRModelsAUC=[]
LRTestPreds=[]
for i in range(0,len(TrainNewList)):
dft = pd.get_dummies(TrainNewList[i][Z_features+X_features])
lr=LogisticRegression()
lr.fit(dft,TrainNewList[i][Y_features])
dft = pd.get_dummies(TestNewList[i][Z_features+X_features])
proba=lr.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
LRTestPreds.append(dft)
return LRModelsAUC,LRTestPreds
# define the logistic regression model without using the sensitive information
def RunLRWithoutDClassifier(TrainList,TestList,Z_features,X_features,Y_features):
LRModelsAUC=[]
LRTestPreds=[]
for i in range(0,len(TrainList)):
dft = pd.get_dummies(TrainList[i][X_features])
lr=LogisticRegression()
lr.fit(dft,TrainList[i][Y_features])
dft = pd.get_dummies(TestList[i][X_features])
proba=lr.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
LRTestPreds.append(dft)
return LRModelsAUC,LRTestPreds
# define the standard logistic regression model
def RunPlainLRClassifier(TrainList,TestList,Z_features,X_features,Y_features):
LRModelsAUC=[]
LRTestPreds=[]
for i in range(0,len(TrainList)):
dft = | pd.get_dummies(TrainList[i][Z_features+X_features]) | pandas.get_dummies |
import pandas as pd
import numpy as np
import time
import os
import datetime
import math
import requests
import unicodedata
def normalize_str(s):
""" Function for name normalization (handle áéíóú). """
return unicodedata.normalize("NFKD", s).encode("ascii","ignore").decode("ascii").upper()
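# e.g. normalize_str("José") == "JOSE" (illustrative example only)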
FILES_TO_DOWNLOAD = {
'Argentina_Provinces.csv': 'https://raw.githubusercontent.com/mariano22/argcovidapi/master/csvs/Argentina_Provinces.csv',
'SantaFe_AllData.csv': 'https://raw.githubusercontent.com/mariano22/argcovidapi/master/csvs/SantaFe_AllData.csv',
}
DATA_DIR = './data/'
def _download_file(url, out_file):
response = requests.get(url)
assert response.status_code == 200,\
'Wrong status code at dowloading {}'.format(out_file)
f = open(out_file, "wb")
f.write(response.content)
f.close()
def _download_expired_data():
for csv_fn, csv_remote_fp in FILES_TO_DOWNLOAD.items():
csv_fp = os.path.join(DATA_DIR, csv_fn)
if (not os.path.isfile(csv_fp)) or (time.time()-os.stat(csv_fp).st_mtime>30*60):
print('Downloading',csv_fn)
_download_file(csv_remote_fp, csv_fp)
def _load_National_data(csv_fp):
df_arg = pd.read_csv(csv_fp)
df_arg['LOCATION'] = 'ARGENTINA/' + df_arg['PROVINCIA']
df_arg = df_arg.drop(columns=['PROVINCIA'])
df_arg = df_arg.set_index(['TYPE','LOCATION'])
df_arg = df_arg.rename(columns=lambda colname: pd.to_datetime(colname,format='%d/%m').replace(year=2020))
total_arg = df_arg.groupby(level=[0]).sum()
total_arg['LOCATION']='ARGENTINA'
total_arg = total_arg.reset_index().set_index(['TYPE','LOCATION'])
df_arg = pd.concat([df_arg,total_arg]).sort_index()
df_arg = df_arg[df_arg.columns[:-1]]
return df_arg
def _set_location_safe(row):
location_prefix = 'ARGENTINA/SANTA FE'
if row['DEPARTMENT']=='##TOTAL':
return location_prefix
location_prefix += '/'+row['DEPARTMENT'][3:]
if row['PLACE'].startswith('#'):
return location_prefix
return location_prefix +'/'+ row['PLACE']
def _load_SantaFe_data(csv_fp):
df_safe = pd.read_csv(csv_fp)
df_safe['LOCATION'] = df_safe.apply(_set_location_safe, axis=1)
df_safe = df_safe[ (df_safe['TYPE']=='CONFIRMADOS') & (df_safe['DEPARTMENT']!='##TOTAL') ]
df_safe['LOCATION'] = df_safe['LOCATION'].replace({
'ARGENTINA/SANTA FE/IRIONDO/CLASSON':'ARGENTINA/SANTA FE/IRIONDO/CLASON',
'ARGENTINA/SANTA FE/ROSARIO/VILLA GOB. GALVEZ':'ARGENTINA/SANTA FE/ROSARIO/VILLA GOBERNADOR GALVEZ',
'ARGENTINA/SANTA FE/SAN LORENZO/PUERTO GRAL. SAN MARTIN': 'ARGENTINA/SANTA FE/SAN LORENZO/PUERTO GENERAL SAN MARTIN',
})
df_safe = df_safe.drop(columns=['DEPARTMENT', 'PLACE'])
df_safe = df_safe.set_index(['TYPE','LOCATION'])
df_safe = df_safe.rename(columns=lambda colname: pd.to_datetime(colname,format='%d/%m/%Y'))
return df_safe
def _load_data_time_series(df_geoinfo):
df_arg = _load_National_data(os.path.join(DATA_DIR, 'Argentina_Provinces.csv'))
df_safe = _load_SantaFe_data(os.path.join(DATA_DIR, 'SantaFe_AllData.csv'))
df = pd.concat([df_arg,df_safe])
# Non described dates are 0's
df = df.fillna(0).sort_index()
# Set day 0 (prior any date) with all 0's
day_zero = df.columns[0]- | pd.Timedelta(days=1) | pandas.Timedelta |
import numpy as np, pandas as pd, networkx as nx, itertools, sys, traceback
def assemble_data(data_folder):
"""
    We don't include a dummy for missings for data.church because Card and Giuliano do not report its coefficient. We don't include a dummy for missings for data.parent_HS, data.parent_college because Card and Giuliano do not report its coefficient. We only use one measure for physical development index because the other measures have too much missing data.
"""
network_data = pd.read_csv(data_folder + '/21600-0003-Data.tsv', sep='\t', usecols=['AID','ODGX2'], low_memory=False)
network_data.columns = ['id','outdeg']
wave1 = | pd.read_csv(data_folder + '/21600-0001-Data.tsv', sep='\t', usecols=['AID','H1MP4','H1EE14','H1EE15','S1','S2','S6B','S11','S12','S17','S18','PA22','PA23','PA63'], low_memory=False) | pandas.read_csv |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from application import model_builder
def test_validate_types_numeric_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = ["3", "4", "5"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_throws_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["3d", "4d", "5d"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s c", "0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Percentage"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [30.0, 40.0, 50.0]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [0.3, 0.4, 0.5]
df["Some Feature 2"] = ["0.3%", "0.4 %", " 0.5 %"]
df["Some Feature 3"] = ["30", "40", " 50"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Percentage"],
["Some Feature 2", "Percentage"],
["Some Feature 3", "Percentage"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_money_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s$", "$0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Money"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_money_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [50000, 40000.0, 50000]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [30, 40, 50]
df["Some Feature 2"] = ["$30", "$ 40 ", " $50 "]
df["Some Feature 3"] = ["$50,000", "40000", " 50,000"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Money"],
["Some Feature 2", "Money"],
["Some Feature 3", "Money"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_value_set_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = ["Married", "Single", "Married"]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Value Set"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_value_set_throws_value_exception_too_many_values():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 2000)
df["Answer"] = range(1, 2000)
fields = [["Some Feature", "Value Set"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_yes_no_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = ["Yes", "No", "No Data"]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Yes/No"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_yes_no_throws_value_exception_too_many_values():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 5)
df["Answer"] = range(1, 5)
fields = [["Some Feature", "Yes/No"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_invalid_field_type():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = range(1, 5)
df["Answer"] = range(1, 5)
fields = [["Some Feature", "Invalid Type"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_stripdown_splits_x_variables():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = [3, 4, 5]
df["Some Feature"] = x_expect["Some Feature"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_splits_response_variable_works():
# Arrange
df = pd.DataFrame()
y_expect = pd.Series([1, 0, 0], name="Answer")
df["Some Feature"] = [3, 4, 5]
df["Answer"] = y_expect
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_series_equal(y, y_expect)
def test_stripdown_splits_response_variable_works_if_scale_of_0_to_100():
# Arrange
df = pd.DataFrame()
y_expect = pd.Series([0, 0, 1], dtype="int32")
df["Some Feature"] = [3, 4, 5]
df["Answer"] = [50, 70, 100]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_series_equal(y, y_expect)
def test_stripdown_removes_contact_details():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = [3, 4, 5]
df["Some Feature"] = x_expect["Some Feature"]
df["Contacts1"] = ["tom", "john", "sarah"]
df["Contacts2"] = ["tom", "john", "sarah"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["Contacts1", "Contact Details"], ["Contacts2", "Contact Details"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_removes_string_fields():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = [3, 4, 5]
df["Some Feature"] = x_expect["Some Feature"]
df["Postcodes"] = ["2104", "2000", "2756"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["Postcodes", "String"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_removes_columns_with_many_nulls_fields():
# Arrange
df = pd.DataFrame()
x_expect = pd.DataFrame()
x_expect["Some Feature"] = range(1, 12)
df["Some Feature"] = x_expect["Some Feature"]
df["A lot of Nulls"] = [None, 1, 2, 3, 4, 5, 6, 7, 8, None, 9]
df["Answer"] = range(1, 12)
fields = [["Some Feature", "Numeric"], ["Answer", "Response Variable"],
["A lot of Nulls", "Numeric"]]
# Act
x, y, fields = model_builder.stripdown_features(df, fields)
# Assert
assert_frame_equal(x, x_expect)
def test_stripdown_doesnt_remove_columns_with_some_nulls():
# Arrange
df = pd.DataFrame()
x_expect = | pd.DataFrame() | pandas.DataFrame |
"""
General utility functions that are used in a variety of contexts.
The functions in this module are used in various stages of the ETL and post-etl
processes. They are usually not dataset specific, but not always. If a function
is designed to be used as a general purpose tool, applicable in multiple
scenarios, it should probably live here. There are lost of transform type
functions in here that help with cleaning and restructing dataframes.
"""
import itertools
import logging
import pathlib
import re
import shutil
from functools import partial
import addfips
import numpy as np
import pandas as pd
import requests
import sqlalchemy as sa
import timezonefinder
from sqlalchemy.engine import reflection
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
# This is a little abbreviated function that allows us to propagate the NA
# values through groupby aggregations, rather than using inefficient lambda
# functions in each one.
sum_na = partial(pd.Series.sum, skipna=False)
# Initializing this TimezoneFinder opens a bunch of geography files and holds
# them open for efficiency. I want to avoid doing that for every call to find
# the timezone, so this is global.
tz_finder = timezonefinder.TimezoneFinder()
def download_zip_url(url, save_path, chunk_size=128):
"""
Download and save a Zipfile locally.
Useful for acquiring and storing non-PUDL data locally.
Args:
url (str): The URL from which to download the Zipfile
save_path (pathlib.Path): The location to save the file.
chunk_size (int): Data chunk in bytes to use while downloading.
Returns:
None
"""
# This is a temporary hack to avoid being filtered as a bot:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
r = requests.get(url, stream=True, headers=headers)
with save_path.open(mode='wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
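# Hypothetical usage sketch (URL and path below are placeholders, not part of PUDL):
#   download_zip_url(
#       url="https://example.com/archive.zip",
#       save_path=pathlib.Path("archive.zip"),
#   )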
def add_fips_ids(df, state_col="state", county_col="county", vintage=2015):
"""Add State and County FIPS IDs to a dataframe."""
# force the columns to be the nullable string types so we have a consistent
# null value to filter out before feeding to addfips
df = df.astype({
state_col: pd.StringDtype(),
county_col: pd.StringDtype(),
})
af = addfips.AddFIPS(vintage=vintage)
# Lookup the state and county FIPS IDs and add them to the dataframe:
df["state_id_fips"] = df.apply(
lambda x: (af.get_state_fips(state=x[state_col])
if pd.notnull(x[state_col]) else pd.NA),
axis=1)
logger.info(
f"Assigned state FIPS codes for "
f"{len(df[df.state_id_fips.notnull()])/len(df):.2%} of records."
)
df["county_id_fips"] = df.apply(
lambda x: (af.get_county_fips(state=x[state_col], county=x[county_col])
if pd.notnull(x[county_col]) else pd.NA),
axis=1)
# force the code columns to be nullable strings - the leading zeros are
# important
df = df.astype({
"county_id_fips": pd.StringDtype(),
"state_id_fips": pd.StringDtype(),
})
logger.info(
f"Assigned county FIPS codes for "
f"{len(df[df.county_id_fips.notnull()])/len(df):.2%} of records."
)
return df
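# Hypothetical usage sketch (illustrative values only):
#   df = pd.DataFrame({"state": ["MT", "DE"], "county": ["Yellowstone", "Kent"]})
#   df = add_fips_ids(df)  # adds nullable-string "state_id_fips" and "county_id_fips" columns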
def clean_eia_counties(df, fixes, state_col="state", county_col="county"):
"""Replace non-standard county names with county nmes from US Census."""
df = df.copy()
df[county_col] = (
df[county_col].str.strip()
.str.replace(r"\s+", " ", regex=True) # Condense multiple whitespace chars.
.str.replace(r"^St ", "St. ", regex=True) # Standardize abbreviation.
.str.replace(r"^Ste ", "Ste. ", regex=True) # Standardize abbreviation.
.str.replace("Kent & New Castle", "Kent, New Castle") # Two counties
# Fix ordering, remove comma
.str.replace("Borough, Kodiak Island", "Kodiak Island Borough")
# Turn comma-separated counties into lists
.str.replace(r",$", "", regex=True).str.split(',')
)
# Create new records for each county in a multi-valued record
df = df.explode(county_col)
df[county_col] = df[county_col].str.strip()
# Yellowstone county is in MT, not WY
df.loc[(df[state_col] == "WY") &
(df[county_col] == "Yellowstone"), state_col] = "MT"
# Replace individual bad county names with identified correct names in fixes:
for fix in fixes.itertuples():
state_mask = df[state_col] == fix.state
county_mask = df[county_col] == fix.eia_county
df.loc[state_mask & county_mask, county_col] = fix.fips_county
return df
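# Usage sketch for clean_eia_counties. The fixes frame must carry the columns used
# above (state, eia_county, fips_county); the specific correction listed here is an
# illustrative placeholder.
def _example_clean_eia_counties():
    eia = pd.DataFrame(
        {"state": ["DE", "AK"], "county": ["Kent & New Castle", "Wrangell Petersburg"]}
    )
    fixes = pd.DataFrame(
        {"state": ["AK"], "eia_county": ["Wrangell Petersburg"], "fips_county": ["Wrangell"]}
    )
    # The DE record is split into one row per county; the AK record is renamed via fixes.
    return clean_eia_counties(eia, fixes)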
def oob_to_nan(df, cols, lb=None, ub=None):
"""
    Set non-numeric values and those outside of a given range to NaN.
Args:
df (pandas.DataFrame): The dataframe containing values to be altered.
cols (iterable): Labels of the columns whose values are to be changed.
lb: (number): Lower bound, below which values are set to NaN. If None,
don't use a lower bound.
        ub: (number): Upper bound, above which values are set to NaN. If None,
don't use an upper bound.
Returns:
pandas.DataFrame: The altered DataFrame.
"""
out_df = df.copy()
for col in cols:
# Force column to be numeric if possible, NaN otherwise:
        out_df.loc[:, col] = pd.to_numeric(out_df[col], errors="coerce")
        if lb is not None:
            out_df.loc[out_df[col] < lb, col] = np.nan
        if ub is not None:
            out_df.loc[out_df[col] > ub, col] = np.nan
    return out_df
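# Usage sketch for oob_to_nan (toy data; the bounds are arbitrary):
def _example_oob_to_nan():
    toy = pd.DataFrame({"heat_rate": [5.0, "bogus", 250.0]})
    # Non-numeric entries and values outside [1, 100] become NaN:
    return oob_to_nan(toy, cols=["heat_rate"], lb=1, ub=100)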
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind, kruskal
label_col = 'Indel frequency at endogenous target site (background subtracted)'
f = 'data/41587_2018_BFnbt4061_MOESM39_ESM.xlsx'
col_mappings = {
'Indel frequency at endogenous target site (background subtracted)': 'efficiency',
'Indel frequency at endogenous target site': 'efficiency',
'Indel freqeuncy\n(Background substracted, %)': 'efficiency',
'Indel frequency at synthetic target site (background subtracted)': 'syn_efficiency',
'Chromatin accessibility (1= DNase I hypersensitive sites, 0 = Dnase I non-sensitive sites)': 'chromatin'
}
def get_key(i):
return col_mappings[i] if i in col_mappings else i
lenti_df = pd.read_excel(f, 'Data set HEK-lenti', header=1, index_col='Target number').rename(get_key, axis='columns')
hct_df = pd.read_excel(f, 'Data set HCT-plasmid', header=1, index_col='Target number').rename(get_key, axis='columns')
hek_df = pd.read_excel(f, 'Data set HEK-plasmid', header=1, index_col='Target number')
# Author: <NAME>
# Date: 28th Jan, 2021
# ETL Function to load COVID Data
import requests
import json
import re
import pandas as pd
from datetime import datetime
import sqlite3
import concurrent.futures
from typing import List
from tqdm import tqdm
import time
class COVID_ETL(object):
# Default URL and DB Name
url = ""
db_name = ""
def __init__(self, url= "https://health.data.ny.gov/api/views/xdss-u53e/rows.json?accessType=DOWNLOAD",\
db_name = "covid.db"):
self.url = url
self.db_name = db_name
## Performs basic transformations to data from response and returns a Pandas DataFrame
    @staticmethod
    def transform(response) -> pd.DataFrame:
## Parsing Column Names
columns_meta_data = response.json()["meta"]["view"]["columns"]
column_names = [re.sub(':', '' , x["fieldName"]) for x in columns_meta_data]
print("There are {} columns in this data set".format (str(len(column_names))))
print("There are {} rows of data".format(str(len(response.json()["data"]))))
## Storing and cleaning data as a DataFrame
df = pd.DataFrame(response.json()["data"], columns=column_names)
df["test_date"] = pd.to_datetime(df["test_date"]).astype("str")
df["county"] = df["county"].apply(lambda x: re.sub(' ', '_' , x.lower()).replace(".", ""))
df[["new_positives", "cumulative_number_of_positives", "total_number_of_tests", "cumulative_number_of_tests"]] = df[["new_positives", "cumulative_number_of_positives", "total_number_of_tests", "cumulative_number_of_tests"]].astype("int")
## Selecting Desired Columns
df = df[["county", "test_date", "new_positives", "cumulative_number_of_positives", "total_number_of_tests", "cumulative_number_of_tests"]]
df["load_date"] = datetime.today().strftime("%Y-%m-%d")
return df
    @staticmethod
    def load(df, county_names, db_name = "covid.db"):
# Create tables
conn = sqlite3.connect(db_name)
## Since our program is CPU bound and not IO bound - using multi-processing instead of multi-threading
t1 = time.perf_counter()
with concurrent.futures.ProcessPoolExecutor() as executer:
results = [executer.submit(COVID_ETL.ingest, df, county_name, db_name) for county_name in county_names]
for f in concurrent.futures.as_completed(results):
print(f.result())
t2 = time.perf_counter()
print(f'Finished in {t2-t1} seconds')
## Function to generate table creation command
    @staticmethod
    def create_table_cmd(county_name):
type_map = { "test_date": "TEXT",
"new_positives": "INTEGER",
"cumulative_number_of_positives": "INTEGER",
"total_number_of_tests": "INTEGER",
"cumulative_number_of_tests": "INTEGER",
"load_date": "TEXT" }
sql_cols = []
sql_cols += [f" {col} {type_map[col]}" for col in type_map]
sql_cols = ',\n'.join(sql_cols)
cmd = f"""CREATE TABLE if not exists {county_name} (
{sql_cols}
);"""
return cmd
## Function to add data to table
    @staticmethod
    def ingest(df, county_name, db_name) -> str:
conn_temp = sqlite3.connect(db_name)
c = conn_temp.cursor()
# Create Table with for County if it does not exist
cmd = COVID_ETL.create_table_cmd(county_name)
c.execute(cmd)
# Adding Data to Table
df_county = df[df["county"] == county_name].drop(["county"], axis = 1)
max_date_in_table = pd.read_sql(f"select max(test_date) from {county_name}", conn_temp).values[0][0]
if max_date_in_table is not None:
            df_county = df_county[pd.to_datetime(df_county.test_date) > pd.to_datetime(max_date_in_table)]
        # Append only the new rows and report status (the exact message format here is an assumption)
        df_county.to_sql(county_name, conn_temp, if_exists="append", index=False)
        conn_temp.commit()
        conn_temp.close()
        return f"Loaded {len(df_county)} new row(s) into {county_name}"
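# End-to-end usage sketch for COVID_ETL (illustrative; invoke it under an
# `if __name__ == "__main__":` guard because load() spawns worker processes):
def _example_covid_etl():
    etl = COVID_ETL()
    response = requests.get(etl.url)
    df = COVID_ETL.transform(response)
    county_names = list(df["county"].unique())
    COVID_ETL.load(df, county_names, db_name=etl.db_name)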
import logging
import librosa
import numpy as np
import pandas as pd
from common.utilities import KnownRequestParseError
logger = logging.getLogger(__name__)
# Resample all uploaded files to this sample rate. Ideally, should match the SR used for training.
SUPPORTED_SAMPLE_RATE = 22050
# Duration of a single unit of recognition. The input file will be split to chunks of this size.
SECONDS_PER_CHUNK = 1.0
# Signal with RMS lower than this value will be considered silence.
ABSOLUTE_SILENCE_RMS_THRESHOLD = 1e-5
# Signal with RMS lower than this percentile in the input file will be considered silence.
ADAPTIVE_SILENCE_RMS_PERCENTILE = 25
def is_chunk_silent(rms_chunk, adaptive_threshold):
"""
Determines whether the specified audio segment is silent or not.
Parameters
----------
rms_chunk : numpy.array
A 1D vector of RMS values for the chunk.
adaptive_threshold : float
An RMS threshold below which the audio is considered silent.
Returns
-------
bool
"""
mean_rms = np.mean(rms_chunk)
return mean_rms < ABSOLUTE_SILENCE_RMS_THRESHOLD or mean_rms < adaptive_threshold
def featurize_chroma_chunk(chunk):
"""
Extract features from a chromagram segment.
Parameters
----------
chunk : numpy.array
        A 2D array (12, *) representing the chromagram for the chunk.
Returns
-------
numpy.array
Extracted 1D feature vector.
"""
return np.mean(chunk, axis=1)
def featurize_file(filename):
"""
Extracts audio features from the specified audio file.
Parameters
----------
filename : str
Path to a saved audio file.
Returns
-------
pandas.DataFrame
A data frame with extracted audio features, one line for each SECONDS_PER_CHUNK seconds.
"""
try:
logger.info(f'Reading audio file: "{str(filename)}"')
signal, sample_rate = librosa.load(filename, sr=SUPPORTED_SAMPLE_RATE)
except Exception as e:
error_desc = str(e) or e.__class__.__name__
raise KnownRequestParseError('Cannot load audio file. Error: ' + error_desc)
duration = len(signal) / sample_rate
logger.info(f'File duration: {duration:.1f} seconds')
spectrogram = np.abs(librosa.stft(signal))
spectrogram_per_second = spectrogram.shape[1] / duration
logger.info(f'Spectrogram shape: {spectrogram.shape}')
rms = librosa.feature.rms(S=spectrogram).T.ravel()
chroma = librosa.feature.chroma_stft(S=spectrogram, sr=sample_rate)
adaptive_rms_threshold = np.percentile(rms, ADAPTIVE_SILENCE_RMS_PERCENTILE)
# Split RMS and Chroma arrays into equally sized chunks, each taking SECONDS_PER_CHUNK.
chunk_split_points = np.arange(0, chroma.shape[-1], spectrogram_per_second * SECONDS_PER_CHUNK)
chunk_split_points = np.round(chunk_split_points).astype(int)[1:-1]
rms_chunks = np.split(rms, chunk_split_points)
chroma_chunks = np.split(chroma, chunk_split_points, axis=1)
time_markers = np.arange(0, len(chroma_chunks)) * SECONDS_PER_CHUNK
# Featurize each chunk, detect silence, and store the results as a DataFrame row.
logger.info('Generating features')
features = [
featurize_chroma_chunk(chunk)
for chunk in chroma_chunks
]
feature_names = [
'chroma-' + note
for note in ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
]
is_silent = [
is_chunk_silent(chunk, adaptive_rms_threshold)
for chunk in rms_chunks
]
# Assemble results.
    df = pd.DataFrame(features, columns=feature_names)
    # Attach the per-chunk metadata computed above; the column names "time" and
    # "is_silent" are illustrative choices, not taken from the original source.
    df["time"] = time_markers
    df["is_silent"] = is_silent
    return df
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
import numpy as np
import seaborn as sn
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
def get_model_torch(in_shape, out_shape):
from models import model_torch
return model_torch(in_shape, out_shape)
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
return np.eye(num_classes, dtype='uint8')[y]
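# Quick sketch: to_categorical turns integer class ids into one-hot rows, e.g.
# class ids [0, 2] with three classes become [[1, 0, 0], [0, 0, 1]].
def _example_to_categorical():
    return to_categorical(np.array([0, 2]), num_classes=3)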
def visualize_conf_matrix(matrix, class_list):
df_cm = pd.DataFrame(matrix, index = [i for i in class_list],
columns = [i for i in class_list])
plt.figure(figsize = (13,7))
sn.set(font_scale=1.8)
sn.heatmap(df_cm, annot=True, cmap='Greys', fmt='g', annot_kws={"size": 20})
plt.show(block=False);
folder = './sound_event_detection/figures/'
if not os.path.exists(folder):
os.makedirs(folder)
plt.savefig(folder + 'confusion_matrix' + '.png', bbox_inches='tight')
def get_conf_matrix(y_pred, y_test):
y_pred_max = []
y_test_max = []
for j in y_pred:
y_pred_max.append(np.argmax(j))
for j in y_test:
y_test_max.append(np.argmax(j))
return confusion_matrix(y_test_max, y_pred_max)
def get_metrics(conf_matrix):
tn = 0.0
fp = 0.0
tp = 0.0
fn = 0.0
epsilon = 0.01
for it1 in range(conf_matrix.shape[0]):
tp += conf_matrix[it1][it1]
for it2 in range(conf_matrix.shape[1]):
if it2 != it1:
fp += conf_matrix[it2][it1]
fn += conf_matrix[it1][it2]
precision = tp / (tp + fp + epsilon)
recall = tp / (tp + fn + epsilon)
f1 = 2 * (precision*recall) / (precision + recall + epsilon)
return tp, precision, recall, f1
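# Toy sketch of the metric helpers above (the one-hot arrays are invented):
def _example_metrics():
    y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]])
    y_test = np.array([[1, 0], [0, 1], [0, 1]])
    conf_matrix = get_conf_matrix(y_pred, y_test)
    return get_metrics(conf_matrix)  # (tp, precision, recall, f1)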
print('#'*40, "\n\t\tTesting\n")
token = ''
test_reader = pd.read_table('./sound_event_detection/src/test'+token+'.csv', sep='\t', encoding='utf-8')
file_test_df = pd.DataFrame(test_reader)
import numpy as np
import pandas as pd
import scanpy as sc
import anndata
import scprep
import tempfile
import os
def load_scicar(
rna_url,
rna_cells_url,
rna_genes_url,
atac_url,
atac_cells_url,
atac_genes_url,
test=False,
):
rna_cells = pd.read_csv(rna_cells_url, low_memory=False)["sample"]
rna_genes = pd.read_csv(rna_genes_url, low_memory=False)["gene_id"]
atac_cells = pd.read_csv(atac_cells_url, low_memory=False)["sample"]
    atac_genes = pd.read_csv(atac_genes_url, low_memory=False, index_col=0)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
            StringIO(data))
"""
provide data pre-processing methods
author: Xiaoqi
date: 2019.06.24
"""
import pandas as pd
from matplotlib import colors
from scipy.stats import spearmanr
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
class FeatureProcessing(object):
def __init__(self, df_data):
self.df_data = df_data
self.scaler = None
def calc_correlation(self):
corr = np.round(spearmanr(self.df_data, nan_policy='omit').correlation, 4)
df_corr = pd.DataFrame(data=corr, index=self.df_data.columns, columns=self.df_data.columns)
return df_corr
def show_corr_features(self, cut_off=0.5):
df_corr = self.calc_correlation()
corr_dic = {}
for f in self.df_data.columns:
indices = df_corr.loc[(df_corr[f] >= cut_off) & (df_corr[f] != 1)].index
corr = df_corr.loc[(df_corr[f] >= cut_off) & (df_corr[f] != 1)][f]
if len(indices) > 0:
for i in range(len(corr)):
if not (indices[i], f) in corr_dic.keys():
corr_dic[(f, indices[i])] = corr[i]
if len(corr_dic) == 0:
            print('There are no correlated features with coefficient larger than {} in this dataset'.format(cut_off))
else:
print('The following dataframe shows the correlated features in this dataset')
            corr_df = pd.DataFrame(corr_dic, index=['spearmanr_corr'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import numpy as np
import pandas as pd
import pytest
import pytz
from eemeter.transform import (
as_freq,
clean_caltrack_billing_data,
downsample_and_clean_caltrack_daily_data,
clean_caltrack_billing_daily_data,
day_counts,
get_baseline_data,
get_reporting_data,
get_terms,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
overwrite_partial_rows_with_nan,
)
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_daily_all_nones_instantaneous(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D", series_type="instantaneous")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_daily_all_nones(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (28,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_hourly = as_freq(temperature_data, freq="H", series_type="instantaneous")
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (811,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_month_start = as_freq(temperature_data, freq="MS", series_type="instantaneous")
assert as_month_start.shape == (29,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq="MS")).mean()
assert temperature_data.shape == (28,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_as_freq_perserves_nulls(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
monthly_with_nulls = meter_data[meter_data.index.year != 2016].reindex(
meter_data.index
)
daily_with_nulls = as_freq(monthly_with_nulls.value, freq="D")
assert (
round(monthly_with_nulls.value.sum(), 2)
== round(daily_with_nulls.sum(), 2)
== 11094.05
)
assert monthly_with_nulls.value.isnull().sum() == 13
assert daily_with_nulls.isnull().sum() == 365
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = pd.Series([], index=index)
counts = day_counts(data.index)
assert counts.shape == (0,)
def test_get_baseline_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(meter_data)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_baseline_data_with_end(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(meter_data, end=blackout_start_date)
assert meter_data.shape != baseline_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date, max_days=None
)
assert meter_data.shape != baseline_data.shape == (9595, 1)
assert len(warnings) == 0
def test_get_baseline_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
with pytest.raises(NoBaselineDataError):
get_baseline_data(meter_data, end=pd.Timestamp("2000").tz_localize("UTC"))
def test_get_baseline_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, start=start, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_start"
assert (
warning.description
== "Data does not have coverage at requested baseline start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_baseline_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, end=end, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_end"
assert (
warning.description
== "Data does not have coverage at requested baseline end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_baseline_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_n_days_billing_period_overshoot(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2017, 11, 9, tzinfo=pytz.UTC),
max_days=45,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 526.25
assert len(warnings) == 0
def test_get_baseline_data_too_far_from_date(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
end_date = datetime(2020, 11, 9, tzinfo=pytz.UTC)
max_days = 45
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 1393.4
assert len(warnings) == 0
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (3, 1)
assert round(baseline_data.value.sum(), 2) == 2043.92
assert len(warnings) == 0
# Includes 3 data points because data at index -3 is closer to start target
# then data at index -2
start_target = baseline_data.index[-1] - timedelta(days=max_days)
assert abs((baseline_data.index[0] - start_target).days) < abs(
(baseline_data.index[1] - start_target).days
)
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
def test_get_reporting_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(meter_data)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_reporting_data_with_start(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(meter_data, start=blackout_end_date)
assert meter_data.shape != reporting_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date, max_days=None
)
assert meter_data.shape != reporting_data.shape == (9607, 1)
assert len(warnings) == 0
def test_get_reporting_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
with pytest.raises(NoReportingDataError):
get_reporting_data(meter_data, start=pd.Timestamp("2030").tz_localize("UTC"))
def test_get_reporting_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
reporting_data, warnings = get_reporting_data(
meter_data, start=start, max_days=None
)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_start"
assert (
warning.description
== "Data does not have coverage at requested reporting start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_reporting_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
reporting_data, warnings = get_reporting_data(meter_data, end=end, max_days=None)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_end"
assert (
warning.description
== "Data does not have coverage at requested reporting end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_reporting_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_terms_unrecognized_method(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index, term_lengths=[365], method="unrecognized")
def test_get_terms_unsorted_index(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index[::-1], term_lengths=[365])
def test_get_terms_bad_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
terms = get_terms(
meter_data.index,
term_lengths=[60, 60, 60],
term_labels=["abc", "def"], # too short
)
def test_get_terms_default_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert [t.label for t in terms] == ["term_001", "term_002", "term_003"]
def test_get_terms_custom_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(
meter_data.index, term_lengths=[60, 60, 60], term_labels=["abc", "def", "ghi"]
)
assert [t.label for t in terms] == ["abc", "def", "ghi"]
def test_get_terms_empty_index_input(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index[:0], term_lengths=[60, 60, 60])
assert len(terms) == 0
def test_get_terms_strict(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
strict_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="strict",
)
assert len(strict_terms) == 2
year1 = strict_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (12,)
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert (
year1.target_end_date
== pd.Timestamp("2017-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert (
year1.actual_start_date
== year1.index[0]
== pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
)
assert (
year1.actual_end_date
== year1.index[-1]
== pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
)
assert year1.actual_term_length_days == 332
assert year1.complete
year2 = strict_terms[1]
assert year2.index.shape == (13,)
assert year2.label == "year2"
    assert year2.target_start_date == pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
# Copyright 2022 <NAME>, <NAME>, <NAME>.
# Licensed under the BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
# This file may not be copied, modified, or distributed
# except according to those terms.
import sys
sys.stderr = open(snakemake.log[0], "w")
import pandas as pd
from pandas._typing import FilePathOrBuffer
summary = pd.DataFrame()
def register_quality_data(path_to_type_summary: FilePathOrBuffer, assembly_type: str):
if path_to_type_summary != "resources/genomes/main.fasta":
global summary
quality_data = pd.read_csv(path_to_type_summary, sep="\t", index_col="Sample")
quality_data["filter"] = (
quality_data["identity"] > snakemake.params.min_identity
) & (quality_data["n_share"] < snakemake.params.max_n)
quality_data[["identity", "n_share"]] = quality_data[
["identity", "n_share"]
].applymap(lambda x: "{:,.2f}%".format(x * 100))
quality_data.rename(
columns={
"identity": "{}: Identity".format(assembly_type),
"n_share": "{}: Share N".format(assembly_type),
"filter": "{}: Pass Filter".format(assembly_type),
},
inplace=True,
)
        summary = pd.concat([summary, quality_data], axis=1)
import datetime as dt
import multiprocessing as mp
import numpy as np
import pandas as pd
import sys
import time
def mpPandasObj(func, pdObj, numThreads=24, mpBatches=1, linMols=True, **kargs):
'''
Parallelize jobs, return a dataframe or series
multiprocessing snippet [20.7]
+ func: function to be parallelized. Returns a DataFrame
+ pdObj[0]: Name of argument used to pass the molecule
+ pdObj[1]: List of atoms that will be grouped into molecules
+ kwds: any other argument needed by func
Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kwds)
'''
import pandas as pd
#if linMols:parts=linParts(len(argList[1]),numThreads*mpBatches)
# else:parts=nestedParts(len(argList[1]),numThreads*mpBatches)
if linMols:
parts = linParts(len(pdObj[1]), numThreads*mpBatches)
else:
parts = nestedParts(len(pdObj[1]), numThreads*mpBatches)
jobs = []
for i in range(1, len(parts)):
job = {pdObj[0]: pdObj[1][parts[i-1]:parts[i]], 'func': func}
job.update(kargs)
jobs.append(job)
if numThreads == 1:
out = processJobs_(jobs)
else:
out = processJobs(jobs, numThreads=numThreads)
if isinstance(out[0], pd.DataFrame):
        df0 = pd.DataFrame()
    elif isinstance(out[0], pd.Series):
        df0 = pd.Series()
    else:
        return out
    # Stitch the per-molecule outputs back together and restore index order
    for i in out:
        df0 = df0.append(i)
    df0 = df0.sort_index()
    return df0
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/Data-Mining/ #
# --------------------------------------------------------------------------- #
# Created : Tuesday, March 9th 2021, 12:24:24 am #
# Last Modified : Tuesday, March 9th 2021, 12:24:24 am #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2021 nov8.ai #
# =========================================================================== #
# =========================================================================== #
# 1. LIBRARIES #
# =========================================================================== #
#%%
# System and python libraries
from abc import ABC, abstractmethod
import datetime
import glob
import itertools
from joblib import dump, load
import os
import pickle
import time
import uuid
# Manipulating, analyzing and processing data
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy as sp
from scipy.stats.stats import pearsonr, f_oneway
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing import OneHotEncoder, PowerTransformer
from category_encoders import TargetEncoder, LeaveOneOutEncoder
# Feature and model selection and evaluation
from sklearn.feature_selection import RFECV, SelectKBest
from sklearn.feature_selection import VarianceThreshold, f_regression
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
# Regression based estimators
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
# Tree-based estimators
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Visualizing data
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
# Utilities
from utils import notify, PersistEstimator, comment, print_dict, print_dict_keys
# Data Source
from data import AmesData
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
pd.set_option('mode.chained_assignment', None)
# =========================================================================== #
# COLUMNS #
# =========================================================================== #
discrete = ["Year_Built","Year_Remod_Add","Bsmt_Full_Bath","Bsmt_Half_Bath",
"Full_Bath","Half_Bath","Bedroom_AbvGr","Kitchen_AbvGr","TotRms_AbvGrd",
"Fireplaces","Garage_Cars","Mo_Sold","Year_Sold", "Garage_Yr_Blt"]
continuous = ["Lot_Frontage","Lot_Area","Mas_Vnr_Area","BsmtFin_SF_1","BsmtFin_SF_2",
"Bsmt_Unf_SF","Total_Bsmt_SF","First_Flr_SF","Second_Flr_SF","Low_Qual_Fin_SF",
"Gr_Liv_Area","Garage_Area","Wood_Deck_SF","Open_Porch_SF","Enclosed_Porch",
"Three_season_porch","Screen_Porch","Pool_Area","Misc_Val"]
numeric = discrete + continuous
n_nominal_levels = 191
nominal = ['MS_SubClass', 'MS_Zoning', 'Street', 'Alley', 'Land_Contour', 'Lot_Config', 'Neighborhood',
'Condition_1', 'Condition_2', 'Bldg_Type', 'House_Style', 'Roof_Style', 'Roof_Matl',
'Exterior_1st', 'Exterior_2nd', 'Mas_Vnr_Type', 'Foundation', 'Heating', 'Central_Air',
'Garage_Type', 'Misc_Feature', 'Sale_Type', 'Sale_Condition']
ordinal = ['BsmtFin_Type_1', 'BsmtFin_Type_2', 'Bsmt_Cond', 'Bsmt_Exposure',
'Bsmt_Qual', 'Electrical', 'Exter_Cond', 'Exter_Qual', 'Fence', 'Fireplace_Qu',
'Functional', 'Garage_Cond', 'Garage_Finish', 'Garage_Qual', 'Heating_QC', 'Kitchen_Qual',
'Land_Slope', 'Lot_Shape', 'Overall_Cond', 'Overall_Qual', 'Paved_Drive', 'Pool_QC', 'Utilities']
pre_features = ['PID', 'MS_SubClass', 'MS_Zoning', 'Lot_Frontage', 'Lot_Area', 'Street',
'Alley', 'Lot_Shape', 'Land_Contour', 'Utilities', 'Lot_Config',
'Land_Slope', 'Neighborhood', 'Condition_1', 'Condition_2', 'Bldg_Type',
'House_Style', 'Overall_Qual', 'Overall_Cond', 'Year_Built',
'Year_Remod_Add', 'Roof_Style', 'Roof_Matl', 'Exterior_1st',
'Exterior_2nd', 'Mas_Vnr_Type', 'Mas_Vnr_Area', 'Exter_Qual',
'Exter_Cond', 'Foundation', 'Bsmt_Qual', 'Bsmt_Cond', 'Bsmt_Exposure',
'BsmtFin_Type_1', 'BsmtFin_SF_1', 'BsmtFin_Type_2', 'BsmtFin_SF_2',
'Bsmt_Unf_SF', 'Total_Bsmt_SF', 'Heating', 'Heating_QC', 'Central_Air',
'Electrical', 'First_Flr_SF', 'Second_Flr_SF', 'Low_Qual_Fin_SF',
'Gr_Liv_Area', 'Bsmt_Full_Bath', 'Bsmt_Half_Bath', 'Full_Bath',
'Half_Bath', 'Bedroom_AbvGr', 'Kitchen_AbvGr', 'Kitchen_Qual',
'TotRms_AbvGrd', 'Functional', 'Fireplaces', 'Fireplace_Qu',
'Garage_Type', 'Garage_Yr_Blt', 'Garage_Finish', 'Garage_Cars',
'Garage_Area', 'Garage_Qual', 'Garage_Cond', 'Paved_Drive',
'Wood_Deck_SF', 'Open_Porch_SF', 'Enclosed_Porch', 'Three_season_porch',
'Screen_Porch', 'Pool_Area', 'Pool_QC', 'Fence', 'Misc_Feature',
'Misc_Val', 'Mo_Sold', 'Year_Sold', 'Sale_Type', 'Sale_Condition',
'Longitude', 'Latitude']
post_features = ['PID', 'MS_SubClass', 'MS_Zoning', 'Lot_Frontage', 'Lot_Area', 'Street',
'Alley', 'Lot_Shape', 'Land_Contour', 'Utilities', 'Lot_Config',
'Land_Slope', 'Neighborhood', 'Condition_1', 'Condition_2', 'Bldg_Type',
'House_Style', 'Overall_Qual', 'Overall_Cond', 'Year_Built',
'Year_Remod_Add', 'Roof_Style', 'Roof_Matl', 'Exterior_1st',
'Exterior_2nd', 'Mas_Vnr_Type', 'Mas_Vnr_Area', 'Exter_Qual',
'Exter_Cond', 'Foundation', 'Bsmt_Qual', 'Bsmt_Cond', 'Bsmt_Exposure',
'BsmtFin_Type_1', 'BsmtFin_SF_1', 'BsmtFin_Type_2', 'BsmtFin_SF_2',
'Bsmt_Unf_SF', 'Total_Bsmt_SF', 'Heating', 'Heating_QC', 'Central_Air',
'Electrical', 'First_Flr_SF', 'Second_Flr_SF', 'Low_Qual_Fin_SF',
'Gr_Liv_Area', 'Bsmt_Full_Bath', 'Bsmt_Half_Bath', 'Full_Bath',
'Half_Bath', 'Bedroom_AbvGr', 'Kitchen_AbvGr', 'Kitchen_Qual',
'TotRms_AbvGrd', 'Functional', 'Fireplaces', 'Fireplace_Qu',
'Garage_Type', 'Garage_Yr_Blt', 'Garage_Finish', 'Garage_Cars',
'Garage_Area', 'Garage_Qual', 'Garage_Cond', 'Paved_Drive',
'Wood_Deck_SF', 'Open_Porch_SF', 'Enclosed_Porch', 'Three_season_porch',
'Screen_Porch', 'Pool_Area', 'Pool_QC', 'Fence', 'Misc_Feature',
'Misc_Val', 'Mo_Sold', 'Year_Sold', 'Sale_Type', 'Sale_Condition',
'Age', 'Garage_Age']
# =========================================================================== #
# ORDINAL MAP #
# =========================================================================== #
ordinal_map = {'BsmtFin_Type_1': {'ALQ': 5, 'BLQ': 4, 'GLQ': 6, 'LwQ': 2, 'No_Basement': 0, 'Rec': 3, 'Unf': 1},
'BsmtFin_Type_2': {'ALQ': 5, 'BLQ': 4, 'GLQ': 6, 'LwQ': 2, 'No_Basement': 0, 'Rec': 3, 'Unf': 1},
'Bsmt_Cond': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Basement': 0, 'Poor': 1, 'Typical': 3},
'Bsmt_Exposure': {'Av': 3, 'Gd': 4, 'Mn': 2, 'No': 1, 'No_Basement': 0},
'Bsmt_Qual': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Basement': 0, 'Poor': 1, 'Typical': 3},
'Electrical': {'FuseA': 4, 'FuseF': 2, 'FuseP': 1, 'Mix': 0, 'SBrkr': 5, 'Unknown': 3},
'Exter_Cond': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Exter_Qual': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Fence': {'Good_Privacy': 4, 'Good_Wood': 2, 'Minimum_Privacy': 3, 'Minimum_Wood_Wire': 1,'No_Fence': 0},
'Fireplace_Qu': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Fireplace': 0, 'Poor': 1, 'Typical': 3},
'Functional': {'Maj1': 3, 'Maj2': 2, 'Min1': 5, 'Min2': 6, 'Mod': 4, 'Sal': 0, 'Sev': 1, 'Typ': 7},
'Garage_Cond': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Garage': 0, 'Poor': 1, 'Typical': 3},
'Garage_Finish': {'Fin': 3, 'No_Garage': 0, 'RFn': 2, 'Unf': 1},
'Garage_Qual': {'Excellent': 5, 'Fair': 2, 'Good': 4, 'No_Garage': 0, 'Poor': 1, 'Typical': 3},
'Heating_QC': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Kitchen_Qual': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'Poor': 0, 'Typical': 2},
'Land_Slope': {'Gtl': 0, 'Mod': 1, 'Sev': 2},
'Lot_Shape': {'Irregular': 0, 'Moderately_Irregular': 1, 'Regular': 3, 'Slightly_Irregular': 2},
'Overall_Cond': {'Above_Average': 5, 'Average': 4,'Below_Average': 3,'Excellent': 8,'Fair': 2,
'Good': 6,'Poor': 1,'Very_Excellent': 9,'Very_Good': 7,'Very_Poor': 0},
'Overall_Qual': {'Above_Average': 5,'Average': 4,'Below_Average': 3,'Excellent': 8,'Fair': 2,
'Good': 6,'Poor': 1,'Very_Excellent': 9,'Very_Good': 7,'Very_Poor': 0},
'Paved_Drive': {'Dirt_Gravel': 0, 'Partial_Pavement': 1, 'Paved': 2},
'Pool_QC': {'Excellent': 4, 'Fair': 1, 'Good': 3, 'No_Pool': 0, 'Typical': 2},
'Utilities': {'AllPub': 2, 'NoSeWa': 0, 'NoSewr': 1}}
# =========================================================================== #
# ESTIMATORS #
# =========================================================================== #
model_groups = {
"Regressors": {
"Linear Regression": {
"Estimator": LinearRegression(),
"Parameters": {"normalize": [False],"n_jobs": [4],"copy_X": [True]}
},
"Lasso": {
"Estimator": Lasso(),
"Parameters": {
"alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0]}
},
"Ridge": {
"Estimator": Ridge(),
"Parameters": {
"alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0]}
},
"ElasticNet": {
"Estimator": ElasticNet(),
"Parameters": {
"alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0],
"l1_ratio": np.arange(0.0,1.0,0.1)}
}
},
"Ensembles": {
"Random Forest": {
"Estimator": RandomForestRegressor(),
"Parameters": {
"n_estimators": [50,100],
"max_depth": [2,3,4,5,6],
"criterion": ["mse"],
"min_samples_split": [0.005, 0.01, 0.05, 0.10],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
"max_features": ["auto"],
"n_jobs": [4]}
},
"AdaBoost": {
"Estimator": AdaBoostRegressor(),
"Parameters": {
"base_estimator": [DecisionTreeRegressor()],
"n_estimators": [50,100],
"learning_rate": [0.001, 0.01, 0.05, 0.1, 0.25, 0.50, 0.75, 1.0]}
},
"Bagging": {
"Estimator": BaggingRegressor(),
"Parameters": {
"base_estimator": [DecisionTreeRegressor()],
"n_estimators": [50,100],
"max_features": [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"n_jobs": [4]}
},
"Extra Trees": {
"Estimator": ExtraTreesRegressor(),
"Parameters": {
"n_estimators": [50,100],
"max_depth": [2,3,4,5,6],
"min_samples_split": [0.005, 0.01, 0.05, 0.10],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
"max_features": ["auto"],
"n_jobs": [4]}
},
"Gradient Boosting": {
"Estimator": GradientBoostingRegressor(),
"Parameters": {
"learning_rate": [0.15,0.1,0.05,0.01,0.005,0.001],
"n_estimators": [50,100],
"max_depth": [2,3,4,5,6],
"criterion": ["mse"],
"min_samples_split": [0.005, 0.01, 0.05, 0.10],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
"max_features": ["auto"]}
},
"Histogram Gradient Boosting": {
"Estimator": HistGradientBoostingRegressor(),
"Parameters": {
"learning_rate": [0.15,0.1,0.05,0.01,0.005,0.001],
"max_depth": [2,3,4,5,6],
"min_samples_leaf": [0.005, 0.01, 0.05, 0.10]}
}
}
}
regressors = model_groups["Regressors"]
ensembles = model_groups["Ensembles"]
# =========================================================================== #
# 0. BASE TRANSFORMER #
# =========================================================================== #
class BaseTransformer(ABC):
def __init__(self):
pass
def fit(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
self._fit(X_old, y_old)
return self
def transform(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
X_new = self._transform(X_old, y_old)
assert(len(X_new.columns) == len(X_old.columns)), f"Old new columns mismatch"
assert(X_new.isnull().sum().sum() == 0), f"Warning nulls in test after clean"
X["X"] = X_new
return X
# =========================================================================== #
# 1. BASE SELECTOR #
# =========================================================================== #
class BaseSelector(ABC):
def __init__(self):
pass
def fit(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
self._fit(X_old, y_old)
return self
def transform(self, X, y=None):
X_old = pd.DataFrame(data=X["X"], columns=X["Features"])
y_old = X["y"]
X_new = self._transform(X_old, y_old)
assert(X_new.isnull().sum().sum() == 0), f"Warning nulls"
X["X"] = X_new
X["Features"] = X_new.columns
return X
# =========================================================================== #
# 2. DATA CLEANER #
# =========================================================================== #
class DataCleaner(BaseSelector):
def __init__(self):
pass
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
notify.entering(__class__.__name__, "transform")
X["Garage_Yr_Blt"].replace(to_replace=2207, value=2007, inplace=True)
X = X.drop(columns=["Latitude", "Longitude"])
X = X.fillna(X.median())
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 3. FEATURE ENGINEERING #
# =========================================================================== #
class FeatureEngineer(BaseSelector):
def __init__(self):
pass
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
notify.entering(__class__.__name__, "transform")
        # Add an age feature derived from year sold and year built
        X["Age"] = X["Year_Sold"] - X["Year_Built"]
        X["Age"].fillna(X["Age"].median(), inplace=True)
# Add age feature for garage.
X["Garage_Age"] = X["Year_Sold"] - X["Garage_Yr_Blt"]
X["Garage_Age"].fillna(value=0,inplace=True)
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 4. CONTINUOUS PREPROCESSING #
# =========================================================================== #
class ContinuousPreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, continuous=continuous):
self._continuous = continuous
def _fit(self, X, y=None, **fit_params):
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
# Impute missing values as linear function of other features
imputer = IterativeImputer()
X[self._continuous] = imputer.fit_transform(X[self._continuous])
# Power transformation to make feature distributions closer to Guassian
power = PowerTransformer(method="yeo-johnson", standardize=False)
X[self._continuous] = power.fit_transform(X[self._continuous])
# Scale the features and standardize to zero mean unit variance
scaler = StandardScaler()
X[self._continuous] = scaler.fit_transform(X[self._continuous])
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 5. DISCRETE PREPROCESSING #
# =========================================================================== #
class DiscretePreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, strategy="most_frequent", discrete=discrete):
self._strategy = strategy
self._discrete = discrete
def _fit(self, X, y=None, **fit_params):
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
# Missing discrete variables will be imputed according to the strategy provided
# Default strategy is the mean.
imputer = SimpleImputer(strategy=self._strategy)
X[self._discrete] = imputer.fit_transform(X[self._discrete])
# Scale the features and standardize to zero mean unit variance
scaler = StandardScaler()
X[self._discrete] = scaler.fit_transform(X[self._discrete])
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 6. ORDINAL PREPROCESSING #
# =========================================================================== #
class OrdinalPreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, strategy="most_frequent", ordinal=ordinal,
ordinal_map=ordinal_map):
self._strategy = strategy
self._ordinal = ordinal
self._ordinal_map = ordinal_map
def _fit(self, X, y=None, **fit_params):
return self
def _transform(self, X, y=None, **transform_params):
notify.entering(__class__.__name__, "transform")
categorical = list(X.select_dtypes(include=["object"]).columns)
# Create imputer object
imputer = SimpleImputer(strategy=self._strategy)
# Perform imputation of categorical variables to most frequent
X[self._ordinal] = imputer.fit_transform(X[self._ordinal])
# Map levels to ordinal mappings
for variable, mappings in self._ordinal_map.items():
for k,v in mappings.items():
X[variable].replace({k:v}, inplace=True)
# Scale the features and standardize to zero mean unit variance
scaler = StandardScaler()
X[self._ordinal] = scaler.fit_transform(X[self._ordinal])
notify.leaving(__class__.__name__, "transform")
return X
# =========================================================================== #
# 7. TARGET LEAVE-ONE-OUT ENCODER #
# =========================================================================== #
class TargetEncoderLOO(TargetEncoder):
"""Leave-one-out target encoder.
Source: https://brendanhasz.github.io/2019/03/04/target-encoding
"""
def __init__(self, cols=nominal):
"""Leave-one-out target encoding for categorical features.
Parameters
----------
cols : list of str
Columns to target encode.
"""
self.cols = cols
def fit(self, X, y):
"""Fit leave-one-out target encoder to X and y
Parameters
----------
X : pandas DataFrame, shape [n_samples, n_columns]
DataFrame containing columns to target encode
y : pandas Series, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [col for col in X
if str(X[col].dtype)=='object']
# Check columns are in X
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
# Encode each element of each column
self.sum_count = dict()
for col in self.cols:
self.sum_count[col] = dict()
uniques = X[col].unique()
for unique in uniques:
ix = X[col]==unique
count = X[X[col] == unique].shape[0]
singleton = "N" if (count > 1) else "Y"
self.sum_count[col][unique] = \
(y[ix].sum(),ix.sum(), singleton)
# Return the fit object
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Uses leave-one-out target encoding for the training fold,
and uses normal target encoding for the test fold.
Parameters
----------
X : pandas DataFrame, shape [n_samples, n_columns]
DataFrame containing columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Create output dataframe
Xo = X.copy()
# Use normal target encoding if this is test data
if y is None:
for col in self.sum_count.keys():
vals = np.full(X.shape[0], np.nan)
for cat, sum_count in self.sum_count[col].items():
vals[X[col]==cat] = sum_count[0]/sum_count[1]
Xo[col] = vals
# LOO target encode each column
else:
for col in self.sum_count.keys():
vals = np.full(X.shape[0], np.nan)
for cat, sum_count in self.sum_count[col].items():
ix = X[col]==cat
if sum_count[2] == "Y":
vals[ix] = sum_count[0]/sum_count[1]
else:
vals[ix] = (sum_count[0]-y[ix])/(sum_count[1]-1)
Xo[col] = vals
# Return encoded DataFrame
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data via target encoding.
Parameters
----------
X : pandas DataFrame, shape [n_samples, n_columns]
DataFrame containing columns to encode
y : pandas Series, shape = [n_samples]
Target values (required!).
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
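# Minimal usage sketch of TargetEncoderLOO (illustrative only): the toy frame,
# the "Neighborhood" column and the target values are assumptions, not part of
# the original pipeline.
def _demo_target_encoder_loo():
    import pandas as pd
    X_toy = pd.DataFrame({"Neighborhood": ["A", "A", "B", "B", "B"]})
    y_toy = pd.Series([100.0, 200.0, 150.0, 250.0, 200.0])
    encoder = TargetEncoderLOO(cols=["Neighborhood"])
    # Training fold: each row is encoded with the mean target of its category
    # computed over the *other* rows of that category (leave-one-out), which
    # limits target leakage, e.g. row 0 ("A") becomes (300-100)/(2-1) = 200.
    X_train_encoded = encoder.fit_transform(X_toy, y_toy)
    # Test fold (y=None): plain per-category target means are used instead,
    # e.g. every "A" row becomes 300/2 = 150.
    X_test_encoded = encoder.transform(X_toy)
    return X_train_encoded, X_test_encoded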
# =========================================================================== #
# 8. NOMINAL PREPROCESSING #
# =========================================================================== #
class NominalPreprocessor(BaseEstimator, TransformerMixin, BaseTransformer):
def __init__(self, encoder=TargetEncoderLOO(cols=nominal), nominal=nominal):
self._encoder = encoder
self._nominal = nominal
def _fit(self, X, y=None, **fit_params):
notify.entering(__class__.__name__, "fit")
notify.leaving(__class__.__name__, "fit")
return self
    def _transform(self, X, y=None, **transform_params):
        notify.entering(__class__.__name__, "transform")
        self._encoder.fit(X, y)
        X = self._encoder.transform(X, y)
        # Scale the features and standardize to zero mean unit variance
        scaler = StandardScaler()
        X[self._nominal] = scaler.fit_transform(X[self._nominal])
        #X = X.fillna(X.mean())
        notify.leaving(__class__.__name__, "transform")
        return X
def fit_transform(self, X,y=None):
self.fit(X,y)
return self.transform(X,y)
# =========================================================================== #
# 9. BASE FILTER #
# =========================================================================== #
class BaseFilter(BaseSelector):
def __init__(self):
pass
def report(self, X, y=None):
classname = self.__class__.__name__
message = f"The following {len(self.features_removed_)} features were removed from the data.\n{self.features_removed_}."
comment.regarding(classname, message)
# =========================================================================== #
# 10. COLLINEARITY FILTER #
# =========================================================================== #
class CollinearityFilter(BaseFilter):
def __init__(self, features, threshold=0.65, alpha=0.05, numeric=numeric):
self._threshold = threshold
self._alpha = alpha
self._features = features
self._numeric = numeric
def _fit(self, X, y=None):
return X, y
def _transform(self, X, y=None):
        notify.entering(__class__.__name__, "_transform")
self.features_removed_ = []
correlations = pd.DataFrame()
all_columns = X.columns.tolist()
columns = list(set(X.columns.tolist()).intersection(self._numeric))
# Perform pairwise correlation coefficient calculations
for col_a, col_b in itertools.combinations(columns,2):
r, p = pearsonr(X[col_a], X[col_b])
cols = col_a + "__" + col_b
d = {"Columns": cols, "A": col_a, "B": col_b,"Correlation": r, "p-value": p}
df = pd.DataFrame(data=d, index=[0])
correlations = pd.concat((correlations, df), axis=0)
# Now compute correlation between features and target.
relevance = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
import clustring_kmean_forced
def kmedoid_clusters(path_test):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
city = editable_data['city']
save_path = os.path.join(path_test, str('Scenario Generation') , city)
representative_days_path = os.path.join(save_path,'Operation Representative days')
if not os.path.exists(representative_days_path):
os.makedirs(representative_days_path)
folder_path = os.path.join(path_test,str(city))
range_data = ['low','medium','high']
scenario_genrated = {}
scenario_probability = defaultdict(list)
scenario_number = {}
num_scenario = 0
i_solar= range_data[1]
i_wind= range_data[1]
i_emission= range_data[1]
    # Load the energy demand, solar, wind, and electricity emissions from the scenario generation files
for i_demand in range_data:
if i_demand=='low':
p_demand = 0.277778
elif i_demand=='medium':
p_demand = 0.444444
elif i_demand=='high':
p_demand = 0.277778
for day in range(365):
scenario_probability['D:'+i_demand].append(p_demand)
scenario_number['D:'+i_demand]= num_scenario
num_scenario = num_scenario + 1
scenario_genrated['D:'+i_demand] = pd.read_csv(os.path.join(save_path, 'D_'+i_demand+'_S_'+i_solar+'_W_'+i_wind+'_C_'+i_emission+'.csv'), header=None)
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
k=0
days= 365
for scenario in scenario_genrated.keys():
scenario_genrated[scenario]=scenario_genrated[scenario]
for i in range(days):
if i==0:
data = scenario_genrated[scenario][1:25]
else:
data = scenario_genrated[scenario][25+(i-1)*24:25+(i)*24]
#Total electricity, heating, solar, wind, EF.
daily_list =list(chain(data[0].astype('float', copy=False),data[1].astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[scenario][i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[scenario][i])
k = k+1
A = np.asarray(features_scenarios_list)
#Convert the dictionary of features to Series
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
inertia_list = []
    search_optimum_cluster = editable_data['Search optimum clusters'] # 'yes' to search for the optimum number of clusters, 'no' to use the value already given in editable_values.csv
cluster_range = range(2,20,1)
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
if search_optimum_cluster=='yes':
print('Defining the optimum number of clusters: ')
fig, ax = plt.subplots(figsize=(12, 6))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(A_scaled)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
        ax.set_title('The user should use the "Elbow method" to select the optimum number of clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,20,1))
plt.savefig(os.path.join(sys.path[0], 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
        print('You should enter the new optimum number of clusters in the editable_values.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A_scaled)
#kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(A_scaled)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = A_scaled[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
A_scaled_list={}
clusters={}
clusters_list = []
label_list = []
data_labels={}
data_all_labels = defaultdict(list)
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(A_scaled)):
A_scaled_list[scenario]=A_scaled[scenario].tolist()
data_all_labels[kmedoids.labels_[scenario]].append(standardization_data.inverse_transform(A_scaled_list[scenario]))
A_scaled_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= A_scaled_list[scenario]
label_list.append(A_scaled[scenario].tolist())
df_clusters= | pd.DataFrame(clusters) | pandas.DataFrame |
from argparse import ArgumentParser
import glob
import json
import pandas as pd
from collections import defaultdict
def main():
p = ArgumentParser()
p.add_argument("dir")
p.add_argument("out")
args = p.parse_args()
notebooks = sorted(glob.glob(args.dir + "/*.ipynb"))
data = defaultdict(list)
for notebook in notebooks:
if "MISC" in notebook:
continue
fname = notebook.split("/")[-1]
with open(notebook) as nb:
nb_content = json.load(nb)
for i, cell in enumerate(nb_content["cells"]):
if not cell["source"]:
continue
cell_source = cell["source"][:]
if "@slideshow slide" in cell_source[0]:
cell_source = cell_source[1:]
slide_type = "slide"
elif "@slideshow fragment" in cell_source[0]:
cell_source = cell_source[1:]
slide_type = "slide"
else:
slide_type = "-"
cell_type = cell["cell_type"]
n_lines = len(cell_source)
cell_text = "\t".join([s.strip() for s in cell_source])
data["filename"].append(fname)
data["cell_index"].append(i)
data["cell_type"].append(cell_type)
data["slide_type"].append(slide_type)
data["n_lines"].append(n_lines)
data["cell_text"].append(cell_text)
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
Test for the visualization functionality
"""
import pandas as pd
from pandas_profiling import ProfileReport
from pandas_profiling.utils.cache import cache_file
test_data = | pd.DataFrame([1, 2, 3]) | pandas.DataFrame |
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-n:
    Number of sampled model trajectories used to visualise the model fit (default: 100).
-k:
    Number of Poisson draws per sampled trajectory (default: 1).
Samples dictionaries are loaded from ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Calibration robustness figures for both waves, a hospitalisations versus R0 figure,
and a comparison of the prevention parameters between both waves.
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
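# -----------------------
# Load Sciensano data
# -----------------------
# Assumed step: df_sciensano is used throughout this script but is not defined
# in this excerpt. The loader below follows the covid19model.data.sciensano
# module imported above; adjust the call if the project uses a different name.
df_sciensano = sciensano.get_sciensano_COVID19_data(update=False)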
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
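# Hedged sketch (not part of the original analysis): sanity check of crosscorr
# on synthetic data. A series compared with a copy of itself shifted forward
# by three samples should correlate almost perfectly at lag = 3.
def _demo_crosscorr():
    x = pd.Series(np.sin(np.linspace(0, 10, 200)))
    y = x.shift(-3)                # y leads x by three samples
    return crosscorr(x, y, lag=3)  # ~1.0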
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
        policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                                          school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
    # Loop over the sampled model trajectories and add Poisson observation noise
    for n in range(data.shape[0]):
        poisson_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
        vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = poisson_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
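# Hedged sketch (not part of the original script): compute_R0 on synthetic
# inputs. All numbers below are made-up illustrations, not calibrated values.
def _demo_compute_R0():
    initN_toy = np.array([1e6, 2e6])                     # two age groups
    Nc_toy = np.array([[8., 3.], [3., 5.]])              # toy contact matrix
    samples_toy = {'beta': [0.03, 0.04], 'da': [7., 7.], 'omega': [2., 2.]}
    params_toy = {'a': np.array([0.4, 0.7])}             # toy values for model parameter 'a'
    # Returns the per-age-group R0 samples and the population-weighted mean R0
    return compute_R0(initN_toy, Nc_toy, samples_toy, params_toy)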
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1']*15 + ['2']*15,
           ['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
            'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL',
            'total_mean','total_LL','total_UL']*2]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
for j,samples_dict in enumerate(samples_dicts):
print('\n WAVE: ' + str(j)+'\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_rest[idx,:] = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_rest_mean = np.mean(Re_rest,axis=1)
Re_rest_LL = np.quantile(Re_rest,q=0.05/2,axis=1)
Re_rest_UL = np.quantile(Re_rest,q=1-0.05/2,axis=1)
# ---------------
# Work prevention
# ---------------
print('Work\n')
data_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = | pd.Timedelta(l, unit='D') | pandas.Timedelta |
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
        result.iloc[i] = (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
return result
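# Hedged usage sketch (illustrative only): EMA of a short toy price series.
# With n=3, m=1 each step blends the previous EMA (weight (n-m)/n) with the
# newest observation (weight m/n), per the docstring formula.
def _demo_ema():
    prices = pd.Series([10.0, 11.0, 12.0, 11.5])
    return ema(prices, n=3, m=1)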
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
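# Hedged usage sketch (illustrative only): WMA of a toy price series. Per the
# docstring, the weights run from 0.9*n for the oldest observation in the
# window down to 0.9 for the newest, and they are not normalised to sum to one.
def _demo_wma():
    prices = pd.Series([10.0, 11.0, 12.0, 13.0])
    return wma(prices, n=3)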
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
Wrapper function to estimate rolling corelations.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
def rolling_prod(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
try:
df = df.to_frame() #Series is not supported for the calculations below.
except:
pass
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])
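# Hedged usage sketch (illustrative only): linearly decaying weighted average
# of a toy close-price series. With period=3 the newest observation carries
# weight 3/6, the previous one 2/6 and the oldest 1/6. Note the result is a
# single-column DataFrame labelled 'CLOSE' regardless of the input name.
def _demo_decay_linear():
    close = pd.Series([10.0, 11.0, 12.0, 13.0, 14.0])
    return decay_linear(close, period=3)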
def highday(df, n): # number of periods between the current point and the maximum of the previous n observations
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmax()
return result
def lowday(df, n): # number of periods between the current point and the minimum of the previous n observations
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmin()
return result
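# Hedged usage sketch (illustrative only): for each point, highday/lowday give
# the number of periods between the current point and the maximum/minimum of
# the preceding n observations (the rolling window excludes the current point).
def _demo_highday_lowday():
    series = pd.Series([1.0, 5.0, 3.0, 2.0, 4.0, 1.0])
    return highday(series, n=4), lowday(series, n=4)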
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.drop("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = pd.DataFrame(stock_list.stack())
dataset.reset_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=pd.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average vwap data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average vwap data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
            VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)  # AMOUNT is in thousand CNY and VOL in lots of 100 shares; +1 avoids division by zero
result_unaveraged_piece = VWAP
result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_df
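#Minimal sketch (hypothetical arguments) of how the table returned above is typically consumed:
#align the industry's column on the stock's trading dates and subtract it from a per-stock series,
#which is the same industry-demeaning pattern the Alphas class below uses for indneutralize().
def _demo_industry_demean(per_stock_vwap, available_dates, industry_name):
    ind_avg = IndustryAverage_vwap()                        # TRADE_DATE x INDUSTRY table
    ind_avg = ind_avg[ind_avg.index.isin(available_dates)]  # keep only the stock's trading days
    ind_avg = ind_avg[industry_name].reset_index(drop=True)
    return per_stock_vwap - ind_avg                         # industry-demeaned series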
def IndustryAverage_close():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average close data does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average close data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_low():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average low data does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average low data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_volume():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average volume data does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average volume data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_adv(num):
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average adv{num} data does not need to be updated.".format(num=num))
return result_industryaveraged_df
else:
print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
first_date_update = date_list_update[0]
except:
print("The corresponding industry average adv{num} data is missing.".format(num=num))
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')
return result_industryaveraged_df
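#Example: IndustryAverage_adv(20) builds or updates IndustryAverage_Data_adv20.csv, i.e. the
#industry average of adv20 (the 20-day simple moving average of share volume), and returns it
#as a TRADE_DATE x INDUSTRY DataFrame.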
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha048 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha048 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha048 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha059 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha059 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha059 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha079 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha079 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha079 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha080 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha080 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha080 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
HIGH = quotations_daily_chosen['HIGH']
result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha097 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha097 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha097 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (LOW * 0.721001) + (VWAP * (1 - 0.721001))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA097_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA097_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha097.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#rank(((((close - low) - (high -close)) / (high - low)) * volume))
def IndustryAverage_PreparationForAlpha100_1():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha100_1.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha100_1 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha100_1 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha100_1 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
HIGH = quotations_daily_chosen['HIGH']
LOW = quotations_daily_chosen['LOW']
CLOSE = quotations_daily_chosen['CLOSE']
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = rank(((((CLOSE - LOW) - (HIGH -CLOSE)) / (HIGH - LOW)) * VOLUME))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA100_1_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA100_1_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha100_1.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30)))
def IndustryAverage_PreparationForAlpha100_2():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha100_2.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
            print("The corresponding industry average data for alpha100_2 does not need to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha100_2 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha100_2 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculate the raw (not yet industry-averaged) data for each stock
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0] -30
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
VOLUME = quotations_daily_chosen['VOL']*100
adv20 = sma(VOLUME, 30)
result_unaveraged_piece = (correlation(CLOSE, rank(adv20), 5) - rank(ts_argmin(CLOSE, 30)))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA100_2_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #average the per-stock data by industry for each trade date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA100_2_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha100_2.csv",encoding='utf-8-sig')
return result_industryaveraged_df
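#All IndustryAverage_* helpers above share the same incremental pattern: read the cached CSV if it
#exists, compute only the trade dates that are still missing for each industry, and write the merged
#table back to disk before returning it.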
class Alphas(object):
def __init__(self, ts_code="000001.SZ",start_date=20210101,end_date=20211231):
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_chosen=local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_data_chosen=pd.merge(quotations_daily_chosen,stock_indicators_daily_chosen,on=['TRADE_DATE','TS_CODE'],how="left")
stock_data_chosen["TOTAL_MV"]=stock_data_chosen["TOTAL_SHARE"]*stock_data_chosen["CLOSE"]
stock_data_chosen=stock_data_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
stock_data_chosen["TRADE_DATE"]=stock_data_chosen["TRADE_DATE"].astype(int)
self.open = stock_data_chosen['OPEN']
self.high = stock_data_chosen['HIGH']
self.low = stock_data_chosen['LOW']
self.close = stock_data_chosen['CLOSE']
self.volume = stock_data_chosen['VOL']*100
self.returns = stock_data_chosen['CHANGE'] / stock_data_chosen['OPEN']
self.vwap = (stock_data_chosen['AMOUNT']*1000)/(stock_data_chosen['VOL']*100+1)
self.cap = stock_data_chosen['TOTAL_MV']
self.industry = local_source.get_stock_list(cols='TS_CODE,INDUSTRY', condition='TS_CODE = '+'"'+ts_code+'"')['INDUSTRY'].iloc[0]
self.available_dates = stock_data_chosen["TRADE_DATE"]
output_dates = stock_data_chosen[(stock_data_chosen["TRADE_DATE"]>=start_date)*(stock_data_chosen["TRADE_DATE"]<=end_date)]["TRADE_DATE"]
start_available_date = output_dates.iloc[0]
end_available_date = output_dates.iloc[-1]
self.start_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == start_available_date].index[0]
self.end_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == end_available_date].index[0] +1
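    #Illustrative usage (requires a populated local_source database; sketch only, not executed here):
    #  factors = Alphas(ts_code="000001.SZ", start_date=20210101, end_date=20211231)
    #  a006 = factors.alpha006()   # pandas Series with one value per trading day in the window
    #  a048 = factors.alpha048()   # alphas using indneutralize() read the IndustryAverage_* tables above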
# Alpha#1 (rank(Ts_ArgMax(SignedPower(((returns < 0) ? stddev(returns, 20) : close), 2.), 5)) -0.5)
def alpha001(self):
inner = self.close
inner[self.returns < 0] = stddev(self.returns, 20)
alpha = rank(ts_argmax(inner ** 2, 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#2 (-1 * correlation(rank(delta(log(volume), 2)), rank(((close - open) / open)), 6))
def alpha002(self):
df = -1 * correlation(rank(delta(log(self.volume), 2)), rank((self.close - self.open) / self.open), 6)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#3 (-1 * correlation(rank(open), rank(volume), 10))
def alpha003(self):
df = -1 * correlation(rank(self.open), rank(self.volume), 10)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#4 (-1 * Ts_Rank(rank(low), 9))
def alpha004(self):
alpha = -1 * ts_rank(rank(self.low), 9)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#5 (rank((open - (sum(vwap, 10) / 10))) * (-1 * abs(rank((close - vwap)))))
def alpha005(self):
alpha = (rank((self.open - (sum(self.vwap, 10) / 10))) * (-1 * abs(rank((self.close - self.vwap)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#6 (-1 * correlation(open, volume, 10))
def alpha006(self):
df = -1 * correlation(self.open, self.volume, 10)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#7 ((adv20 < volume) ? ((-1 * ts_rank(abs(delta(close, 7)), 60)) * sign(delta(close, 7))) : (-1* 1))
def alpha007(self):
adv20 = sma(self.volume, 20)
alpha = -1 * ts_rank(abs(delta(self.close, 7)), 60) * sign(delta(self.close, 7))
alpha[adv20 >= self.volume] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#8 (-1 * rank(((sum(open, 5) * sum(returns, 5)) - delay((sum(open, 5) * sum(returns, 5)),10))))
def alpha008(self):
alpha = -1 * (rank(((ts_sum(self.open, 5) * ts_sum(self.returns, 5)) - delay((ts_sum(self.open, 5) * ts_sum(self.returns, 5)), 10))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#9 ((0 < ts_min(delta(close, 1), 5)) ? delta(close, 1) : ((ts_max(delta(close, 1), 5) < 0) ?delta(close, 1) : (-1 * delta(close, 1))))
def alpha009(self):
delta_close = delta(self.close, 1)
cond_1 = ts_min(delta_close, 5) > 0
cond_2 = ts_max(delta_close, 5) < 0
alpha = -1 * delta_close
alpha[cond_1 | cond_2] = delta_close
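        #Boolean masking emulates the formula's nested ternary: the default is the "else" branch
        #(-1 * delta_close), and rows where either condition holds are overwritten with delta_close.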
return alpha[self.start_date_index:self.end_date_index]
# Alpha#10 rank(((0 < ts_min(delta(close, 1), 4)) ? delta(close, 1) : ((ts_max(delta(close, 1), 4) < 0)? delta(close, 1) : (-1 * delta(close, 1)))))
def alpha010(self):
delta_close = delta(self.close, 1)
cond_1 = ts_min(delta_close, 4) > 0
cond_2 = ts_max(delta_close, 4) < 0
alpha = -1 * delta_close
alpha[cond_1 | cond_2] = delta_close
return alpha[self.start_date_index:self.end_date_index]
# Alpha#11 ((rank(ts_max((vwap - close), 3)) + rank(ts_min((vwap - close), 3))) *rank(delta(volume, 3)))
def alpha011(self):
alpha = ((rank(ts_max((self.vwap - self.close), 3)) + rank(ts_min((self.vwap - self.close), 3))) *rank(delta(self.volume, 3)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#12 (sign(delta(volume, 1)) * (-1 * delta(close, 1)))
def alpha012(self):
alpha = sign(delta(self.volume, 1)) * (-1 * delta(self.close, 1))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#13 (-1 * rank(covariance(rank(close), rank(volume), 5)))
def alpha013(self):
alpha = -1 * rank(covariance(rank(self.close), rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#14 ((-1 * rank(delta(returns, 3))) * correlation(open, volume, 10))
def alpha014(self):
df = correlation(self.open, self.volume, 10)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * rank(delta(self.returns, 3)) * df
return alpha[self.start_date_index:self.end_date_index]
# Alpha#15 (-1 * sum(rank(correlation(rank(high), rank(volume), 3)), 3))
def alpha015(self):
df = correlation(rank(self.high), rank(self.volume), 3)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * ts_sum(rank(df), 3)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#16 (-1 * rank(covariance(rank(high), rank(volume), 5)))
def alpha016(self):
alpha = -1 * rank(covariance(rank(self.high), rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#17 (((-1 * rank(ts_rank(close, 10))) * rank(delta(delta(close, 1), 1))) *rank(ts_rank((volume / adv20), 5)))
def alpha017(self):
adv20 = sma(self.volume, 20)
alpha = -1 * (rank(ts_rank(self.close, 10)) * rank(delta(delta(self.close, 1), 1)) * rank(ts_rank((self.volume / adv20), 5)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#18 (-1 * rank(((stddev(abs((close - open)), 5) + (close - open)) + correlation(close, open,10))))
def alpha018(self):
df = correlation(self.close, self.open, 10)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * (rank((stddev(abs((self.close - self.open)), 5) + (self.close - self.open)) + df))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#19 ((-1 * sign(((close - delay(close, 7)) + delta(close, 7)))) * (1 + rank((1 + sum(returns,250)))))
def alpha019(self):
alpha = ((-1 * sign((self.close - delay(self.close, 7)) + delta(self.close, 7))) * (1 + rank(1 + ts_sum(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#20 (((-1 * rank((open - delay(high, 1)))) * rank((open - delay(close, 1)))) * rank((open -delay(low, 1))))
def alpha020(self):
alpha = -1 * (rank(self.open - delay(self.high, 1)) * rank(self.open - delay(self.close, 1)) * rank(self.open - delay(self.low, 1)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#21 ((((sum(close, 8) / 8) + stddev(close, 8)) < (sum(close, 2) / 2)) ? (-1 * 1) : (((sum(close,2) / 2) < ((sum(close, 8) / 8) - stddev(close, 8))) ? 1 : (((1 < (volume / adv20)) || ((volume /adv20) == 1)) ? 1 : (-1 * 1))))
def alpha021(self):
cond_1 = sma(self.close, 8) + stddev(self.close, 8) < sma(self.close, 2)
cond_2 = sma(self.volume, 20) / self.volume < 1
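        #The nested ternary is collapsed below: default to 1, then mark -1 where the first branch
        #fires (sma8 + stddev8 < sma2) or where volume exceeds its 20-day average (cond_2).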
alpha = pd.DataFrame(np.ones_like(self.close), index=self.close.index)
#alpha = pd.DataFrame(np.ones_like(self.close), index=self.close.index, columns=self.close.columns)
alpha[cond_1 | cond_2] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#22 (-1 * (delta(correlation(high, volume, 5), 5) * rank(stddev(close, 20))))
def alpha022(self):
df = correlation(self.high, self.volume, 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * delta(df, 5) * rank(stddev(self.close, 20))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#23 (((sum(high, 20) / 20) < high) ? (-1 * delta(high, 2)) : 0)
def alpha023(self):
cond = sma(self.high, 20) < self.high
alpha = pd.DataFrame(np.zeros_like(self.close),index=self.close.index,columns=['close'])
        alpha.loc[cond, 'close'] = -1 * delta(self.high, 2).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#24 ((((delta((sum(close, 100) / 100), 100) / delay(close, 100)) < 0.05) ||((delta((sum(close, 100) / 100), 100) / delay(close, 100)) == 0.05)) ? (-1 * (close - ts_min(close,100))) : (-1 * delta(close, 3)))
def alpha024(self):
cond = delta(sma(self.close, 100), 100) / delay(self.close, 100) <= 0.05
alpha = -1 * delta(self.close, 3)
alpha[cond] = -1 * (self.close - ts_min(self.close, 100))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#25 rank(((((-1 * returns) * adv20) * vwap) * (high - close)))
def alpha025(self):
adv20 = sma(self.volume, 20)
alpha = rank(((((-1 * self.returns) * adv20) * self.vwap) * (self.high - self.close)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#26 (-1 * ts_max(correlation(ts_rank(volume, 5), ts_rank(high, 5), 5), 3))
def alpha026(self):
df = correlation(ts_rank(self.volume, 5), ts_rank(self.high, 5), 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * ts_max(df, 3)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#27 ((0.5 < rank((sum(correlation(rank(volume), rank(vwap), 6), 2) / 2.0))) ? (-1 * 1) : 1)
    def alpha027(self): #note: this implementation may be problematic
alpha = rank((sma(correlation(rank(self.volume), rank(self.vwap), 6), 2) / 2.0))
alpha[alpha > 0.5] = -1
alpha[alpha <= 0.5]=1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#28 scale(((correlation(adv20, low, 5) + ((high + low) / 2)) - close))
def alpha028(self):
adv20 = sma(self.volume, 20)
df = correlation(adv20, self.low, 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = scale(((df + ((self.high + self.low) / 2)) - self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#29 (min(product(rank(rank(scale(log(sum(ts_min(rank(rank((-1 * rank(delta((close - 1),5))))), 2), 1))))), 1), 5) + ts_rank(delay((-1 * returns), 6), 5))
def alpha029(self):
alpha = (ts_min(rank(rank(scale(log(ts_sum(rank(rank(-1 * rank(delta((self.close - 1), 5)))), 2))))), 5) + ts_rank(delay((-1 * self.returns), 6), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#30 (((1.0 - rank(((sign((close - delay(close, 1))) + sign((delay(close, 1) - delay(close, 2)))) +sign((delay(close, 2) - delay(close, 3)))))) * sum(volume, 5)) / sum(volume, 20))
def alpha030(self):
delta_close = delta(self.close, 1)
inner = sign(delta_close) + sign(delay(delta_close, 1)) + sign(delay(delta_close, 2))
alpha = ((1.0 - rank(inner)) * ts_sum(self.volume, 5)) / ts_sum(self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#31 ((rank(rank(rank(decay_linear((-1 * rank(rank(delta(close, 10)))), 10)))) + rank((-1 *delta(close, 3)))) + sign(scale(correlation(adv20, low, 12))))
def alpha031(self):
adv20 = sma(self.volume, 20)
df = correlation(adv20, self.low, 12).replace([-np.inf, np.inf], 0).fillna(value=0)
p1=rank(rank(rank(decay_linear((-1 * rank(rank(delta(self.close, 10)))), 10))))
p2=rank((-1 * delta(self.close, 3)))
p3=sign(scale(df))
alpha = p1.CLOSE+p2+p3
return alpha[self.start_date_index:self.end_date_index]
# Alpha#32 (scale(((sum(close, 7) / 7) - close)) + (20 * scale(correlation(vwap, delay(close, 5),230))))
def alpha032(self):
alpha = scale(((sma(self.close, 7) / 7) - self.close)) + (20 * scale(correlation(self.vwap, delay(self.close, 5),230)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#33 rank((-1 * ((1 - (open / close))^1)))
def alpha033(self):
alpha = rank(-1 + (self.open / self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#34 rank(((1 - rank((stddev(returns, 2) / stddev(returns, 5)))) + (1 - rank(delta(close, 1)))))
def alpha034(self):
inner = stddev(self.returns, 2) / stddev(self.returns, 5)
inner = inner.replace([-np.inf, np.inf], 1).fillna(value=1)
alpha = rank(2 - rank(inner) - rank(delta(self.close, 1)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#35 ((Ts_Rank(volume, 32) * (1 - Ts_Rank(((close + high) - low), 16))) * (1 -Ts_Rank(returns, 32)))
def alpha035(self):
alpha = ((ts_rank(self.volume, 32) * (1 - ts_rank(self.close + self.high - self.low, 16))) * (1 - ts_rank(self.returns, 32)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#36 (((((2.21 * rank(correlation((close - open), delay(volume, 1), 15))) + (0.7 * rank((open- close)))) + (0.73 * rank(Ts_Rank(delay((-1 * returns), 6), 5)))) + rank(abs(correlation(vwap,adv20, 6)))) + (0.6 * rank((((sum(close, 200) / 200) - open) * (close - open)))))
def alpha036(self):
adv20 = sma(self.volume, 20)
alpha = (((((2.21 * rank(correlation((self.close - self.open), delay(self.volume, 1), 15))) + (0.7 * rank((self.open- self.close)))) + (0.73 * rank(ts_rank(delay((-1 * self.returns), 6), 5)))) + rank(abs(correlation(self.vwap,adv20, 6)))) + (0.6 * rank((((sma(self.close, 200) / 200) - self.open) * (self.close - self.open)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#37 (rank(correlation(delay((open - close), 1), close, 200)) + rank((open - close)))
def alpha037(self):
alpha = rank(correlation(delay(self.open - self.close, 1), self.close, 200)) + rank(self.open - self.close)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#38 ((-1 * rank(Ts_Rank(close, 10))) * rank((close / open)))
def alpha038(self):
inner = self.close / self.open
inner = inner.replace([-np.inf, np.inf], 1).fillna(value=1)
        alpha = -1 * rank(ts_rank(self.close, 10)) * rank(inner) #the formula above uses Ts_Rank(close, 10)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#39 ((-1 * rank((delta(close, 7) * (1 - rank(decay_linear((volume / adv20), 9)))))) * (1 +rank(sum(returns, 250))))
def alpha039(self):
adv20 = sma(self.volume, 20)
alpha = ((-1 * rank(delta(self.close, 7) * (1 - rank(decay_linear((self.volume / adv20), 9).CLOSE)))) * (1 + rank(sma(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#40 ((-1 * rank(stddev(high, 10))) * correlation(high, volume, 10))
def alpha040(self):
alpha = -1 * rank(stddev(self.high, 10)) * correlation(self.high, self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#41 (((high * low)^0.5) - vwap)
def alpha041(self):
alpha = pow((self.high * self.low),0.5) - self.vwap
return alpha[self.start_date_index:self.end_date_index]
# Alpha#42 (rank((vwap - close)) / rank((vwap + close)))
def alpha042(self):
alpha = rank((self.vwap - self.close)) / rank((self.vwap + self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#43 (ts_rank((volume / adv20), 20) * ts_rank((-1 * delta(close, 7)), 8))
def alpha043(self):
adv20 = sma(self.volume, 20)
alpha = ts_rank(self.volume / adv20, 20) * ts_rank((-1 * delta(self.close, 7)), 8)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#44 (-1 * correlation(high, rank(volume), 5))
def alpha044(self):
df = correlation(self.high, rank(self.volume), 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * df
return alpha[self.start_date_index:self.end_date_index]
# Alpha#45 (-1 * ((rank((sum(delay(close, 5), 20) / 20)) * correlation(close, volume, 2)) *rank(correlation(sum(close, 5), sum(close, 20), 2))))
def alpha045(self):
df = correlation(self.close, self.volume, 2)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * (rank(sma(delay(self.close, 5), 20)) * df * rank(correlation(ts_sum(self.close, 5), ts_sum(self.close, 20), 2)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#46 ((0.25 < (((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10))) ?(-1 * 1) : (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < 0) ? 1 :((-1 * 1) * (close - delay(close, 1)))))
def alpha046(self):
inner = ((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10)
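        # The nested ternary in the formula is unrolled by starting from the default branch
        # (-1 * delta(close, 1)) and overwriting the rows where the other conditions hold;
        # alpha049 and alpha051 below reuse the same pattern with different thresholds.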
        alpha = (-1 * delta(self.close, 1))
alpha[inner < 0] = 1
alpha[inner > 0.25] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#47 ((((rank((1 / close)) * volume) / adv20) * ((high * rank((high - close))) / (sum(high, 5) /5))) - rank((vwap - delay(vwap, 5))))
def alpha047(self):
adv20 = sma(self.volume, 20)
alpha = ((((rank((1 / self.close)) * self.volume) / adv20) * ((self.high * rank((self.high - self.close))) / (sma(self.high, 5) /5))) - rank((self.vwap - delay(self.vwap, 5))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#48 (indneutralize(((correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close), IndClass.subindustry) / sum(((delta(close, 1) / delay(close, 1))^2), 250))
def alpha048(self):
indaverage_data = IndustryAverage_PreparationForAlpha048()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (correlation(delta(self.close, 1), delta(delay(self.close, 1), 1), 250) *delta(self.close, 1)) / self.close
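        # IndNeutralize(...) is approximated throughout this class by subtracting the
        # industry-average series (IndustryAverage_* helpers) rather than performing a
        # full cross-sectional neutralization.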
indneutralized_data = unindneutralized_data - indaverage_data
alpha = indneutralized_data / sma(((delta(self.close, 1) / delay(self.close, 1))**2), 250)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#49 (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < (-1 *0.1)) ? 1 : ((-1 * 1) * (close - delay(close, 1))))
def alpha049(self):
inner = (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10))
        alpha = (-1 * delta(self.close, 1))
alpha[inner < -0.1] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#50 (-1 * ts_max(rank(correlation(rank(volume), rank(vwap), 5)), 5))
def alpha050(self):
alpha = (-1 * ts_max(rank(correlation(rank(self.volume), rank(self.vwap), 5)), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#51 (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < (-1 *0.05)) ? 1 : ((-1 * 1) * (close - delay(close, 1))))
def alpha051(self):
inner = (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10))
        alpha = (-1 * delta(self.close, 1))
alpha[inner < -0.05] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#52 ((((-1 * ts_min(low, 5)) + delay(ts_min(low, 5), 5)) * rank(((sum(returns, 240) -sum(returns, 20)) / 220))) * ts_rank(volume, 5))
def alpha052(self):
alpha = (((-1 * delta(ts_min(self.low, 5), 5)) * rank(((ts_sum(self.returns, 240) - ts_sum(self.returns, 20)) / 220))) * ts_rank(self.volume, 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#53 (-1 * delta((((close - low) - (high - close)) / (close - low)), 9))
def alpha053(self):
inner = (self.close - self.low).replace(0, 0.0001)
alpha = -1 * delta((((self.close - self.low) - (self.high - self.close)) / inner), 9)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#54 ((-1 * ((low - close) * (open^5))) / ((low - high) * (close^5)))
def alpha054(self):
inner = (self.low - self.high).replace(0, -0.0001)
alpha = -1 * (self.low - self.close) * (self.open ** 5) / (inner * (self.close ** 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#55 (-1 * correlation(rank(((close - ts_min(low, 12)) / (ts_max(high, 12) - ts_min(low,12)))), rank(volume), 6))
def alpha055(self):
divisor = (ts_max(self.high, 12) - ts_min(self.low, 12)).replace(0, 0.0001)
inner = (self.close - ts_min(self.low, 12)) / (divisor)
df = correlation(rank(inner), rank(self.volume), 6)
alpha = -1 * df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#56 (0 - (1 * (rank((sum(returns, 10) / sum(sum(returns, 2), 3))) * rank((returns * cap)))))
def alpha056(self):
alpha = (0 - (1 * (rank((sma(self.returns, 10) / sma(sma(self.returns, 2), 3))) * rank((self.returns * self.cap)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#57 (0 - (1 * ((close - vwap) / decay_linear(rank(ts_argmax(close, 30)), 2))))
def alpha057(self):
alpha = (0 - (1 * ((self.close - self.vwap) / decay_linear(rank(ts_argmax(self.close, 30)), 2).CLOSE)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#58 (-1 * Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.sector), volume,3.92795), 7.89291), 5.50322))
def alpha058(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
alpha = (-1 * ts_rank(decay_linear(correlation(indneutralized_vwap, self.volume, 4), 8), 6))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#59 (-1 * Ts_Rank(decay_linear(correlation(IndNeutralize(((vwap * 0.728317) + (vwap *(1 - 0.728317))), IndClass.industry), volume, 4.25197), 16.2289), 8.19648))
def alpha059(self):
indaverage_data = IndustryAverage_PreparationForAlpha059()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.vwap * 0.728317) + (self.vwap *(1 - 0.728317))
indneutralized_data = unindneutralized_data - indaverage_data
alpha = (-1 * ts_rank(decay_linear(correlation(indneutralized_data, self.volume, 4), 16), 8))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#60 (0 - (1 * ((2 * scale(rank(((((close - low) - (high - close)) / (high - low)) * volume)))) -scale(rank(ts_argmax(close, 10))))))
def alpha060(self):
divisor = (self.high - self.low).replace(0, 0.0001)
inner = ((self.close - self.low) - (self.high - self.close)) * self.volume / divisor
alpha = - ((2 * scale(rank(inner))) - scale(rank(ts_argmax(self.close, 10))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#61 (rank((vwap - ts_min(vwap, 16.1219))) < rank(correlation(vwap, adv180, 17.9282)))
def alpha061(self):
adv180 = sma(self.volume, 180)
alpha = (rank((self.vwap - ts_min(self.vwap, 16))) < rank(correlation(self.vwap, adv180, 18)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#62 ((rank(correlation(vwap, sum(adv20, 22.4101), 9.91009)) < rank(((rank(open) +rank(open)) < (rank(((high + low) / 2)) + rank(high))))) * -1)
def alpha062(self):
adv20 = sma(self.volume, 20)
alpha = ((rank(correlation(self.vwap, sma(adv20, 22), 10)) < rank(((rank(self.open) +rank(self.open)) < (rank(((self.high + self.low) / 2)) + rank(self.high))))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#63 ((rank(decay_linear(delta(IndNeutralize(close, IndClass.industry), 2.25164), 8.22237))- rank(decay_linear(correlation(((vwap * 0.318108) + (open * (1 - 0.318108))), sum(adv180,37.2467), 13.557), 12.2883))) * -1)
def alpha063(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv180 = sma(self.volume, 180)
alpha = ((rank(decay_linear(delta(indneutralized_close, 2), 8))- rank(decay_linear(correlation(((self.vwap * 0.318108) + (self.open * (1 - 0.318108))), sma(adv180, 38), 14), 12))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#64 ((rank(correlation(sum(((open * 0.178404) + (low * (1 - 0.178404))), 12.7054),sum(adv120, 12.7054), 16.6208)) < rank(delta(((((high + low) / 2) * 0.178404) + (vwap * (1 -0.178404))), 3.69741))) * -1)
def alpha064(self):
adv120 = sma(self.volume, 120)
alpha = ((rank(correlation(sma(((self.open * 0.178404) + (self.low * (1 - 0.178404))), 13),sma(adv120, 13), 17)) < rank(delta(((((self.high + self.low) / 2) * 0.178404) + (self.vwap * (1 -0.178404))), 3.69741))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#65 ((rank(correlation(((open * 0.00817205) + (vwap * (1 - 0.00817205))), sum(adv60,8.6911), 6.40374)) < rank((open - ts_min(open, 13.635)))) * -1)
def alpha065(self):
adv60 = sma(self.volume, 60)
alpha = ((rank(correlation(((self.open * 0.00817205) + (self.vwap * (1 - 0.00817205))), sma(adv60,9), 6)) < rank((self.open - ts_min(self.open, 14)))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#66 ((rank(decay_linear(delta(vwap, 3.51013), 7.23052)) + Ts_Rank(decay_linear(((((low* 0.96633) + (low * (1 - 0.96633))) - vwap) / (open - ((high + low) / 2))), 11.4157), 6.72611)) * -1)
def alpha066(self):
alpha = ((rank(decay_linear(delta(self.vwap, 4), 7).CLOSE) + ts_rank(decay_linear(((((self.low* 0.96633) + (self.low * (1 - 0.96633))) - self.vwap) / (self.open - ((self.high + self.low) / 2))), 11).CLOSE, 7)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#67 ((rank((high - ts_min(high, 2.14593)))^rank(correlation(IndNeutralize(vwap,IndClass.sector), IndNeutralize(adv20, IndClass.subindustry), 6.02936))) * -1)
def alpha067(self):
indaverage_adv20 = IndustryAverage_adv(20)
indaverage_adv20 = indaverage_adv20[indaverage_adv20.index.isin(self.available_dates)]
indaverage_adv20 = indaverage_adv20[self.industry]
indaverage_adv20 = indaverage_adv20.reset_index(drop=True)
adv20 = sma(self.volume, 20)
indneutralized_adv20 = adv20 - indaverage_adv20
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
alpha = rank((self.high - ts_min(self.high, 2))) ** rank(correlation(indneutralized_vwap, indneutralized_adv20, 6)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#68 ((Ts_Rank(correlation(rank(high), rank(adv15), 8.91644), 13.9333) <rank(delta(((close * 0.518371) + (low * (1 - 0.518371))), 1.06157))) * -1)
def alpha068(self):
adv15 = sma(self.volume, 15)
alpha = ((ts_rank(correlation(rank(self.high), rank(adv15), 9), 14) <rank(delta(((self.close * 0.518371) + (self.low * (1 - 0.518371))), 1.06157))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#69 ((rank(ts_max(delta(IndNeutralize(vwap, IndClass.industry), 2.72412),4.79344))^Ts_Rank(correlation(((close * 0.490655) + (vwap * (1 - 0.490655))), adv20, 4.92416),9.0615)) * -1)
def alpha069(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv20 = sma(self.volume, 20)
alpha = ((rank(ts_max(delta(indneutralized_vwap, 3),5)) ** ts_rank(correlation(((self.close * 0.490655) + (self.vwap * (1 - 0.490655))), adv20, 5),9)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#70 ((rank(delta(vwap, 1.29456))^Ts_Rank(correlation(IndNeutralize(close,IndClass.industry), adv50, 17.8256), 17.9171)) * -1)
def alpha070(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv50 = sma(self.volume, 50)
alpha = (rank(delta(self.vwap, 1)) ** ts_rank(correlation(indneutralized_close, adv50, 18), 18)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#71 max(Ts_Rank(decay_linear(correlation(Ts_Rank(close, 3.43976), Ts_Rank(adv180,12.0647), 18.0175), 4.20501), 15.6948), Ts_Rank(decay_linear((rank(((low + open) - (vwap +vwap)))^2), 16.4662), 4.4388))
def alpha071(self):
adv180 = sma(self.volume, 180)
p1=ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18), 4).CLOSE, 16)
p2=ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)), 16).CLOSE, 4)
df=pd.DataFrame({'p1':p1,'p2':p2})
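        # element-wise max of the two components via a helper DataFrame; the same
        # max/min-selection pattern is reused in alphas 73, 76, 77, 82, 87, 88, 92 and 96.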
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = df['max']
#alpha = max(ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18).to_frame(), 4).CLOSE, 16), ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)).to_frame(), 16).CLOSE, 4))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#72 (rank(decay_linear(correlation(((high + low) / 2), adv40, 8.93345), 10.1519)) /rank(decay_linear(correlation(Ts_Rank(vwap, 3.72469), Ts_Rank(volume, 18.5188), 6.86671),2.95011)))
def alpha072(self):
adv40 = sma(self.volume, 40)
alpha = (rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 9).to_frame(), 10).CLOSE) /rank(decay_linear(correlation(ts_rank(self.vwap, 4), ts_rank(self.volume, 19), 7).to_frame(),3).CLOSE))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#73 (max(rank(decay_linear(delta(vwap, 4.72775), 2.91864)),Ts_Rank(decay_linear(((delta(((open * 0.147155) + (low * (1 - 0.147155))), 2.03608) / ((open *0.147155) + (low * (1 - 0.147155)))) * -1), 3.33829), 16.7411)) * -1)
def alpha073(self):
p1=rank(decay_linear(delta(self.vwap, 5).to_frame(), 3).CLOSE)
p2=ts_rank(decay_linear(((delta(((self.open * 0.147155) + (self.low * (1 - 0.147155))), 2) / ((self.open *0.147155) + (self.low * (1 - 0.147155)))) * -1).to_frame(), 3).CLOSE, 17)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#74 ((rank(correlation(close, sum(adv30, 37.4843), 15.1365)) <rank(correlation(rank(((high * 0.0261661) + (vwap * (1 - 0.0261661)))), rank(volume), 11.4791)))* -1)
def alpha074(self):
adv30 = sma(self.volume, 30)
alpha = ((rank(correlation(self.close, sma(adv30, 37), 15)) <rank(correlation(rank(((self.high * 0.0261661) + (self.vwap * (1 - 0.0261661)))), rank(self.volume), 11)))* -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#75 (rank(correlation(vwap, volume, 4.24304)) < rank(correlation(rank(low), rank(adv50),12.4413)))
def alpha075(self):
adv50 = sma(self.volume, 50)
alpha = (rank(correlation(self.vwap, self.volume, 4)) < rank(correlation(rank(self.low), rank(adv50),12)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#76 (max(rank(decay_linear(delta(vwap, 1.24383), 11.8259)),Ts_Rank(decay_linear(Ts_Rank(correlation(IndNeutralize(low, IndClass.sector), adv81,8.14941), 19.569), 17.1543), 19.383)) * -1)
def alpha076(self):
indaverage_low = IndustryAverage_low()
indaverage_low = indaverage_low[indaverage_low.index.isin(self.available_dates)]
indaverage_low = indaverage_low[self.industry]
indaverage_low = indaverage_low.reset_index(drop=True)
indneutralized_low = self.low - indaverage_low
adv81 = sma(self.volume, 81)
p1 = rank(decay_linear(delta(self.vwap.to_frame(), 1), 12))
p2 = ts_rank(decay_linear(ts_rank(correlation(indneutralized_low, adv81, 8).to_frame(), 20), 17), 19)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#77 min(rank(decay_linear(((((high + low) / 2) + high) - (vwap + high)), 20.0451)),rank(decay_linear(correlation(((high + low) / 2), adv40, 3.1614), 5.64125)))
def alpha077(self):
adv40 = sma(self.volume, 40)
p1=rank(decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)).to_frame(), 20).CLOSE)
p2=rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 3).to_frame(), 6).CLOSE)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#78 (rank(correlation(sum(((low * 0.352233) + (vwap * (1 - 0.352233))), 19.7428),sum(adv40, 19.7428), 6.83313))^rank(correlation(rank(vwap), rank(volume), 5.77492)))
def alpha078(self):
adv40 = sma(self.volume, 40)
alpha = (rank(correlation(ts_sum(((self.low * 0.352233) + (self.vwap * (1 - 0.352233))), 20),ts_sum(adv40,20), 7)).pow(rank(correlation(rank(self.vwap), rank(self.volume), 6))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#79 (rank(delta(IndNeutralize(((close * 0.60733) + (open * (1 - 0.60733))),IndClass.sector), 1.23438)) < rank(correlation(Ts_Rank(vwap, 3.60973), Ts_Rank(adv150,9.18637), 14.6644)))
def alpha079(self):
indaverage_data = IndustryAverage_PreparationForAlpha079()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.close * 0.60733) + (self.open * (1 - 0.60733))
indneutralized_data = unindneutralized_data - indaverage_data
adv150 = sma(self.volume, 150)
alpha = (rank(delta(indneutralized_data, 1)) < rank(correlation(ts_rank(self.vwap, 4), ts_rank(adv150, 9), 15))) *1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#80 ((rank(Sign(delta(IndNeutralize(((open * 0.868128) + (high * (1 - 0.868128))),IndClass.industry), 4.04545)))^Ts_Rank(correlation(high, adv10, 5.11456), 5.53756)) * -1)
def alpha080(self):
indaverage_data = IndustryAverage_PreparationForAlpha080()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.open * 0.868128) + (self.high * (1 - 0.868128))
indneutralized_data = unindneutralized_data - indaverage_data
adv10 = sma(self.volume, 10)
alpha = rank(sign(delta(indneutralized_data, 4))) ** (ts_rank(correlation(self.high, adv10, 5), 6)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#81 ((rank(Log(product(rank((rank(correlation(vwap, sum(adv10, 49.6054),8.47743))^4)), 14.9655))) < rank(correlation(rank(vwap), rank(volume), 5.07914))) * -1)
def alpha081(self):
adv10 = sma(self.volume, 10)
alpha = ((rank(log(product(rank((rank(correlation(self.vwap, ts_sum(adv10, 50),8)).pow(4))), 15))) < rank(correlation(rank(self.vwap), rank(self.volume), 5))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#82 (min(rank(decay_linear(delta(open, 1.46063), 14.8717)),Ts_Rank(decay_linear(correlation(IndNeutralize(volume, IndClass.sector), ((open * 0.634196) +(open * (1 - 0.634196))), 17.4842), 6.92131), 13.4283)) * -1)
def alpha082(self):
indaverage_volume = IndustryAverage_volume()
indaverage_volume = indaverage_volume[indaverage_volume.index.isin(self.available_dates)]
indaverage_volume = indaverage_volume[self.industry]
indaverage_volume = indaverage_volume.reset_index(drop=True)
indneutralized_volume = self.volume - indaverage_volume
p1 = rank(decay_linear(delta(self.open, 1).to_frame(), 15))
p2 = ts_rank(decay_linear(correlation(indneutralized_volume, ((self.open * 0.634196)+(self.open * (1 - 0.634196))), 17).to_frame(), 7), 13)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = -1 * df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#83 ((rank(delay(((high - low) / (sum(close, 5) / 5)), 2)) * rank(rank(volume))) / (((high -low) / (sum(close, 5) / 5)) / (vwap - close)))
def alpha083(self):
alpha = ((rank(delay(((self.high - self.low) / (ts_sum(self.close, 5) / 5)), 2)) * rank(rank(self.volume))) / (((self.high -self.low) / (ts_sum(self.close, 5) / 5)) / (self.vwap - self.close)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#84 SignedPower(Ts_Rank((vwap - ts_max(vwap, 15.3217)), 20.7127), delta(close,4.96796))
def alpha084(self):
alpha = pow(ts_rank((self.vwap - ts_max(self.vwap, 15)), 21), delta(self.close,5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#85 (rank(correlation(((high * 0.876703) + (close * (1 - 0.876703))), adv30,9.61331))^rank(correlation(Ts_Rank(((high + low) / 2), 3.70596), Ts_Rank(volume, 10.1595),7.11408)))
def alpha085(self):
adv30 = sma(self.volume, 30)
alpha = (rank(correlation(((self.high * 0.876703) + (self.close * (1 - 0.876703))), adv30,10)).pow(rank(correlation(ts_rank(((self.high + self.low) / 2), 4), ts_rank(self.volume, 10),7))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#86 ((Ts_Rank(correlation(close, sum(adv20, 14.7444), 6.00049), 20.4195) < rank(((open+ close) - (vwap + open)))) * -1)
def alpha086(self):
adv20 = sma(self.volume, 20)
alpha = ((ts_rank(correlation(self.close, sma(adv20, 15), 6), 20) < rank(((self.open+ self.close) - (self.vwap +self.open)))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#87 (max(rank(decay_linear(delta(((close * 0.369701) + (vwap * (1 - 0.369701))),1.91233), 2.65461)), Ts_Rank(decay_linear(abs(correlation(IndNeutralize(adv81,IndClass.industry), close, 13.4132)), 4.89768), 14.4535)) * -1)
def alpha087(self):
indaverage_adv81 = IndustryAverage_adv(81)
indaverage_adv81 = indaverage_adv81[indaverage_adv81.index.isin(self.available_dates)]
indaverage_adv81 = indaverage_adv81[self.industry]
indaverage_adv81 = indaverage_adv81.reset_index(drop=True)
adv81 = sma(self.volume, 81)
indneutralized_adv81 = adv81 - indaverage_adv81
p1 = rank(decay_linear(delta(((self.close * 0.369701) + (self.vwap * (1 - 0.369701))),2).to_frame(), 3))
p2 = ts_rank(decay_linear(abs(correlation(indneutralized_adv81, self.close, 13)), 5), 14)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#88 min(rank(decay_linear(((rank(open) + rank(low)) - (rank(high) + rank(close))),8.06882)), Ts_Rank(decay_linear(correlation(Ts_Rank(close, 8.44728), Ts_Rank(adv60,20.6966), 8.01266), 6.65053), 2.61957))
def alpha088(self):
adv60 = sma(self.volume, 60)
p1=rank(decay_linear(((rank(self.open) + rank(self.low)) - (rank(self.high) + rank(self.close))).to_frame(),8).CLOSE)
p2=ts_rank(decay_linear(correlation(ts_rank(self.close, 8), ts_rank(adv60,21), 8).to_frame(), 7).CLOSE, 3)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#89 (Ts_Rank(decay_linear(correlation(((low * 0.967285) + (low * (1 - 0.967285))), adv10,6.94279), 5.51607), 3.79744) - Ts_Rank(decay_linear(delta(IndNeutralize(vwap,IndClass.industry), 3.48158), 10.1466), 15.3012))
def alpha089(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv10 = sma(self.volume, 10)
        alpha = ts_rank(decay_linear(correlation(((self.low * 0.967285) + (self.low * (1 - 0.967285))), adv10, 7), 6), 4) - ts_rank(decay_linear(delta(indneutralized_vwap, 3), 10), 15)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#90 ((rank((close - ts_max(close, 4.66719)))^Ts_Rank(correlation(IndNeutralize(adv40,IndClass.subindustry), low, 5.38375), 3.21856)) * -1)
def alpha090(self):
indaverage_adv40 = IndustryAverage_adv(40)
indaverage_adv40 = indaverage_adv40[indaverage_adv40.index.isin(self.available_dates)]
indaverage_adv40 = indaverage_adv40[self.industry]
indaverage_adv40 = indaverage_adv40.reset_index(drop=True)
adv40 = sma(self.volume, 40)
indneutralized_adv40 = adv40 - indaverage_adv40
alpha = ((rank((self.close - ts_max(self.close, 5))) ** ts_rank(correlation(indneutralized_adv40, self.low, 5), 3)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#91 ((Ts_Rank(decay_linear(decay_linear(correlation(IndNeutralize(close,IndClass.industry), volume, 9.74928), 16.398), 3.83219), 4.8667) -rank(decay_linear(correlation(vwap, adv30, 4.01303), 2.6809))) * -1)
def alpha091(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv30 = sma(self.volume, 30)
alpha = ((ts_rank(decay_linear(decay_linear(correlation(indneutralized_close, self.volume, 10), 16), 4), 5) -rank(decay_linear(correlation(self.vwap, adv30, 4), 3))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#92 min(Ts_Rank(decay_linear(((((high + low) / 2) + close) < (low + open)), 14.7221),18.8683), Ts_Rank(decay_linear(correlation(rank(low), rank(adv30), 7.58555), 6.94024),6.80584))
def alpha092(self):
adv30 = sma(self.volume, 30)
p1=ts_rank(decay_linear(((((self.high + self.low) / 2) + self.close) < (self.low + self.open)).to_frame(), 15).CLOSE,19)
p2=ts_rank(decay_linear(correlation(rank(self.low), rank(adv30), 8).to_frame(), 7).CLOSE,7)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#93 (Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.industry), adv81,17.4193), 19.848), 7.54455) / rank(decay_linear(delta(((close * 0.524434) + (vwap * (1 -0.524434))), 2.77377), 16.2664)))
def alpha093(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv81 = sma(self.volume, 81)
alpha = (ts_rank(decay_linear(correlation(indneutralized_vwap, adv81, 17).to_frame(), 20), 8) / rank(decay_linear(delta(((self.close * 0.524434) + (self.vwap * (1 -0.524434))), 3).to_frame(), 16)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#94 ((rank((vwap - ts_min(vwap, 11.5783)))^Ts_Rank(correlation(Ts_Rank(vwap,19.6462), Ts_Rank(adv60, 4.02992), 18.0926), 2.70756)) * -1)
def alpha094(self):
adv60 = sma(self.volume, 60)
alpha = ((rank((self.vwap - ts_min(self.vwap, 12))).pow(ts_rank(correlation(ts_rank(self.vwap,20), ts_rank(adv60, 4), 18), 3)) * -1))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#95 (rank((open - ts_min(open, 12.4105))) < Ts_Rank((rank(correlation(sum(((high + low)/ 2), 19.1351), sum(adv40, 19.1351), 12.8742))^5), 11.7584))
def alpha095(self):
adv40 = sma(self.volume, 40)
alpha = (rank((self.open - ts_min(self.open, 12))) < ts_rank((rank(correlation(sma(((self.high + self.low)/ 2), 19), sma(adv40, 19), 13)).pow(5)), 12))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#96 (max(Ts_Rank(decay_linear(correlation(rank(vwap), rank(volume), 3.83878),4.16783), 8.38151), Ts_Rank(decay_linear(Ts_ArgMax(correlation(Ts_Rank(close, 7.45404),Ts_Rank(adv60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143)) * -1)
def alpha096(self):
adv60 = sma(self.volume, 60)
        p1=ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume), 4), 4).CLOSE, 8)
p2=ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1*df['max']
#alpha = (max(ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume).to_frame(), 4),4).CLOSE, 8), ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#97 ((rank(decay_linear(delta(IndNeutralize(((low * 0.721001) + (vwap * (1 - 0.721001))),IndClass.industry), 3.3705), 20.4523)) - Ts_Rank(decay_linear(Ts_Rank(correlation(Ts_Rank(low,7.87871), Ts_Rank(adv60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659)) * -1)
def alpha097(self):
indaverage_data = IndustryAverage_PreparationForAlpha097()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.low * 0.721001) + (self.vwap * (1 - 0.721001))
indneutralized_data = unindneutralized_data - indaverage_data
adv60 = sma(self.volume, 60)
alpha = ((rank(decay_linear(delta(indneutralized_data, 3).to_frame(), 20)) - ts_rank(decay_linear(ts_rank(correlation(ts_rank(self.low,8), ts_rank(adv60, 17), 5), 19).to_frame(), 16), 7)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#98 (rank(decay_linear(correlation(vwap, sum(adv5, 26.4719), 4.58418), 7.18088)) -rank(decay_linear(Ts_Rank(Ts_ArgMin(correlation(rank(open), rank(adv15), 20.8187), 8.62571),6.95668), 8.07206)))
def alpha098(self):
adv5 = sma(self.volume, 5)
adv15 = sma(self.volume, 15)
alpha = (rank(decay_linear(correlation(self.vwap, sma(adv5, 26), 5).to_frame(), 7).CLOSE) -rank(decay_linear(ts_rank(ts_argmin(correlation(rank(self.open), rank(adv15), 21), 9),7).to_frame(), 8).CLOSE))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#99 ((rank(correlation(sum(((high + low) / 2), 19.8975), sum(adv60, 19.8975), 8.8136)) <rank(correlation(low, volume, 6.28259))) * -1)
def alpha099(self):
adv60 = sma(self.volume, 60)
alpha = ((rank(correlation(ts_sum(((self.high + self.low) / 2), 20), ts_sum(adv60, 20), 9)) <rank(correlation(self.low, self.volume, 6))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#100 (0 - (1 * (((1.5 * scale(indneutralize(indneutralize(rank(((((close - low) - (high -close)) / (high - low)) * volume)), IndClass.subindustry), IndClass.subindustry))) -scale(indneutralize((correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30))),IndClass.subindustry))) * (volume / adv20))))
def alpha100(self):
indaverage_data_1 = IndustryAverage_PreparationForAlpha100_1()
indaverage_data_1 = indaverage_data_1[indaverage_data_1.index.isin(self.available_dates)]
indaverage_data_1 = indaverage_data_1[self.industry]
indaverage_data_1 = indaverage_data_1.reset_index(drop=True)
unindneutralized_data_1 = rank(((((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)) * self.volume))
indneutralized_data_1 = unindneutralized_data_1 - indaverage_data_1 #there's a problem in calculation here.
indaverage_data_2 = IndustryAverage_PreparationForAlpha100_2()
indaverage_data_2 = indaverage_data_2[indaverage_data_2.index.isin(self.available_dates)]
indaverage_data_2 = indaverage_data_2[self.industry]
indaverage_data_2 = indaverage_data_2.reset_index(drop=True)
adv20 = sma(self.volume, 20)
unindneutralized_data_2 = (correlation(self.close, rank(adv20), 5) - rank(ts_argmin(self.close, 30)))
indneutralized_data_2 = unindneutralized_data_2 - indaverage_data_2
alpha = (0 - (1 * (((1.5 * scale(indneutralized_data_1))-scale(indneutralized_data_2)) * (self.volume / adv20))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#101 ((close - open) / ((high - low) + .001))
def alpha101(self):
alpha = (self.close - self.open) /((self.high - self.low) + 0.001)
return alpha[self.start_date_index:self.end_date_index]
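# Usage sketch for the 101-alpha class above (an assumption, not part of the original file:
# the class name "Alphas" and its constructor are presumed to mirror GTJAalphas below):
#     alphas = Alphas(ts_code="000001.SZ", start_date=20210101, end_date=20211231)
#     print(alphas.alpha101().tail())   # each alphaXXX() returns a pandas Series slice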
class GTJAalphas(object):
def __init__(self, ts_code="000001.SZ",start_date=20210101,end_date=20211231):
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_chosen=local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_data_chosen=pd.merge(quotations_daily_chosen,stock_indicators_daily_chosen,on=['TRADE_DATE','TS_CODE'],how="left")
stock_data_chosen["TOTAL_MV"]=stock_data_chosen["TOTAL_SHARE"]*stock_data_chosen["CLOSE"]
stock_data_chosen=stock_data_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
stock_data_chosen["TRADE_DATE"]=stock_data_chosen["TRADE_DATE"].astype(int)
self.open = stock_data_chosen['OPEN']
self.high = stock_data_chosen['HIGH']
self.low = stock_data_chosen['LOW']
self.close = stock_data_chosen['CLOSE']
self.volume = stock_data_chosen['VOL']*100
self.amount = stock_data_chosen['AMOUNT']*1000
self.returns = stock_data_chosen['CHANGE'] / stock_data_chosen['OPEN']
self.vwap = (stock_data_chosen['AMOUNT']*1000)/(stock_data_chosen['VOL']*100+1)
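        # VOL is quoted in lots of 100 shares and AMOUNT in thousands of CNY, hence the
        # *100 / *1000 rescaling above; the +1 in the vwap denominator guards against
        # zero-volume days.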
self.cap = stock_data_chosen['TOTAL_MV']
self.industry = local_source.get_stock_list(cols='TS_CODE,INDUSTRY', condition='TS_CODE = '+'"'+ts_code+'"')['INDUSTRY'].iloc[0]
self.available_dates = stock_data_chosen["TRADE_DATE"]
if ts_code[-2:]=='SZ': index_code = "399001.SZ"
else: index_code = "000001.SH"
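        # benchmark index: SZSE Component Index (399001.SZ) for Shenzhen tickers,
        # otherwise the SSE Composite (000001.SH)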
indices_daily_chosen=local_source.get_indices_daily(cols='TRADE_DATE,INDEX_CODE,OPEN,CLOSE',condition='INDEX_CODE = '+'"'+index_code+'"').sort_values(by="TRADE_DATE", ascending=True)
indices_daily_chosen=indices_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
indices_daily_chosen = pd.merge(stock_data_chosen["TRADE_DATE"], indices_daily_chosen, on=['TRADE_DATE'], how="left")
self.benchmarkindexopen = indices_daily_chosen['OPEN']
self.benchmarkindexclose = indices_daily_chosen['CLOSE']
output_dates = stock_data_chosen[(stock_data_chosen["TRADE_DATE"]>=start_date)*(stock_data_chosen["TRADE_DATE"]<=end_date)]["TRADE_DATE"]
start_available_date = output_dates.iloc[0]
end_available_date = output_dates.iloc[-1]
self.start_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == start_available_date].index[0]
self.end_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == end_available_date].index[0] +1
#Alpha1 (-1 * CORR(RANK(DELTA(LOG(VOLUME), 1)), RANK(((CLOSE - OPEN) / OPEN)), 6))
def GTJAalpha001(self):
alpha = -1 * correlation(rank(delta(np.log(self.volume),1)),rank(((self.close-self.open)/self.open)), 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha2 (-1 * DELTA((((CLOSE - LOW) - (HIGH - CLOSE)) / (HIGH - LOW)), 1))
def GTJAalpha002(self):
alpha = -1 * delta((((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)), 1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha3 SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,DELAY(CLOSE,1)))),6)
def GTJAalpha003(self):
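        # The ternary formula is unrolled with np.where: inner1 resolves the MIN/MAX branch
        # on CLOSE > DELAY(CLOSE, 1), inner2 applies the CLOSE == DELAY(CLOSE, 1) guard.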
delay1 = self.close.shift()
condition1 = (self.close > delay1)
inner1_true = np.minimum(self.low, delay1)
        inner1_false = np.maximum(self.high, delay1)
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close == delay1)
inner2_true = pd.Series(np.zeros(len(condition2)))
inner2_false = self.close - inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha4 ((((SUM(CLOSE, 8) / 8) + STD(CLOSE, 8)) < (SUM(CLOSE, 2) / 2)) ? (-1 * 1) : (((SUM(CLOSE, 2) / 2) < ((SUM(CLOSE, 8) / 8) - STD(CLOSE, 8))) ? 1 : (((1 < (VOLUME / MEAN(VOLUME,20))) || ((VOLUME / MEAN(VOLUME,20)) == 1)) ? 1 : (-1 * 1))))
def GTJAalpha004(self):
condition1 = ((1 < (self.volume / sma(self.volume,20))) | ((self.volume / sma(self.volume,20)) == 1))
inner1_true = pd.Series(np.ones(len(condition1)))
inner1_false = -1 * pd.Series(np.ones(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((ts_sum(self.close, 2) / 2) < ((ts_sum(self.close, 8) / 8) - stddev(self.close, 8)))
        inner2_true = pd.Series(np.ones(len(condition2)))
        inner2_false = inner1
        inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
        condition3 = (((ts_sum(self.close, 8) / 8) + stddev(self.close, 8)) < (ts_sum(self.close, 2) / 2))
        alpha = pd.Series(np.where(condition3, -1, inner2))
return alpha[self.start_date_index:self.end_date_index]
#Alpha5 (-1 * TSMAX(CORR(TSRANK(VOLUME, 5), TSRANK(HIGH, 5), 5), 3))
def GTJAalpha005(self):
alpha = -1 * ts_max(correlation(ts_rank(self.volume,5), ts_rank(self.high,5), 5) ,3)
return alpha[self.start_date_index:self.end_date_index]
#Alpha6 (RANK(SIGN(DELTA((((OPEN * 0.85) + (HIGH * 0.15))), 4)))* -1)
def GTJAalpha006(self):
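        # Note: RANK(...) is implemented with rolling_rank (a time-series rank) because only
        # a single ticker is loaded here, so a cross-sectional rank is not available.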
alpha = rolling_rank(sign(delta((((self.open * 0.85) + (self.high * 0.15))), 4)))* -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha7 ((RANK(MAX((VWAP - CLOSE), 3)) + RANK(MIN((VWAP - CLOSE), 3))) * RANK(DELTA(VOLUME, 3)))
def GTJAalpha007(self):
alpha = (rolling_rank(np.maximum((self.vwap - self.close), 3)) + rolling_rank(np.minimum((self.vwap - self.close), 3))) * rolling_rank(delta(self.volume, 3))
return alpha[self.start_date_index:self.end_date_index]
#Alpha8 RANK(DELTA(((((HIGH + LOW) / 2) * 0.2) + (VWAP * 0.8)), 4) * -1)
def GTJAalpha008(self):
alpha = rolling_rank(delta(((((self.high + self.low) / 2) * 0.2) + (self.vwap * 0.8)), 4) * -1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha9 SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,7,2)
def GTJAalpha009(self):
alpha = ema(((self.high+self.low)/2-(delay(self.high,1)+delay(self.low,1))/2)*(self.high-self.low)/self.volume,7,2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha10 (RANK(MAX(((RET < 0) ? STD(RET, 20) : CLOSE)^2),5))
def GTJAalpha010(self):
condition1 = (self.returns < 0)
inner1_true = stddev(self.returns, 20)
inner1_false = self.close
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
alpha = rolling_rank(np.maximum(inner1**2, 5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha11 SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,6)
def GTJAalpha011(self):
alpha = ts_sum(((self.close-self.low)-(self.high-self.close))/(self.high-self.low)*self.volume,6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha12 (RANK((OPEN - (SUM(VWAP, 10) / 10)))) * (-1 * (RANK(ABS((CLOSE - VWAP)))))
def GTJAalpha012(self):
alpha = rolling_rank((self.open - (ts_sum(self.vwap, 10) / 10))) * -1 * (rolling_rank(abs((self.close - self.vwap))))
return alpha[self.start_date_index:self.end_date_index]
#Alpha13 (((HIGH * LOW)^0.5) - VWAP)
def GTJAalpha013(self):
alpha = ((self.high * self.low)**0.5) - self.vwap
return alpha[self.start_date_index:self.end_date_index]
#Alpha14 CLOSE-DELAY(CLOSE,5)
def GTJAalpha014(self):
alpha = self.close - delay(self.close,5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha15 OPEN/DELAY(CLOSE,1)-1
def GTJAalpha015(self):
alpha = (self.open/delay(self.close, 1)) -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha16 (-1 * TSMAX(RANK(CORR(RANK(VOLUME), RANK(VWAP), 5)), 5))
def GTJAalpha016(self):
alpha = -1 * ts_max(rolling_rank(correlation(rolling_rank(self.volume), rolling_rank(self.vwap), 5)), 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha17 RANK((VWAP - MAX(VWAP, 15)))^DELTA(CLOSE, 5)
    def GTJAalpha017(self):
alpha = rolling_rank((self.vwap - np.maximum(self.vwap, 15)))**delta(self.close, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha18 CLOSE/DELAY(CLOSE,5)
def GTJAalpha018(self):
alpha = self.close / delay(self.close, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha19 (CLOSE<DELAY(CLOSE,5)?(CLOSE-DELAY(CLOSE,5))/DELAY(CLOSE,5):(CLOSE=DELAY(CLOSE,5)?0:(CLOSE-DELAY(CLOSE,5))/CLOSE))
def GTJAalpha019(self):
condition1 = (self.close == delay(self.close,5))
inner1_true=pd.Series(np.zeros(len(condition1)))
inner1_false=(self.close-delay(self.close,5))/self.close
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close<delay(self.close,5))
inner2_true = (self.close-delay(self.close,5))/delay(self.close,5)
inner2_false = inner1
alpha = pd.Series(np.where(condition2, inner2_true, inner2_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha20 (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100
def GTJAalpha020(self):
alpha = (self.close-delay(self.close,6)) / delay(self.close,6) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha21 REGBETA(MEAN(CLOSE,6),SEQUENCE(6))
def GTJAalpha021(self): #I'm not sure if I've understood the formula correctly.
y = sma(self.close, 6)
        alpha = pd.Series(np.nan, index=self.close.index)  # placeholder: REGBETA regression not implemented yet
        return alpha[self.start_date_index:self.end_date_index]
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import numpy as np
def compareCounts(fileList, column):
    df = pd.DataFrame()
import os
import numpy as np
import pandas as pd
from root_pandas import to_root
def generate_data(
size_mc=500000,
size_data=10000,
size_mc_offres=150000,
size_data_offres=8000,
frac_a=0.8):
"""Generate root files to represent data and MC samples to demonstrate
the re-weighting.
Parameters:
size_mc, size_data, size_mc_offres, size_data_offres: number of events
in the corresponding sample.
frac_a: fraction of events in componentA
Return:
data, componentA, componentB, data_offres, componentA_offres:
pd.DataFrames of the generated samples.
"""
frac_b = 1 - frac_a
# GENERATE DATA
print(
"Generating the following dataframes:\n"
"data, componentA, componentB, data_offres and componentA_offres ...")
# Random state for random number generation
rs = np.random.RandomState(seed=1)
# on res
data = pd.DataFrame()
    componentA = pd.DataFrame()
import pandas as pd
from technical_indicators.technical_indicators import trades_to_candles, moving_average, rate_of_change
from technical_indicators.technical_indicators import bollinger_bands, average_true_range, stochastic_oscillator
from technical_indicators.technical_indicators import trix
from db_connection.db_connection import load_data
from matplotlib import pyplot as plt
import numpy as np
from plotly.offline import download_plotlyjs, plot, iplot, init_notebook_mode
import plotly.graph_objs as go
"""
trades = load_data(db_path="../bitso-trades-db/bitso_api.db",
table="bitso_trades",
p_key="tid")
candles = trades_to_candles(trades_data=trades, time_interval="1min")
candles = candles[candles.index < "2021-09-18"]
candles = pd.read_csv("BTC-USD.csv", low_memory=False)
candles.columns = [col.lower() for col in candles.columns]
candles = moving_average(prices=candles, n=10)
candles = moving_average(prices=candles, n=50)
candles = rate_of_change(prices=candles, n=10)
candles = bollinger_bands(prices=candles, n_std=2)
candles = average_true_range(prices=candles, n=50)
candles = stochastic_oscillator(prices=candles)
candles = trix(prices=candles, n=10)
candles["roc_10"].plot()
plt.show()
candles["atr_50"].plot()
plt.show()
candles[["so_k", "so_d"]].plot()
plt.show()
candles["trix_10"].plot()
plt.show()
# CREATE THE STRATEGY LOGIC
datos['alpha'] = datos['mv20'] - datos['mv160']
datos['alpha'].plot()
datos['alpha_bin'] = datos['alpha'].apply(np.sign)
datos['alpha_bin'].plot()
datos['alpha_bin'].value_counts()
datos['alpha_trade_long'] = ((datos['alpha_bin'] == 1) & (datos['alpha_bin'].shift(1) == -1) &
(datos['alpha_bin'].shift(2) == -1) & (datos['alpha_bin'].shift(3) == -1))
datos['alpha_trade_short'] = ((datos['alpha_bin'] == -1) & (datos['alpha_bin'].shift(1) == 1) &
(datos['alpha_bin'].shift(2) == 1) & (datos['alpha_bin'].shift(3) == 1))
datos['alpha_trade_compra'] = np.where(datos['alpha_trade_long'], datos['mv20'], np.nan)
datos['alpha_trade_venta'] = np.where(datos['alpha_trade_short'], datos['mv20'], np.nan)
columnas = ['price', 'mv20', 'mv160', 'alpha_trade_compra', 'alpha_trade_venta']
fig = go.Figure()
for columna in columnas:
if columna == 'alpha_trade_compra':
fig.add_trace(go.Scatter(x=datos.index, y=datos[columna],
mode='markers',
name=columna,
marker=dict(color='green')))
elif columna == 'alpha_trade_venta':
fig.add_trace(go.Scatter(x=datos.index, y=datos[columna],
mode='markers',
name=columna,
marker=dict(color='red')))
else:
fig.add_trace(go.Scatter(x=datos.index, y=datos[columna],
mode='lines',
name=columna))
fig.update_layout(template='plotly_dark')
plot(fig)
"""
if __name__ == "__main__":
# TODO: Parameters
# freq = None
length_fast = 50
length_slow = 200
# threshold = None
# fee_maker = 0.005
# fee_taker = 0.0065
    # TODO: Take into account that the moving average is lagged. Run a backtest to find optimal params.
# Read trades data and turn into prices data
candles = pd.read_csv("BTC-USD.csv", low_memory=False, index_col=0)
candles.columns = [col.lower() for col in candles.columns]
# candles = candles.loc[candles.index < "2018-07-01"]
# Strategy
threshold = 0
    strategy = pd.DataFrame(index=candles.index, columns=["close"])
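    # Sketch of the remaining crossover logic (an assumption pieced together from the
    # commented-out example above and the length_fast/length_slow/threshold parameters;
    # not the original implementation):
    strategy["close"] = candles["close"]
    strategy["ma_fast"] = candles["close"].rolling(length_fast).mean()
    strategy["ma_slow"] = candles["close"].rolling(length_slow).mean()
    strategy["alpha"] = strategy["ma_fast"] - strategy["ma_slow"]
    strategy["signal"] = np.sign(strategy["alpha"] - threshold)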
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 25 16:58:09 2017
@author: dconly
PURPOSE:
Take in multiple CSV files of collision data and combine them
For coordinates, use POINT_X and POINT_Y as defaults; otherwise
use the CHP coordinates (less reliable, but something)
df to numpy array
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.values.html#pandas.DataFrame.values
http://pro.arcgis.com/en/pro-app/arcpy/data-access/numpyarraytotable.htm
"""
import pandas as pd
import arcpy
import re
import os
in_csv_folder = r'I:\Projects\Darren\PPA_V2_GIS\CSV\collision data'
#output as CSV
make_csv = True
out_csv_folder = r'I:\Projects\Darren\PPA_V2_GIS\CSV\collision data'
#output to FGDB
make_fc = False #there's an issue with this, see the make_fc function for details
arcpy.env.workspace = r"I:\Projects\Darren\PPA_V2_GIS\PPA_V2.gdb"
temp_colln_table = "in_memory/temp_collision_table"
colln_xylayer = "colln_xylayer"
collision_fc = "collisions2014_2018"
sr_tims = arcpy.SpatialReference(4326) #4326 = WGS 1984
sr_sacog = arcpy.SpatialReference(2226) #2226 = SACOG NAD 83 CA State Plane Zone 2
#=======================COLUMN INDICATORS============
case_id = 'CASE_ID'
colln_year = 'ACCIDENT_YEAR'
x_tims = 'POINT_X'
y_tims = 'POINT_Y'
x_chp = 'LONGITUDE'
y_chp = 'LATITUDE'
x_final = 'x_final'
y_final = 'y_final'
#============FUNCTIONS==========
def coordCombin(in_csv,folder = in_csv_folder):
in_csv = os.path.join(in_csv_folder,in_csv)
in_df = pd.DataFrame(pd.read_csv(in_csv,sep = ','))
# in_df.apply(set_final_cv, axis = 1)
#by default, x_final and y_final will = POINT_X and POINT_Y,
#which are through TIMS geocoding
in_df[x_final] = in_df[x_tims]
in_df[y_final] = in_df[y_tims]
#if x_final == 0 then set x_final = 'X_CHP'
in_df.loc[in_df[x_final] == 0, x_final] = in_df[x_chp]
in_df.loc[pd.isnull(in_df[x_final]), x_final] = in_df[x_chp]
in_df.loc[in_df[y_final] == 0, y_final] = in_df[y_chp]
in_df.loc[pd.isnull(in_df[y_final]), y_final] = in_df[y_chp]
#if CHP coords don't exist, then set final x/y values to zero
in_df.loc[in_df[x_final] == 0, x_final] = 0
in_df.loc[pd.isnull(in_df[x_final]), x_final] = 0
in_df.loc[in_df[y_final] == 0, y_final] = 0
in_df.loc[pd.isnull(in_df[y_final]), y_final] = 0
return in_df
#=============APPEND TABLES TOGETHER==============
def combine_tables(folder = in_csv_folder):
in_csvs = os.listdir(in_csv_folder) # returns all input CSVs as a list
in_csvs = [i for i in in_csvs if re.match(".*.csv",i)]
print('reading ' + in_csvs[0])
final_table = coordCombin(in_csvs[0])
for csv in in_csvs[1:]:
print('reading ' + csv)
        final_table = pd.concat([final_table, coordCombin(csv)])
return final_table
def validation_stats(in_df):
no_coords_yr = in_df[in_df[x_final] == 0] \
.groupby(colln_year).count()[x_final]
coords_yr = in_df[in_df[y_final] != 0] \
.groupby(colln_year).count()[y_final]
    div = pd.DataFrame(pd.concat([no_coords_yr, coords_yr], axis=1))
import glob
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
"""
Script to plot gantt charts for data availability
Specific for CETESB stations (MP2.5, MP10 and wind speed)
"""
def gantt_data(path, usecols, var, pos):
"""
Returns a dataframe with data availability info.
Parameters:
path (str): file name
usecols (list-like): columns to be read
var (str): selected variable
pos (int): position in the graph (from bottom to top)
"""
    df = pd.read_csv(path, usecols=usecols)
from datetime import datetime
import pandas as pd
import pytest
from contessa.rules import NotNullRule
class FakedDatetime(datetime):
@classmethod
def now(cls, **kwargs):
return cls(2018, 9, 12, 12, 0, 0)
@classmethod
def today(cls):
return cls(2018, 9, 12, 12, 0, 0)
@pytest.fixture(scope="session")
def rule():
return NotNullRule("not_null_name", "not_null", "src")
@pytest.fixture(scope="session")
def results():
    return pd.Series([True, True, False, False, True], name="src")
import collections
import numpy as np
import pandas as pd
import nltk, string
from nltk import word_tokenize # Convert paragraph in tokens
from sklearn.feature_extraction.text import TfidfVectorizer
nltk.download('punkt')
text_data = pd.read_csv("Text_Similarity_Dataset.csv")
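# Next-step sketch (assumptions: the CSV holds two free-text columns to compare, here
# guessed as "text1"/"text2", and similarity is cosine over TF-IDF vectors):
#     from sklearn.metrics.pairwise import cosine_similarity
#     vectorizer = TfidfVectorizer(stop_words="english")
#     tfidf = vectorizer.fit_transform(pd.concat([text_data["text1"], text_data["text2"]]))
#     n = len(text_data)
#     scores = [cosine_similarity(tfidf[i], tfidf[n + i])[0, 0] for i in range(n)]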
# analyze.py
# This file performs analyses on the results of optimization algorithms.
# Author: <NAME>
# Date: June 10, 2016
from __future__ import print_function
import h5py
import numpy as np
import os
import pandas
import seaborn
from coolcookies import makeobjects
from glob import glob
from math import ceil, sqrt
from matplotlib import pyplot
from matplotlib import ticker
from mpl_toolkits.mplot3d import Axes3D
from openpyxl import load_workbook
# Set environment for graphs
colors = ['#49ADA2', '#7797F4', '#C973F4', '#EF6E8B', '#FFAA6C']
markers = ["o", "^", "D", "s"]
figlabel = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
def main():
nop = 3
runs = 20
basefolder = '/Users/gelliebeenz/Documents/Python/ObjectiveMethod/TimeDependent/'
folders = [basefolder + 'GAMMA-PC/', basefolder + 'NSGA-II/',
basefolder + 'MOMA/',
'/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Analysis/']
files = ['ApproximationSet.csv', 'ApproximationSet.csv', 'ApproximationSet.csv']
n = [1000]
batchsize = [100]
levels = ['Cookies1000/']
levellabels = ['1000 Cookies']
methods = ['GAMMA-PC', 'NSGA-II', 'MOMA']
solsindex = ['Level', 'Number of Solutions', 'Method']
analyze_sets(nop, runs, folders, files, n, batchsize, levels, levellabels,
methods, solsindex)
def analyze_sets(nop, runs, folders, files, n, batchsize, levels, levellabels,
methods, solsindex, wpareto=False):
dfsolseries = []
opcat = ['No. of Bins', 'Avg. Initial Bin Heat (W)', 'Max. Time to Move (s)']
seaborn.set(font_scale=1.25)
for lvl in range(len(levels)):
if not glob(folders[-1] + levels[lvl]):
os.mkdir(folders[-1] + levels[lvl])
datacollect = gather_solutions(runs, lvl, n, batchsize, nop, files,
folders, levels, methods, opcat,
wpareto=wpareto)
df_numsols = combineindicators(n[lvl], nop, runs, solsindex, datacollect,
opcat, folders[-1] + levels[lvl],
methods, wpareto=wpareto)
dfsolseries.append(df_numsols)
gettotalindicators(dfsolseries, solsindex, folders[nop])
def gather_solutions(runs, lvl, n, batchsize, nop, files, folders, levels,
methods, opcat, wpareto=False):
# Uncomment makeobjects if you need to check constraints
cookies = makeobjects(n[lvl], batchsize[lvl], folders[0] + levels[lvl] +
'Experiment01/Cookies{0:d}.txt'.format(n[lvl]))
print('Gathering Data')
datacollect = []
for r in range(runs):
runfolds = listfolders(r, lvl, nop, folders, levels)
# checkforviolations(n[lvl], runfolds, cookies, methods)
data = getparetofront(nop, opcat, runfolds, files, methods, r)
datacollect.extend(data)
paretoplot(data, opcat, nop, runfolds[-1], methods, colors)
getbininfo(cookies, nop, runfolds, methods)
if wpareto:
pareto = pandas.read_csv(folders[-2] + files[0], index_col=0)
pareto.columns = opcat
pareto['Method'] = 'Pareto'
pareto['Experiment'] = 1
datacollect.append(pareto)
return datacollect
def getparetofront(nop, opcat, folders, files, methods, r):
# This function creates the Pareto Front dataframe.
data = []
for opal in range(nop):
data.append(pandas.read_csv(folders[opal] + files[opal], index_col=0))
data[opal].columns = opcat
data[opal]['Method'] = methods[opal]
data[opal]['Experiment'] = r + 1
return data
def paretoplot(data, opcat, nop, folder, methods, color_choices, ignore_method=False):
# This function generates a plot of the Pareto Fronts.
# input: number of algorithms to compare, file locations
# output: 3D Pareto Front and 2D Scatter plots
print('Making Pareto Front plots.')
# Plot 0: 3D Pareto Front Plot
# -------------------------------------------------------------------------
seaborn.set_style('whitegrid')
plotname0 = folder + 'ParetoPlot3D'
plot0 = pyplot.figure().gca(projection='3d')
for opal in range(nop):
plot0.scatter(data[opal][opcat[0]], data[opal][opcat[1]],
data[opal][opcat[2]], c=color_choices[opal], label=methods[opal])
plot0.set_xlabel(opcat[0], labelpad=10)
plot0.set_ylabel(opcat[1], labelpad=10)
start, end = plot0.get_ylim()
plot0.yaxis.set_ticks(np.arange(start, end, 100))
plot0.set_zlabel(opcat[2], labelpad=10)
plot0.legend(bbox_to_anchor=(-0.25, -1, 1, 1), ncol=nop)
plot0.view_init(20, 45)
pyplot.savefig(plotname0 +'.pdf', format='pdf', dpi=2000)
pyplot.close()
# Plot 1: 2D Pareto Front Plots
# -------------------------------------------------------------------------
seaborn.set_style('darkgrid')
objectives = [[0, 1], [0, 2], [1, 2]]
for i, j in objectives:
plotname1 = folder + 'ParetoFront' + str(i+1) + str(j+1) + '.eps'
plot1 = pyplot.figure(dpi=2000)
ax1 = plot1.add_subplot(111)
for opal in range(nop):
x = getcolumn(i, opal, data)
y = getcolumn(j, opal, data)
ax1.scatter(x, y, s=40, c=color_choices[opal], marker='o', label=methods[opal])
ax1.set_xlabel(opcat[i])
ax1.set_ylabel(opcat[j])
ax1.legend(loc='upper right', frameon=True)
pyplot.savefig(plotname1, format='eps')
pyplot.close()
plot_scatter_approxset(data, opcat, nop, folder, methods, color_choices,
ignore_method=ignore_method)
def plot_scatter_approxset(data, opcat, nop, folder, methods, color_choices,
ignore_method=False):
# Plot 2: Scatter Matrix Plot
plotname2 = folder + 'ParetoPlot'
if ignore_method:
scat = seaborn.PairGrid(data[0], vars=opcat)
scat = scat.map_diag(pyplot.hist, facecolor=color_choices[0])
scat = scat.map_offdiag(pyplot.scatter, color=color_choices[0], linewidths=1,
edgecolor="w", s=40)
else:
dataset = pandas.concat(data, keys=methods)
scat = seaborn.PairGrid(dataset, vars=opcat, hue='Method',
palette=seaborn.color_palette(color_choices),
hue_kws={"marker": markers[:nop]})
scat = scat.map_diag(pyplot.hist)
scat = scat.map_offdiag(pyplot.scatter, linewidths=1, edgecolor="w", s=40)
# Set the tick labels to be at a 45 degree angle for better fit
for ax in scat.axes.flat:
ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(lambda xax, p: format(int(xax))))
pyplot.setp(ax.get_xticklabels(), rotation=45)
if not ignore_method:
scat.add_legend(title=None, frameon=True)
scat.fig.get_children()[-1].set_bbox_to_anchor((0.995, 0.925, 0, 0))
pyplot.savefig(plotname2 + '.eps', format='eps', dpi=4000)
pyplot.savefig(plotname2 + '.pdf', format='pdf', dpi=4000)
pyplot.close()
def getcolumn(index, opal, data):
# This function figures out which characteristic is being selected.
if index == 0:
column = data[opal]['No. of Bins']
elif index == 1:
column = data[opal]['Avg. Initial Bin Heat (W)']
else:
column = data[opal]['Max. Time to Move (s)']
return column
def combineindicators(n, nop, runs, solsindex, datacollect, opcat, folder,
methods, wpareto=False):
# This function combines the results given by getbinindicators(A, B)
# into a simple histogram.
print('Evaluating Performance.')
df_data = pandas.concat(datacollect)
df_list = []
# Calculate binary-epsilon indicator
df_ibivalues, df_epsilon = iepsilon(nop, runs, df_data, folder, methods)
df_list.append(['Epsilon Values', df_ibivalues])
df_list.append(['Epsilon Stats', df_epsilon])
print(' - epsilon complete')
# Calculate coverage indicator
df_icovers, df_coverage = icoverage(nop, runs, df_data, folder, methods)
df_list.append(['Coverage Values', df_icovers])
df_list.append(['Coverage Stats', df_coverage])
print(' - coverage complete')
# Calculate diversity indicator
df_spread = getspread(nop, runs, df_data, opcat, methods)
makespreadplot(df_spread, folder)
df_numsols = countsols(solsindex, runs, n, methods[:nop], df_data)
df_list.append(['Number Solutions', df_numsols])
df_list.append(['Spread Indicators', df_spread])
print(' - diversity complete')
if wpareto:
df_pareto = pareto_compare(nop, runs, df_data, folder,
methods, opcat)
df_list.append(['Pareto Measures', df_pareto])
print(' - comparison to Pareto Front complete')
makeperformxlsx(df_list, folder)
return df_numsols
def makeperformxlsx(df_list, folder):
# Write data to PerformanceMetrics.xlsx
writer = pandas.ExcelWriter(folder + 'PerformanceMetrics.xlsx')
for f in range(len(df_list)):
df_list[f][1].to_excel(writer, sheet_name=df_list[f][0])
writer.save()
def iepsilon(nop, runs, df_data, folder, methods, wpareto=False):
# This function controls the creation of the individual binary epsilon
# indicators and their combination into overall indicators.
# Reference: Zitzler, 2003
df_ibivalues = makedf_binary(nop, runs, df_data, methods)
epsilonmatrix = makebinarymatrix(nop, runs, df_ibivalues, folder, methods)
# Convert overall matrix into Dataframe
tuples = []
for opal in range(nop):
tuples.append((methods[opal], 'Average'))
tuples.append((methods[opal], 'St. Dev.'))
if wpareto:
tuples.append((methods[nop], 'Average'))
tuples.append((methods[nop], 'St. Dev.'))
indexa = pandas.MultiIndex.from_tuples(tuples, names=['MethodA', ''])
indexb = pandas.Index(methods, name='MethodB')
df_epsilon = pandas.DataFrame(epsilonmatrix, index=indexb, columns=indexa)
return df_ibivalues, df_epsilon
def makedf_binary(nop, runs, df_data, methods):
df_ivalues = emptydf_indicators(nop, runs, methods)
# Calculate binary epsilon indicators
for a in range(nop):
for b in range(a+1, nop):
for ra in range(runs):
data_a = df_data[(df_data['Method'] == methods[a]) &
(df_data['Experiment'] == ra + 1)]
for rb in range(runs):
data_b = df_data[(df_data['Method'] == methods[b]) &
(df_data['Experiment'] == rb + 1)]
# A into B
i_ab = getbinindicators(data_a, data_b)
df_ivalues.set_value((methods[b], str(rb+1)),
(methods[a], str(ra+1)), i_ab)
# B into A
i_ba = getbinindicators(data_b, data_a)
df_ivalues.set_value((methods[a], str(ra+1)),
(methods[b], str(rb+1)), i_ba)
return df_ivalues
def makebinarymatrix(nop, runs, df_ibivalues, folder, methods):
# Open pyplot figure
plotname = folder + 'Epsilon'
figsize = 4 * 2, 3 * 2
seaborn.set_context('paper', font_scale=1.25)
ymin = round(df_ibivalues.values.min() - 0.05, 2)
ymax = round(df_ibivalues.values.max() + 0.05, 2)
fig = pyplot.figure(figsize=figsize)
# Calculate overall matrix
part = 0
binarymatrix = np.ones((nop, 2 * nop))
for a in range(nop):
for b in range(a + 1, nop):
# Find A into B
df1 = gather_ivalues(a, b, runs, runs, df_ibivalues, methods)
binarymatrix[b, 2 * a] = df1.mean().get_value('I_C', 0)
binarymatrix[b, 2 * a + 1] = df1.std().get_value('I_C', 0)
# Find B into A
df2 = gather_ivalues(b, a, runs, runs, df_ibivalues, methods)
binarymatrix[a, 2 * b] = df2.mean().get_value('I_C', 0)
binarymatrix[a, 2 * b + 1] = df2.std().get_value('I_C', 0)
# Plot values
df_avb = pandas.concat([df1, df2])
ax = fig.add_subplot(2, 2, part + 1, ylim=(ymin, ymax))
ax = plotepsilon(ax, nop, df_avb, a, b, part)
part += 1
fig.tight_layout()
pyplot.savefig(plotname + '.eps', format='eps', dpi=2000)
pyplot.savefig(plotname + '.pdf', format='pdf')
pyplot.close()
return binarymatrix
def getbinindicators(opal_a, opal_b):
# This function calculates the binary epsilon-indicators I_eta(A,B)
# and I_eta(B,A) where A is the first method and B is the second method.
eta12 = np.zeros((len(opal_a), len(opal_b)))
for a in range(len(opal_a)):
a_0 = opal_a.get_value(a, 0, takeable=True)
a_1 = opal_a.get_value(a, 1, takeable=True)
a_2 = opal_a.get_value(a, 2, takeable=True)
for b in range(len(opal_b)):
ratios = np.array([(a_0 / (opal_b.get_value(b, 0, takeable=True))),
(a_1 / (opal_b.get_value(b, 1, takeable=True))),
(a_2 / (opal_b.get_value(b, 2, takeable=True)))])
eta12[a, b] = np.amax(ratios)
eta2 = np.amin(eta12, axis=0)
i_ab = np.amax(eta2)
return i_ab
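def _example_binary_epsilon():
    # Illustrative sketch only (not part of the original pipeline): demonstrates
    # the Zitzler binary epsilon-indicator on two one-point approximate sets
    # whose first three columns hold the objective values, mirroring the layout
    # getbinindicators() expects. The helper name and the f1/f2/f3 column names
    # are hypothetical, and it relies on the same legacy pandas get_value API as
    # the function above, so an older pandas is assumed.
    set_a = pandas.DataFrame([[2.0, 2.0, 2.0]], columns=['f1', 'f2', 'f3'])
    set_b = pandas.DataFrame([[1.0, 1.0, 1.0]], columns=['f1', 'f2', 'f3'])
    # Every objective of A is twice that of B, so I_eps(A, B) == 2.0 (A must be
    # scaled down by 2 to weakly dominate B) while I_eps(B, A) == 0.5.
    return getbinindicators(set_a, set_b), getbinindicators(set_b, set_a)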
def plotepsilon(ax, nop, df_avb, a, b, part):
# This function plots subplot #part
plotcolors = [colors[a], colors[b], colors[nop]]
seaborn.boxplot(x='Comparison', y='I_C', data=df_avb, ax=ax,
palette=plotcolors)
ax.set_xlabel(figlabel[part])
if part % 2 == 0:
ax.set_ylabel('$I_\epsilon(A,B)$', labelpad=10)
else:
ax.set_ylabel("")
ax.set_yticklabels("")
return ax
def icoverage(nop, runs, df_data, folder, methods, wpareto=False):
# This module calculates the coverage indicator
df_cover = makedf_cover(nop, runs, df_data, methods, wpareto=wpareto)
coveragematrix = makecoveragematrix(nop, runs, df_cover, folder, methods)
# Convert overall matrix into Dataframe
tuples = []
for opal in range(nop):
tuples.append((methods[opal], 'Average'))
tuples.append((methods[opal], 'St. Dev.'))
if wpareto:
tuples.append((methods[nop], 'Average'))
tuples.append((methods[nop], 'St. Dev.'))
indexa = pandas.MultiIndex.from_tuples(tuples, names=['MethodA', ''])
indexb = pandas.Index(methods, name='MethodB')
df_comcover = pandas.DataFrame(coveragematrix, index=indexb, columns=indexa)
return df_cover, df_comcover
def makedf_cover(nop, runs, df_data, methods, wpareto=False):
df_cover = emptydf_indicators(nop, runs, methods, with_pareto=wpareto)
# Calculate coverage indicators
for a in range(nop):
for b in range(a+1, nop):
for ra in range(runs):
data_a = df_data[(df_data['Method'] == methods[a]) &
(df_data['Experiment'] == ra + 1)]
for rb in range(runs):
data_b = df_data[(df_data['Method'] == methods[b]) &
(df_data['Experiment'] == rb + 1)]
# A into B
i_covab = calc_coverage(data_a, data_b)
df_cover.set_value((methods[b], str(rb+1)),
(methods[a], str(ra+1)), i_covab)
# B into A
i_covba = calc_coverage(data_b, data_a)
df_cover.set_value((methods[a], str(ra+1)),
(methods[b], str(rb+1)), i_covba)
if wpareto:
data_pareto = df_data[df_data['Method'] == methods[nop]]
for a in range(nop):
for ra in range(runs):
data_a = df_data[(df_data['Method'] == methods[a]) &
(df_data['Experiment'] == ra + 1)]
# Pareto into A
i_covpa = calc_coverage(data_pareto, data_a)
df_cover.set_value((methods[a], str(ra + 1)),
(methods[nop], str(1)), i_covpa)
# A into Pareto
i_covap = calc_coverage(data_a, data_pareto)
df_cover.set_value((methods[nop], str(1)),
(methods[a], str(ra + 1)), i_covap)
return df_cover
def makecoveragematrix(nop, runs, df_cover, folder, methods):
# Open pyplot figure
plotname = folder + 'Coverage'
figsize = 4 * 2, 3 * 2
seaborn.set_context('paper', font_scale=1.25)
ymin = max(round(df_cover.values.min() - 0.05, 2), 0.0)
fig = pyplot.figure(figsize=figsize)
# Calculate overall matrix
part = 0
coveragematrix = np.ones((nop, 2 * nop))
for a in range(nop):
for b in range(a + 1, nop):
# Find A into B
df1 = gather_ivalues(a, b, runs, runs, df_cover, methods)
coveragematrix[b, 2 * a] = df1.mean().get_value('I_C', 0)
coveragematrix[b, 2 * a + 1] = df1.std().get_value('I_C', 0)
# Find B into A
df2 = gather_ivalues(b, a, runs, runs, df_cover, methods)
coveragematrix[a, 2 * b] = df2.mean().get_value('I_C', 0)
coveragematrix[a, 2 * b + 1] = df2.std().get_value('I_C', 0)
# Plot values
df_avb = pandas.concat([df1, df2])
ax = fig.add_subplot(2, 2, part + 1, ylim=(ymin, 1.0))
ax = plotcoverage(ax, nop, df_avb, a, b, part)
part += 1
fig.tight_layout()
pyplot.savefig(plotname + '.eps', format='eps', dpi=2000)
pyplot.savefig(plotname + '.pdf', format='pdf')
pyplot.close()
return coveragematrix
def gather_ivalues(a, b, ra, rb, df_cover, methods):
# This function filters df_cover for I_C values of algorithm A into B
filter_metha = [(methods[a], str(r + 1)) for r in range(ra)]
newshape = int(ra * rb)
array_atob = df_cover.xs(methods[b], level='MethodB').as_matrix(columns=filter_metha)
ic_values = np.reshape(array_atob, newshape)
df = pandas.DataFrame(ic_values)
df.columns = ['I_C']
df['Comparison'] = '({0},{1})'.format(methods[a], methods[b])
return df
def calc_coverage(data_a, data_b):
# calculate coverage indicator of set a to set b
n_bfront = len(data_b) # fraction denominator
n_bcovered = 0 # fraction numerator
for mb in range(n_bfront):
v = np.matrix([data_b.get_value(mb, 0, takeable=True),
data_b.get_value(mb, 1, takeable=True),
data_b.get_value(mb, 2, takeable=True)])
v_covered = check_covered(data_a, v)
if v_covered:
n_bcovered += 1
        # Debug aid: report any point of B left uncovered by the 'NewMethod' set.
        if (data_a['Method'] == 'NewMethod').any() and not v_covered:
            print(v)
i_covab = float(n_bcovered) / n_bfront
return i_covab
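# Worked example (illustrative): with minimisation objectives, if
# A = {(1, 1, 1)} and B = {(1, 2, 3), (2, 0.5, 1)}, the single point of A weakly
# dominates (1, 2, 3) but not (2, 0.5, 1), so one of the two B points is covered
# and C(A, B) = 1/2 = 0.5. The dominance test itself is delegated to dom(),
# defined elsewhere in this module.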
def check_covered(data_a, v):
# This function determines if any solution in set A covers vector v
# belonging to set B
for ma in range(len(data_a)):
u = np.matrix([data_a.get_value(ma, 0, takeable=True),
data_a.get_value(ma, 1, takeable=True),
data_a.get_value(ma, 2, takeable=True)])
if np.all(np.equal(u, v)) or dom(u, v):
return True
# If the solution made it all the way through set A, not covered
return False
def plotcoverage(ax, nop, df_avb, a, b, part):
# This function plots subplot #part
plotcolors = [colors[a], colors[b], colors[nop]]
seaborn.boxplot(x='Comparison', y='I_C', data=df_avb, ax=ax,
palette=plotcolors)
ax.set_xlabel(figlabel[part])
if part % 2 == 0:
ax.set_ylabel('$I_C(A,B)$', labelpad=10)
else:
ax.set_ylabel("")
ax.set_yticklabels("")
return ax
def getcoverbetter(a, b, covers, methods):
better = []
for i in range(len(covers)):
if coverbetter(a, b, covers[i]):
better.append(methods[a])
elif coverbetter(b, a, covers[i]):
better.append(methods[b])
else:
better.append('Neither')
return better
def coverbetter(a, b, covermatrix):
# Interpretation function from Zitzler (2003) for coverage indicator
aintob = covermatrix[b, a]
bintoa = covermatrix[a, b]
return aintob == 1 and bintoa < 1
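# Interpretation example (illustrative): if covermatrix[b, a] == 1.0 (A covers
# every point of B) while covermatrix[a, b] == 0.7 (B leaves 30% of A
# uncovered), then coverbetter(a, b, covermatrix) is True and method A is
# counted as better for that experiment.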
def emptydf_indicators(nop, runs, methods, with_pareto=False):
# Create multilevel indices & empty dataframe
tuples = []
for a in range(nop):
for r in range(runs):
tuples.append((methods[a], str(r + 1)))
if with_pareto:
tuples.append((methods[nop], str(1)))
indexa = pandas.MultiIndex.from_tuples(tuples, names=['MethodA', 'Experiment'])
indexb = pandas.MultiIndex.from_tuples(tuples, names=['MethodB', 'Experiment'])
df_indicators = pandas.DataFrame(np.ones((len(indexb), len(indexa))),
index=indexb, columns=indexa)
return df_indicators
def gettotalindicators(dfsolseries, solsindex, folder):
# This function plots the number of solutions found for all levels.
df_sols = pandas.concat(dfsolseries, axis=0)
plt = seaborn.barplot(x=solsindex[0], y=solsindex[1], hue=solsindex[2], data=df_sols,
palette=seaborn.color_palette(colors), capsize=.2)
plt.set(xlabel='Experiment Level', ylabel='Avg. # of Approximate Solutions Found')
pyplot.savefig(folder + 'RangeofSolutions.eps', format='eps', dpi=2000)
def makeietaplots(a, b, ietas, folder, methods):
# This function plots the distribution of I_etas of A into B
plotname = folder + 'Indicator_' + methods[a] + '_into_' + methods[b] + '.eps'
distab = np.zeros(len(ietas))
for i in range(len(ietas)):
distab[i] = ietas[i][b][a]
seaborn.set_context('talk', font_scale=1.5)
pyplot.figure(figsize=(8, 6))
plot = seaborn.distplot(distab, kde=False, color=colors[a])
plot.axes.set_title('({0},{1})'.format(methods[a], methods[b]), fontsize=28)
start, end = plot.get_xlim()
newend = round((end + 0.05), 1)
plot.set_xlim(2 - newend, newend)
pyplot.savefig(plotname, format='eps', dpi=2000)
pyplot.close()
def plotetabetter(nop, better, a, b, xlabel, folder, methods):
plotname = folder + 'Ieta_' + methods[a] + 'vs' + methods[b] + '.eps'
plotcolors = [colors[a], colors[b], colors[nop]]
metha = 0
methb = 0
nope = 0
for eta in better:
if eta is methods[a]:
metha += 1
elif eta is methods[b]:
methb += 1
else:
nope += 1
betterdict = {'Method': [methods[a], methods[b], 'Neither'],
'Number of Experiments': [metha, methb, nope]}
bdf = pandas.DataFrame(betterdict)
pyplot.figure()
seaborn.set_context('talk', font_scale=1.5)
betplot = seaborn.barplot('Method', 'Number of Experiments', data=bdf,
ci=None, palette=plotcolors)
betplot.set_xlabel(xlabel)
betplot.set_ylabel('No. Better by $I_\epsilon(A,B)$', labelpad=10)
betplot.set(ylim=(0, len(better)))
pyplot.savefig(plotname, format='eps', dpi=2000)
def getetabetter(a, b, ietas, methods):
better = []
for i in range(len(ietas)):
if etabetter(a, b, ietas[i]):
better.append(methods[a])
elif etabetter(b, a, ietas[i]):
better.append(methods[b])
else:
better.append('Neither')
return better
def etabetter(a, b, ieta):
# Interpretation function from Zitzler (2003) for Binary Indicator
aintob = ieta[b][a]
bintoa = ieta[a][b]
return aintob <= 1 and bintoa > 1
def getspread(nop, runs, df_data, opcat, methods, wpareto=False):
# This module finds the spread indicator values for each algorithm
# and then normalizes them to the largest value.
# Calculate the spread of each front individually:
spreadvals = np.zeros((runs, nop))
for r in range(runs):
for opal in range(nop):
approxset = df_data[(df_data['Method'] == methods[opal]) &
(df_data['Experiment'] == r + 1)]
spreadvals[r, opal] = calcspread(approxset, opcat)
normfactor = np.max(spreadvals)
if wpareto:
data_pareto = df_data[df_data['Method'] == methods[nop]]
pareto_spread = calcspread(data_pareto, opcat)
normfactor = max(normfactor, pareto_spread)
spread = spreadvals / normfactor
# Combine spread indicators
spreadindicators = []
for r in range(runs):
for opal in range(nop):
spreadindicators.append([r + 1, spread[r, opal], methods[opal]])
df_spread = pandas.DataFrame(spreadindicators)
df_spread.columns = ['Run', 'Spread', 'Method']
return df_spread
def calcspread(opaldata, opcat):
# This module calculates the spread indicator from an approximate set
dsum = 0
for o in range(len(opcat)):
maxval = opaldata.nlargest(1, opcat[o]).get_value(0, o, takeable=True)
minval = opaldata.nsmallest(1, opcat[o]).get_value(0, o, takeable=True)
dsum += (maxval - minval)**2
dspread = sqrt(dsum)
return dspread
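# Worked example (illustrative): calcspread() is the Euclidean norm of the
# per-objective ranges of an approximate set. A front spanning 10 bins, 200 W of
# initial bin heat and 30 s of move time has a raw spread of
# sqrt(10**2 + 200**2 + 30**2) ~= 202.5, which getspread() then normalises by
# the largest spread observed over all runs and methods.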
def makespreadplot(df_spread, folder):
# This module plots the spread indicators
# x-axis: Algorithm
# y-axis: Spread value
kws = dict(s=70, linewidth=1, edgecolor="w")
plotname = folder + 'SpreadIndicators'
seaborn.set_context('paper', font_scale=1.25)
plot = seaborn.boxplot(x='Method', y='Spread', data=df_spread,
palette=seaborn.color_palette(colors))
plot.set(ylabel='Maximum Spread Indicator (Normalized)')
# plot.set(yscale='log')
plot.set(ylim=(0, 1))
pyplot.savefig(plotname + '.eps', format='eps')
pyplot.savefig(plotname + '.pdf', format='pdf')
pyplot.close()
def getdeltas(nop, data, opcat):
# This function calculates the delta values for the Deb diversity indicator
deltas = np.zeros(nop)
for opal in range(nop):
front0 = data[opal].sort_values(opcat[2])
dis = np.zeros(len(front0) - 1)
for m in range(len(front0) - 1):
a = np.zeros(3)
b = np.zeros(3)
for f in range(3):
a[f] = front0.get_value(m, f, takeable=True)
b[f] = front0.get_value(m + 1, f, takeable=True)
dis[m] = np.linalg.norm(a - b)
davg = np.average(dis)
sumd = 0
for m in range(len(front0) - 1):
sumd += abs(dis[m] - davg)
deltas[opal] = sumd / len(front0)
deltadiffs = np.identity(nop)
for op1 in range(nop):
for op2 in range(nop):
if op1 != op2:
deltadiffs[op1, op2] = deltas[op1] / deltas[op2]
return deltas, deltadiffs
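# Note (illustrative): getdeltas() computes Deb's diversity measure - sort the
# front by the third objective, take Euclidean distances between consecutive
# points, and average their absolute deviation from the mean distance.
# Perfectly uniform spacing (e.g. distances [1, 1, 1]) gives delta = 0; larger
# deltas mean more clustered fronts, and deltadiffs[i, j] is delta_i / delta_j.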
def makedeltaplots(nop, deltas, folder, methods):
# This function plots the differences in deltas for each experimental level
for a in range(nop):
for b in range(nop):
if a != b:
plotname = folder + 'DeltaDifferentials_' + methods[a] + '_into_' + methods[b] + '.eps'
distab = np.zeros(len(deltas))
for i in range(len(deltas)):
distab[i] = deltas[i][a, b]
seaborn.set_context('talk', font_scale=1.5)
pyplot.figure(figsize=(8, 6))
plot = seaborn.distplot(distab, kde=False, color=colors[a])
plot.axes.set_title('({0},{1})'.format(methods[a], methods[b]), fontsize=28)
start, end = plot.get_xlim()
if abs(end-1) > abs(1-start):
newend = round((end + 0.05), 1)
plot.set_xlim(2 - newend, newend)
else:
newstart = round((start - 0.05), 1)
plot.set_xlim(newstart, 2 - newstart)
pyplot.savefig(plotname, format='eps', dpi=2000)
pyplot.close()
def countsols(solsindex, runs, levellabel, methods, df_data):
# This function counts the number of solution generated by each algorithm
# in each experiment.
count_exp = df_data.groupby(['Experiment', 'Method']).count()
solsdiversity = []
for r in range(runs):
for opal in range(len(methods)):
mthd = methods[opal]
nsols = count_exp.get_value((r+1, mthd), 'No. of Bins')
solsdiversity.append([levellabel, nsols, mthd])
    df_numsols = pandas.DataFrame(solsdiversity)
    df_numsols.columns = solsindex
    return df_numsols
""" Tests for dataframe module
"""
# pylint: disable=missing-function-docstring,missing-class-docstring
# pylint: disable=invalid-name,no-self-use
import unittest
import pandas as pd
import numpy as np
from data_science_tools import dataframe
from data_science_tools.dataframe import (
coalesce,
merge_on_index,
)
class TestCoalesce(unittest.TestCase):
def test_coalesce(self):
series = [
pd.Series([np.nan, 1, np.nan, np.nan, 1]),
pd.Series([np.nan, 2, np.nan, 2, 2]),
pd.Series([np.nan, np.nan, 3, 3, 3]),
]
expected = pd.Series([np.nan, 1, 3, 2, 1])
actual = coalesce(series)
np.testing.assert_array_equal(actual.values, expected.values)
def test_coalesce_df(self):
df = pd.DataFrame(
{
0: pd.Series([np.nan, 1, np.nan, np.nan, 1]),
1: pd.Series([np.nan, 2, np.nan, 2, 2]),
2: pd.Series([np.nan, np.nan, 3, 3, 3]),
}
)
expected = pd.Series([np.nan, 1, 3, 2, 1])
actual = coalesce([df[c] for c in df])
np.testing.assert_array_equal(actual.values, expected.values)
def test_coalesce_df_multiple_columns(self):
df = pd.DataFrame(
{
0: pd.Series([np.nan, 1, np.nan, np.nan, 1]),
1: pd.Series([np.nan, 2, np.nan, 2, 2]),
2: pd.Series([np.nan, np.nan, 3, 3, 3]),
}
)
# using the column names broke when multiples with same name.
df.columns = [0, 0, 0]
expected = pd.Series([np.nan, 1, 3, 2, 1])
actual = coalesce(df)
np.testing.assert_array_equal(actual.values, expected.values)
class TestWindowFunction(unittest.TestCase):
"""Test window_functions"""
def _generate_example(self, size=10):
df_test = pd.DataFrame()
df_test["name"] = pd.np.random.choice(["tom", "bob"], size)
df_test["height"] = pd.np.random.randint(45, 60, size)
return df_test
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
self.df_example_1 = pd.DataFrame(
[
("bob", 45),
("bob", 58),
("tom", 46),
("bob", 55),
("tom", 53),
("bob", 54),
("bob", 45),
("tom", 55),
("bob", 53),
("bob", 51),
],
columns=["name", "height"],
)
self.df_example_1.index += 10
self.df_example_2 = pd.DataFrame(
[
("bob", "smith", 45),
("bob", "jones", 50),
("tom", "smith", 53),
("bob", "jones", 50),
("bob", "jones", 58),
("tom", "jones", 47),
("bob", "smith", 54),
("bob", "jones", 48),
("tom", "smith", 59),
("tom", "smith", 49),
],
columns=["first_name", "last_name", "height"],
)
# MultiIndex
self.df_example_3 = self.df_example_2.copy()
self.df_example_3.index = pd.MultiIndex.from_tuples(
[
("developer", 30),
("developer", 31),
("developer", 32),
("developer", 33),
("programmer", 40),
("programmer", 41),
("programmer", 42),
("programmer", 43),
("programmer", 44),
("programmer", 45),
],
names=["occupation", "age"],
)
def _apply_example_1_height_mean(self, df):
return df["height"].mean()
def test_apply_full_range(self):
results = dataframe.window_function(
self.df_example_1,
self._apply_example_1_height_mean,
preceding=None,
following=None,
)
answer = pd.Series(51.5, index=self.df_example_1.index)
pd.testing.assert_series_equal(answer, results)
def test_apply_current(self):
results = dataframe.window_function(
self.df_example_1,
self._apply_example_1_height_mean,
)
answer = self.df_example_1["height"].astype(float)
answer.name = None
pd.testing.assert_series_equal(answer, results)
def test_apply_row_number(self):
results = dataframe.window_function(
self.df_example_1,
"row_number",
order_by="height",
)
answer = pd.Series(
[1, 10, 3, 8, 5, 7, 2, 9, 6, 4],
index=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
)
# TO DO fix result index
results.sort_index(inplace=True)
pd.testing.assert_series_equal(answer, results)
def test_apply_series_method(self):
example_data = pd.DataFrame()
example_data["column1"] = [2, 4, 6, 8, 10, 12]
results = dataframe.window_function(
example_data,
"mean",
"column1",
)
answer = example_data["column1"].astype(float)
answer.name = None
        pd.testing.assert_series_equal(answer, results)
"""
A collection of Algos used to create Strategy logic.
"""
from __future__ import division
import abc
import random
import re
import numpy as np
import pandas as pd
import sklearn.covariance
from future.utils import iteritems
import bt
from bt.core import Algo, AlgoStack, SecurityBase, is_zero
def run_always(f):
"""
Run always decorator to be used with Algo
to ensure stack runs the decorated Algo
on each pass, regardless of failures in the stack.
"""
f.run_always = True
return f
class PrintDate(Algo):
"""
This Algo simply print's the current date.
Can be useful for debugging purposes.
"""
def __call__(self, target):
print(target.now)
return True
class PrintTempData(Algo):
"""
This Algo prints the temp data.
Useful for debugging.
Args:
* fmt_string (str): A string that will later be formatted with the
target's temp dict. Therefore, you should provide
what you want to examine within curly braces ( { } )
"""
def __init__(self, fmt_string=None):
super(PrintTempData, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
if self.fmt_string:
print(self.fmt_string.format(**target.temp))
else:
print(target.temp)
return True
class PrintInfo(Algo):
"""
Prints out info associated with the target strategy. Useful for debugging
purposes.
Args:
* fmt_string (str): A string that will later be formatted with the
target object's __dict__ attribute. Therefore, you should provide
what you want to examine within curly braces ( { } )
Ex:
PrintInfo('Strategy {name} : {now}')
This will print out the name and the date (now) on each call.
Basically, you provide a string that will be formatted with target.__dict__
"""
def __init__(self, fmt_string="{name} {now}"):
super(PrintInfo, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
print(self.fmt_string.format(**target.__dict__))
return True
class Debug(Algo):
"""
Utility Algo that calls pdb.set_trace when triggered.
In the debug session, 'target' is available and can be examined through the
StrategyBase interface.
"""
def __call__(self, target):
import pdb
pdb.set_trace()
return True
class RunOnce(Algo):
"""
Returns True on first run then returns False.
As the name says, the algo only runs once. Useful in situations
where we want to run the logic once (buy and hold for example).
"""
def __init__(self):
super(RunOnce, self).__init__()
self.has_run = False
def __call__(self, target):
# if it hasn't run then we will
# run it and set flag
if not self.has_run:
self.has_run = True
return True
# return false to stop future execution
return False
class RunPeriod(Algo):
def __init__(
self, run_on_first_date=True, run_on_end_of_period=False, run_on_last_date=False
):
super(RunPeriod, self).__init__()
self._run_on_first_date = run_on_first_date
self._run_on_end_of_period = run_on_end_of_period
self._run_on_last_date = run_on_last_date
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
# not a known date in our universe
if now not in target.data.index:
return False
# get index of the current date
index = target.data.index.get_loc(target.now)
result = False
# index 0 is a date added by the Backtest Constructor
if index == 0:
return False
# first date
if index == 1:
if self._run_on_first_date:
result = True
# last date
elif index == (len(target.data.index) - 1):
if self._run_on_last_date:
result = True
else:
# create pandas.Timestamp for useful .week,.quarter properties
now = pd.Timestamp(now)
index_offset = -1
if self._run_on_end_of_period:
index_offset = 1
date_to_compare = target.data.index[index + index_offset]
date_to_compare = pd.Timestamp(date_to_compare)
result = self.compare_dates(now, date_to_compare)
return result
@abc.abstractmethod
def compare_dates(self, now, date_to_compare):
raise (NotImplementedError("RunPeriod Algo is an abstract class!"))
class RunDaily(RunPeriod):
"""
Returns True on day change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's day has changed
compared to the last(or next if run_on_end_of_period) date, if not returns False.
Useful for daily rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.date() != date_to_compare.date():
return True
return False
class RunWeekly(RunPeriod):
"""
Returns True on week change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
    Returns True if the week of target.now has changed relative to the last
    (or next, if run_on_end_of_period) date; otherwise returns False. Useful
    for weekly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.week != date_to_compare.week:
return True
return False
class RunMonthly(RunPeriod):
"""
Returns True on month change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
    Returns True if the month of target.now has changed relative to the last
    (or next, if run_on_end_of_period) date; otherwise returns False. Useful
    for monthly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.month != date_to_compare.month:
return True
return False
class RunQuarterly(RunPeriod):
"""
Returns True on quarter change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
    Returns True if the quarter of target.now has changed relative to the last
    (or next, if run_on_end_of_period) date; otherwise returns False. Useful
    for quarterly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.quarter != date_to_compare.quarter:
return True
return False
class RunYearly(RunPeriod):
"""
Returns True on year change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
    Returns True if the year of target.now has changed relative to the last
    (or next, if run_on_end_of_period) date; otherwise returns False. Useful
    for yearly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year:
return True
return False
class RunOnDate(Algo):
"""
Returns True on a specific set of dates.
Args:
* dates (list): List of dates to run Algo on.
"""
def __init__(self, *dates):
"""
Args:
* dates (*args): A list of dates. Dates will be parsed
by pandas.to_datetime so pass anything that it can
parse. Typically, you will pass a string 'yyyy-mm-dd'.
"""
super(RunOnDate, self).__init__()
# parse dates and save
self.dates = [pd.to_datetime(d) for d in dates]
def __call__(self, target):
return target.now in self.dates
class RunAfterDate(Algo):
"""
Returns True after a date has passed
Args:
* date: Date after which to start trading
Note:
This is useful for algos that rely on trailing averages where you
don't want to start trading until some amount of data has been built up
"""
def __init__(self, date):
"""
Args:
* date: Date after which to start trading
"""
super(RunAfterDate, self).__init__()
# parse dates and save
self.date = pd.to_datetime(date)
def __call__(self, target):
return target.now > self.date
class RunAfterDays(Algo):
"""
Returns True after a specific number of 'warmup' trading days have passed
Args:
* days (int): Number of trading days to wait before starting
Note:
This is useful for algos that rely on trailing averages where you
don't want to start trading until some amount of data has been built up
"""
def __init__(self, days):
"""
Args:
* days (int): Number of trading days to wait before starting
"""
super(RunAfterDays, self).__init__()
self.days = days
def __call__(self, target):
if self.days > 0:
self.days -= 1
return False
return True
class RunIfOutOfBounds(Algo):
"""
This algo returns true if any of the target weights deviate by an amount greater
than tolerance. For example, it will be run if the tolerance is set to 0.5 and
a security grows from a target weight of 0.2 to greater than 0.3.
A strategy where rebalancing is performed quarterly or whenever any
security's weight deviates by more than 20% could be implemented by:
Or([runQuarterlyAlgo,runIfOutOfBoundsAlgo(0.2)])
Args:
* tolerance (float): Allowed deviation of each security weight.
Requires:
* Weights
"""
def __init__(self, tolerance):
self.tolerance = float(tolerance)
super(RunIfOutOfBounds, self).__init__()
def __call__(self, target):
if "weights" not in target.temp:
return True
targets = target.temp["weights"]
for cname in target.children:
if cname in targets:
c = target.children[cname]
deviation = abs((c.weight - targets[cname]) / targets[cname])
if deviation > self.tolerance:
return True
if "cash" in target.temp:
cash_deviation = abs(
(target.capital - targets.value) / targets.value - target.temp["cash"]
)
if cash_deviation > self.tolerance:
return True
return False
class RunEveryNPeriods(Algo):
"""
This algo runs every n periods.
Args:
* n (int): Run each n periods
* offset (int): Applies to the first run. If 0, this algo will run the
first time it is called.
This Algo can be useful for the following type of strategy:
Each month, select the top 5 performers. Hold them for 3 months.
You could then create 3 strategies with different offsets and create a
master strategy that would allocate equal amounts of capital to each.
"""
def __init__(self, n, offset=0):
super(RunEveryNPeriods, self).__init__()
self.n = n
self.offset = offset
self.idx = n - offset - 1
self.lcall = 0
def __call__(self, target):
# ignore multiple calls on same period
if self.lcall == target.now:
return False
else:
self.lcall = target.now
# run when idx == (n-1)
if self.idx == (self.n - 1):
self.idx = 0
return True
else:
self.idx += 1
return False
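# Usage sketch (illustrative; assumes the standard bt Strategy API plus the
# SelectAll/WeighEqually/Rebalance algos that ship with bt.algos), following the
# docstring above: three offset copies of a "rebalance every 3 periods" strategy
# combined under one master that allocates equally between them.
#
#   children = [bt.Strategy('sub%d' % i,
#                           [RunEveryNPeriods(3, offset=i),
#                            SelectAll(), WeighEqually(), Rebalance()])
#               for i in range(3)]
#   master = bt.Strategy('master',
#                        [SelectAll(), WeighEqually(), Rebalance()],
#                        children=children)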
class SelectAll(Algo):
"""
Sets temp['selected'] with all securities (based on universe).
Selects all the securities and saves them in temp['selected'].
By default, SelectAll does not include securities that have no
data (nan) on current date or those whose price is zero or negative.
Args:
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(self, include_no_data=False, include_negative=False):
super(SelectAll, self).__init__()
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
if self.include_no_data:
target.temp["selected"] = target.universe.columns
else:
universe = target.universe.loc[target.now].dropna()
if self.include_negative:
target.temp["selected"] = list(universe.index)
else:
target.temp["selected"] = list(universe[universe > 0].index)
return True
class SelectThese(Algo):
"""
Sets temp['selected'] with a set list of tickers.
Args:
        * tickers (list): List of tickers to select.
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(self, tickers, include_no_data=False, include_negative=False):
super(SelectThese, self).__init__()
self.tickers = tickers
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
if self.include_no_data:
target.temp["selected"] = self.tickers
else:
universe = target.universe.loc[target.now, self.tickers].dropna()
if self.include_negative:
target.temp["selected"] = list(universe.index)
else:
target.temp["selected"] = list(universe[universe > 0].index)
return True
class SelectHasData(Algo):
"""
Sets temp['selected'] based on all items in universe that meet
data requirements.
This is a more advanced version of SelectAll. Useful for selecting
tickers that need a certain amount of data for future algos to run
properly.
For example, if we need the items with 3 months of data or more,
we could use this Algo with a lookback period of 3 months.
When providing a lookback period, it is also wise to provide a min_count.
This is basically the number of data points needed within the lookback
period for a series to be considered valid. For example, in our 3 month
lookback above, we might want to specify the min_count as being
57 -> a typical trading month has give or take 20 trading days. If we
factor in some holidays, we can use 57 or 58. It's really up to you.
If you don't specify min_count, min_count will default to ffn's
get_num_days_required.
Args:
* lookback (DateOffset): A DateOffset that determines the lookback
period.
* min_count (int): Minimum number of days required for a series to be
considered valid. If not provided, ffn's get_num_days_required is
used to estimate the number of points required.
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(
self,
        lookback=pd.DateOffset(months=3)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_yuval / 'hydro'
axis_path = work_yuval/'axis'
gis_path = work_yuval / 'gis'
ims_path = work_yuval / 'IMS_T'
hydro_ml_path = hydro_path / 'hydro_ML'
gnss_path = work_yuval / 'GNSS_stations'
# 'tela': 17135
hydro_pw_dict = {'nizn': 25191, 'klhv': 21105, 'yrcm': 55165,
'ramo': 56140, 'drag': 48125, 'dsea': 48192,
'spir': 56150, 'nrif': 60105, 'elat': 60190
}
hydro_st_name_dict = {25191: 'Lavan - new nizana road',
21105: 'Shikma - Tel milcha',
55165: 'Mamsheet',
56140: 'Ramon',
48125: 'Draga',
48192: 'Chiemar - down the cliff',
46150: 'Nekrot - Top',
60105: 'Yaelon - Kibutz Yahel',
60190: 'Solomon - Eilat'}
best_hp_models_dict = {'SVC': {'kernel': 'rbf', 'C': 1.0, 'gamma': 0.02,
'coef0': 0.0, 'degree': 1},
'RF': {'max_depth': 5, 'max_features': 'auto',
'min_samples_leaf': 1, 'min_samples_split': 2,
'n_estimators': 400},
'MLP': {'alpha': 0.1, 'activation': 'relu',
'hidden_layer_sizes': (10,10,10), 'learning_rate': 'constant',
'solver': 'lbfgs'}}
scorer_order = ['precision', 'recall', 'f1', 'accuracy', 'tss', 'hss']
tsafit_dict = {'lat': 30.985556, 'lon': 35.263056,
'alt': -35.75, 'dt_utc': '2018-04-26T10:15:00'}
axis_southern_stations = ['Dimo', 'Ohad', 'Ddse', 'Yotv', 'Elat', 'Raha', 'Yaha']
soi_axis_dict = {'yrcm': 'Dimo',
'slom': 'Ohad',
'dsea': 'Ddse',
'nrif': 'Yotv',
'elat': 'Elat',
'klhv': 'Raha',
'spir': 'Yaha'}
def plot_mean_abs_shap_values_features(SV, fix_xticklabels=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
features = ['pwv', 'pressure', 'DOY']
# sns.set_palette('Dark2', 6)
sns.set_theme(style='ticks', font_scale=1.5)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sv = np.abs(SV).mean('sample').sel(clas=0).reset_coords(drop=True)
gr_spec = [20, 20, 1]
fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(features):
fe = [x for x in sv['feature'].values if f in x]
dsf = sv.sel(feature=fe).reset_coords(drop=True).to_dataframe()
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8, color='k', alpha=0.8)
axes[i].set_title(title)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
# axes[i].legend(handles=handles, labels=labels, prop={'size': fontsize-3}, loc='upper center')
axes[i].set_ylabel('mean(|SHAP value|)\n(average impact\non model output magnitude)')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
# n = sum(['pwv' in x for x in sv.feature.values])
axes[2].xaxis.set_ticklabels('')
axes[2].set_xlabel('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].tick_params()
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
filename = 'RF_shap_values_{}.png'.format('+'.join(features))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def read_binary_classification_shap_values_to_pandas(shap_values, X):
import xarray as xr
SV0 = X.copy(data=shap_values[0])
SV1 = X.copy(data=shap_values[1])
SV = xr.concat([SV0, SV1], dim='clas')
SV['clas'] = [0, 1]
return SV
def get_shap_values_RF_classifier(plot=True):
import shap
X, y = combine_pos_neg_from_nc_file()
ml = ML_Classifier_Switcher()
rf = ml.pick_model('RF')
rf.set_params(**best_hp_models_dict['RF'])
X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy'])
rf.fit(X, y)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X.values)
if plot:
shap.summary_plot(shap_values, X, feature_names=[
x for x in X.feature.values], max_display=49, sort=False)
return shap_values
def interpolate_pwv_to_tsafit_event(path=work_yuval, savepath=work_yuval):
import pandas as pd
import xarray as xr
from PW_stations import produce_geo_gnss_solved_stations
from interpolation_routines import interpolate_var_ds_at_multiple_dts
from aux_gps import save_ncfile
# get gnss soi-apn pwv data and geo-meta data:
geo_df = produce_geo_gnss_solved_stations(plot=False)
pw = xr.load_dataset(work_yuval/'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw = pw.sel(time=slice('2018-04-25', '2018-04-26'))
pw = pw.drop_vars(['elat', 'elro', 'csar', 'slom'])
# get tsafit data:
predict_df = pd.DataFrame(tsafit_dict, index=['tsafit'])
df_inter = interpolate_var_ds_at_multiple_dts(pw, geo_df, predict_df)
da=df_inter['interpolated_lr_fixed'].to_xarray()
da.name = 'pwv'
da.attrs['operation'] = 'interploated from SOI-APN PWV data'
da.attrs['WV scale height'] = 'variable from SOI-APN data'
da.attrs.update(**tsafit_dict)
if savepath is not None:
filename = 'Tsafit_PWV_event.nc'
save_ncfile(da, savepath, filename)
return da
def plot_tsafit_event(path=work_yuval):
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
da = xr.load_dataarray(path / 'Tsafit_PWV_event.nc')
fig, ax = plt.subplots(figsize=(11, 8))
da_sliced = da.sel(time=slice('2018-04-26T00:00:00', '2018-04-26T12:00:00'))
# da_sliced.name = 'PWV [mm]'
da_sliced = da_sliced.rename({'time': 'Time [UTC]'})
da_sliced.to_dataframe().plot(ax=ax, ylabel='PWV [mm]', linewidth=2, marker='o', legend=False)
dt = pd.to_datetime(da.attrs['dt_utc'])
ax.axvline(dt, color='r', linestyle='--', linewidth=2, label='T')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles=handles, labels=['PWV', 'Tsafit Flood Event'])
ax.grid(True)
# ax.set_xlabel('Time [UTC]')
fig.tight_layout()
fig.suptitle('PWV from SOI-APN over Tsafit area on 2018-04-26')
fig.subplots_adjust(top=0.941)
return fig
# TODO: treat all pwv from events as follows:
# For each station:
# 0) rolling mean to all pwv 1 hour
# 1) take 288 points before events, if < 144 gone then drop
# 2) interpolate them 12H using spline/other
# 3) then, check if dts coinside 1 day before, if not concat all dts+pwv for each station
# 4) prepare features, such as pressure, doy, try to get pressure near the stations and remove the longterm hour dayofyear
# pressure in BD anoms is highly correlated with SEDOM (0.9) and ELAT (0.88) so no need for local pressure features
# fixed filling with jerusalem centre since 2 drag events dropped due to lack of data 2018-11 2019-02 in pressure
# 5) feature addition: should be like pwv steps 1-3,
# 6) negative events should be sampled separately, for
# 7) now prepare pwv and pressure to single ds with 1 hourly sample rate
# 8) produce positives and save them to file!
# 9) produce a way to get negatives considering the positives
# maybe implement permutation importance to pwv ? see what is more important to
# the model in 24 hours ? only on SVC and MLP ?
# implement TSS and HSS scores and test them (make_scorer from confusion matrix)
# redo results but with inner and outer splits of 4, 4
# plot and see best_score per refit-scorrer - this is the best score of GridSearchCV on the entire
# train/validation subset per each outerfold - basically see if the test_metric increased after the gridsearchcv as it should
# use holdout set
# implement repeatedstratifiedkfold and run it...
# check for stability of the gridsearch CV...also run with 4-folds ?
# finalize the permutation_importances and permutation_test_scores
def prepare_tide_events_GNSS_dataset(hydro_path=hydro_path):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import xr_reindex_with_date_range
feats = xr.load_dataset(
hydro_path/'hydro_tides_hourly_features_with_positives.nc')
ds = feats['Tides'].to_dataset('GNSS').rename({'tide_event': 'time'})
da_list = []
for da in ds:
time = ds[da].dropna('time')
daa = time.copy(data=np.ones(time.shape))
daa['time'] = pd.to_datetime(time.values)
daa.name = time.name + '_tide'
da_list.append(daa)
ds = xr.merge(da_list)
li = [xr_reindex_with_date_range(ds[x], freq='H') for x in ds]
ds = xr.merge(li)
return ds
def select_features_from_X(X, features='pwv'):
if isinstance(features, str):
f = [x for x in X.feature.values if features in x]
X = X.sel(feature=f)
elif isinstance(features, list):
fs = []
for f in features:
fs += [x for x in X.feature.values if f in x]
X = X.sel(feature=fs)
return X
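# Usage note (illustrative): feature names built in this module look like
# 'pwv_1'..'pwv_24', 'pressure_1'..'pressure_24', 'DOY', 'doy_sin', 'doy_cos',
# so select_features_from_X(X, 'pwv') keeps the 24 pwv lags and
# select_features_from_X(X, ['pwv', 'pressure']) keeps 48 columns.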
def combine_pos_neg_from_nc_file(hydro_path=hydro_path,
negative_sample_num=1,
seed=1, std=True):
from aux_gps import path_glob
from sklearn.utils import resample
import xarray as xr
import numpy as np
# import pandas as pd
if std:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_std*.nc')[-1]
else:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_*.nc')[-1]
ds = xr.open_dataset(file)
# get the positive features and produce target:
X_pos = ds['X_pos'].rename({'positive_sample': 'sample'})
y_pos = xr.DataArray(np.ones(X_pos['sample'].shape), dims=['sample'])
y_pos['sample'] = X_pos['sample']
# choose at random y_pos size of negative class:
X_neg = ds['X_neg'].rename({'negative_sample': 'sample'})
pos_size = y_pos['sample'].size
np.random.seed(seed)
# negatives = []
for n_samples in [x for x in range(negative_sample_num)]:
# dts = np.random.choice(X_neg['sample'], size=y_pos['sample'].size,
# replace=False)
# print(np.unique(dts).shape)
# negatives.append(X_neg.sel(sample=dts))
negative = resample(X_neg, replace=False,
n_samples=pos_size * negative_sample_num,
random_state=seed)
negatives = np.split(negative, negative_sample_num, axis=0)
Xs = []
ys = []
for X_negative in negatives:
y_neg = xr.DataArray(np.zeros(X_negative['sample'].shape), dims=['sample'])
y_neg['sample'] = X_negative['sample']
# now concat all X's and y's:
X = xr.concat([X_pos, X_negative], 'sample')
y = xr.concat([y_pos, y_neg], 'sample')
X.name = 'X'
Xs.append(X)
ys.append(y)
if len(negatives) == 1:
return Xs[0], ys[0]
else:
return Xs, ys
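# Usage note (illustrative): with negative_sample_num=1 a single balanced
# (X, y) pair is returned; with k > 1 the resampled negatives are split into k
# chunks of positive-set size, giving k balanced (X, y) pairs that share the
# same positive samples.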
def drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True):
import numpy as np
Xcopy = X.copy()
pwvs_to_drop = ['pwv_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pwvs_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pwvs_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pwvs_to_drop)
pressures_to_drop = ['pressure_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pressures_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pressures_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pressures_to_drop)
return Xcopy
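# Example (illustrative): with last_hours=7 the dropped features are
# pwv_18..pwv_24 and pressure_18..pressure_24, i.e. the seven lags closest to
# the flood event.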
def check_if_negatives_are_within_positives(neg_da, hydro_path=hydro_path):
import xarray as xr
import pandas as pd
pos_da = xr.open_dataset(
hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['X']
dt_pos = pos_da.sample.to_dataframe()
dt_neg = neg_da.sample.to_dataframe()
dt_all = dt_pos.index.union(dt_neg.index)
dff = pd.DataFrame(dt_all, index=dt_all)
dff = dff.sort_index()
samples_within = dff[(dff.diff()['sample'] <= pd.Timedelta(1, unit='D'))]
num = samples_within.size
print('samples that are within a day of each other: {}'.format(num))
print('samples are: {}'.format(samples_within))
return dff
def produce_negatives_events_from_feature_file(hydro_path=hydro_path, seed=42,
batches=1, verbose=1, std=True):
# do the same thing for pressure (as for pwv), but not for
import xarray as xr
import numpy as np
import pandas as pd
from aux_gps import save_ncfile
feats = xr.load_dataset(hydro_path / 'hydro_tides_hourly_features.nc')
feats = feats.rename({'doy': 'DOY'})
if std:
pos_filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
pos_filename = 'hydro_tides_hourly_features_with_positives.nc'
all_tides = xr.open_dataset(
hydro_path / pos_filename)['X_pos']
# pos_tides = xr.open_dataset(hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['tide_datetimes']
tides = xr.open_dataset(
hydro_path / pos_filename)['Tides']
# get the positives (tide events) for each station:
df_stns = tides.to_dataset('GNSS').to_dataframe()
# get all positives (tide events) for all stations:
df = all_tides.positive_sample.to_dataframe()['positive_sample']
df.columns = ['sample']
stns = [x for x in hydro_pw_dict.keys()]
other_feats = ['DOY', 'doy_sin', 'doy_cos']
# main stns df features (pwv)
pwv_df = feats[stns].to_dataframe()
pressure = feats['bet-dagan'].to_dataframe()['bet-dagan']
# define the initial no_choice_dt_range from the positive dt_range:
no_choice_dt_range = [pd.date_range(
start=dt, periods=48, freq='H') for dt in df]
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack(no_choice_dt_range)))
dts_to_choose_from = pwv_df.index.difference(no_choice_dt_range)
# dts_to_choose_from_pressure = pwv_df.index.difference(no_choice_dt_range)
# loop over all stns and produce negative events:
np.random.seed(seed)
neg_batches = []
for i in np.arange(1, batches + 1):
if verbose >= 0:
print('preparing batch {}:'.format(i))
neg_stns = []
for stn in stns:
dts_df = df_stns[stn].dropna()
pwv = pwv_df[stn].dropna()
# loop over all events in on stn:
negatives = []
negatives_pressure = []
# neg_samples = []
if verbose >= 1:
print('finding negatives for station {}, events={}'.format(
stn, len(dts_df)))
# print('finding negatives for station {}, dt={}'.format(stn, dt.strftime('%Y-%m-%d %H:%M')))
cnt = 0
while cnt < len(dts_df):
# get random number from each stn pwv:
# r = np.random.randint(low=0, high=len(pwv.index))
# random_dt = pwv.index[r]
random_dt = np.random.choice(dts_to_choose_from)
negative_dt_range = pd.date_range(
start=random_dt, periods=24, freq='H')
if not (no_choice_dt_range.intersection(negative_dt_range)).empty:
# print('#')
if verbose >= 2:
print('Overlap!')
continue
# get the actual pwv and check it is full (24hours):
negative = pwv.loc[pwv.index.intersection(negative_dt_range)]
neg_pressure = pressure.loc[pwv.index.intersection(
negative_dt_range)]
if len(negative.dropna()) != 24 or len(neg_pressure.dropna()) != 24:
# print('!')
if verbose >= 2:
print('NaNs!')
continue
if verbose >= 2:
print('number of dts that are already chosen: {}'.format(
len(no_choice_dt_range)))
negatives.append(negative)
negatives_pressure.append(neg_pressure)
# now add to the no_choice_dt_range the negative dt_range we just aquired:
negative_dt_range_with_padding = pd.date_range(
start=random_dt-pd.Timedelta(24, unit='H'), end=random_dt+pd.Timedelta(23, unit='H'), freq='H')
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack([no_choice_dt_range, negative_dt_range_with_padding])))
dts_to_choose_from = dts_to_choose_from.difference(
no_choice_dt_range)
if verbose >= 2:
print('number of dts to choose from: {}'.format(
len(dts_to_choose_from)))
cnt += 1
neg_da = xr.DataArray(negatives, dims=['sample', 'feature'])
neg_da['feature'] = ['{}_{}'.format(
'pwv', x) for x in np.arange(1, 25)]
neg_samples = [x.index[0] for x in negatives]
neg_da['sample'] = neg_samples
neg_pre_da = xr.DataArray(
negatives_pressure, dims=['sample', 'feature'])
neg_pre_da['feature'] = ['{}_{}'.format(
'pressure', x) for x in np.arange(1, 25)]
neg_pre_samples = [x.index[0] for x in negatives_pressure]
neg_pre_da['sample'] = neg_pre_samples
neg_da = xr.concat([neg_da, neg_pre_da], 'feature')
neg_da = neg_da.sortby('sample')
neg_stns.append(neg_da)
da_stns = xr.concat(neg_stns, 'sample')
da_stns = da_stns.sortby('sample')
# now loop over the remaining features (which are stns agnostic)
# and add them with the same negative datetimes of the pwv already aquired:
dts = [pd.date_range(x.item(), periods=24, freq='H')
for x in da_stns['sample']]
dts_samples = [x[0] for x in dts]
other_feat_list = []
for feat in feats[other_feats]:
# other_feat_sample_list = []
da_other = xr.DataArray(feats[feat].sel(time=dts_samples).values, dims=['sample'])
# for dt in dts_samples:
# da_other = xr.DataArray(feats[feat].sel(
# time=dt).values, dims=['feature'])
da_other['sample'] = dts_samples
other_feat_list.append(da_other)
# other_feat_da = xr.concat(other_feat_sample_list, 'feature')
da_other_feats = xr.concat(other_feat_list, 'feature')
da_other_feats['feature'] = other_feats
da_stns = xr.concat([da_stns, da_other_feats], 'feature')
neg_batches.append(da_stns)
neg_batch_da = xr.concat(neg_batches, 'sample')
# neg_batch_da['batch'] = np.arange(1, batches + 1)
neg_batch_da.name = 'X_neg'
feats['X_neg'] = neg_batch_da
feats['X_pos'] = all_tides
feats['X_pwv_stns'] = tides
# feats['tide_datetimes'] = pos_tides
feats = feats.rename({'sample': 'negative_sample'})
if std:
filename = 'hydro_tides_hourly_features_with_positives_negatives_std_{}.nc'.format(
batches)
else:
filename = 'hydro_tides_hourly_features_with_positives_negatives_{}.nc'.format(
batches)
save_ncfile(feats, hydro_path, filename)
return neg_batch_da
def produce_positives_from_feature_file(hydro_path=hydro_path, std=True):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import save_ncfile
# load features:
if std:
file = hydro_path / 'hydro_tides_hourly_features_std.nc'
else:
file = hydro_path / 'hydro_tides_hourly_features.nc'
feats = xr.load_dataset(file)
feats = feats.rename({'doy': 'DOY'})
# load positive event for each station:
dfs = [read_station_from_tide_database(hydro_pw_dict.get(
x), rounding='1H') for x in hydro_pw_dict.keys()]
dfs = check_if_tide_events_from_stations_are_within_time_window(
dfs, days=1, rounding=None, return_hs_list=True)
da_list = []
positives_per_station = []
for i, feat in enumerate(feats):
try:
_, _, pr = produce_pwv_days_before_tide_events(feats[feat], dfs[i],
plot=False, rolling=None,
days_prior=1,
drop_thresh=0.75,
max_gap='6H',
verbose=0)
print('getting positives from station {}'.format(feat))
positives = [pd.to_datetime(
(x[-1].time + pd.Timedelta(1, unit='H')).item()) for x in pr]
da = xr.DataArray(pr, dims=['sample', 'feature'])
da['sample'] = positives
positives_per_station.append(positives)
da['feature'] = ['pwv_{}'.format(x) for x in np.arange(1, 25)]
da_list.append(da)
except IndexError:
continue
da_pwv = xr.concat(da_list, 'sample')
da_pwv = da_pwv.sortby('sample')
# now add more features:
da_list = []
for feat in ['bet-dagan']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt_end in da_pwv.sample:
dt_st = pd.to_datetime(dt_end.item()) - pd.Timedelta(24, unit='H')
dt_end_end = pd.to_datetime(
dt_end.item()) - pd.Timedelta(1, unit='H')
positive = feats[feat].sel(time=slice(dt_st, dt_end_end))
positives.append(positive)
da = xr.DataArray(positives, dims=['sample', 'feature'])
da['sample'] = da_pwv.sample
if feat == 'bet-dagan':
feat_name = 'pressure'
else:
feat_name = feat
da['feature'] = ['{}_{}'.format(feat_name, x)
for x in np.arange(1, 25)]
da_list.append(da)
da_f = xr.concat(da_list, 'feature')
da_list = []
for feat in ['DOY', 'doy_sin', 'doy_cos']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt in da_pwv.sample:
positive = feats[feat].sel(time=dt)
positives.append(positive)
da = xr.DataArray(positives, dims=['sample'])
da['sample'] = da_pwv.sample
# da['feature'] = feat
da_list.append(da)
da_ff = xr.concat(da_list, 'feature')
da_ff['feature'] = ['DOY', 'doy_sin', 'doy_cos']
da = xr.concat([da_pwv, da_f, da_ff], 'feature')
if std:
filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
filename = 'hydro_tides_hourly_features_with_positives.nc'
feats['X_pos'] = da
# now add positives per stations:
pdf = pd.DataFrame(positives_per_station).T
pdf.index.name = 'tide_event'
pos_da = pdf.to_xarray().to_array('GNSS')
pos_da['GNSS'] = [x for x in hydro_pw_dict.keys()]
pos_da.attrs['info'] = 'contains the datetimes of the tide events per GNSS station.'
feats['Tides'] = pos_da
# rename sample to positive sample:
feats = feats.rename({'sample': 'positive_sample'})
save_ncfile(feats, hydro_path, filename)
return feats
def prepare_features_and_save_hourly(work_path=work_yuval, ims_path=ims_path,
savepath=hydro_path, std=True):
import xarray as xr
from aux_gps import save_ncfile
import numpy as np
# pwv = xr.load_dataset(
if std:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc'
pre_filename = 'IMS_BD_hourly_anoms_std_ps_1964-2020.nc'
else:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc'
pre_filename = 'IMS_BD_hourly_anoms_ps_1964-2020.nc'
# work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pwv = xr.load_dataset(work_path / pwv_filename)
pwv_stations = [x for x in hydro_pw_dict.keys()]
pwv = pwv[pwv_stations]
# pwv = pwv.rolling(time=12, keep_attrs=True).mean(keep_attrs=True)
pwv = pwv.resample(time='1H', keep_attrs=True).mean(keep_attrs=True)
# bd = xr.load_dataset(ims_path / 'IMS_BD_anoms_5min_ps_1964-2020.nc')
bd = xr.load_dataset(ims_path / pre_filename)
# min_time = pwv.dropna('time')['time'].min()
# bd = bd.sel(time=slice('1996', None)).resample(time='1H').mean()
bd = bd.sel(time=slice('1996', None))
pressure = bd['bet-dagan']
doy = pwv['time'].copy(data=pwv['time'].dt.dayofyear)
doy.name = 'doy'
doy_sin = np.sin(doy * np.pi / 183)
doy_sin.name = 'doy_sin'
doy_cos = np.cos(doy * np.pi / 183)
doy_cos.name = 'doy_cos'
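    # Cyclic day-of-year encoding: pi/183 is ~2*pi/366, so doy_sin/doy_cos map
    # day 1 and day 365/366 to neighbouring points on the unit circle instead
    # of the artificial jump a raw DOY feature has at the year boundary.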
ds = xr.merge([pwv, pressure, doy, doy_sin, doy_cos])
if std:
filename = 'hydro_tides_hourly_features_std.nc'
else:
filename = 'hydro_tides_hourly_features.nc'
save_ncfile(ds, savepath, filename)
return ds
def plot_all_decompositions(X, y, n=2):
import xarray as xr
models = [
'PCA',
'LDA',
'ISO_MAP',
'LLE',
'LLE-modified',
'LLE-hessian',
'LLE-ltsa',
'MDA',
'RTE',
'SE',
'TSNE',
'NCA']
names = [
'Principal Components',
'Linear Discriminant',
'Isomap',
'Locally Linear Embedding',
'Modified LLE',
'Hessian LLE',
'Local Tangent Space Alignment',
'MDS embedding',
'Random forest',
'Spectral embedding',
't-SNE',
'NCA embedding']
name_dict = dict(zip(models, names))
da = xr.DataArray(models, dims=['model'])
da['model'] = models
fg = xr.plot.FacetGrid(da, col='model', col_wrap=4,
sharex=False, sharey=False)
for model_str, ax in zip(da['model'].values, fg.axes.flatten()):
model = model_str.split('-')[0]
method = model_str.split('-')[-1]
if model == method:
method = None
try:
ax = scikit_decompose(X, y, model=model, n=n, method=method, ax=ax)
except ValueError:
pass
ax.set_title(name_dict[model_str])
ax.set_xlabel('')
ax.set_ylabel('')
fg.fig.suptitle('various decomposition projections (n={})'.format(n))
return
def scikit_decompose(X, y, model='PCA', n=2, method=None, ax=None):
from sklearn import (manifold, decomposition, ensemble,
discriminant_analysis, neighbors)
import matplotlib.pyplot as plt
import pandas as pd
# from mpl_toolkits.mplot3d import Axes3D
n_neighbors = 30
if model == 'PCA':
X_decomp = decomposition.TruncatedSVD(n_components=n).fit_transform(X)
elif model == 'LDA':
X2 = X.copy()
X2.values.flat[::X.shape[1] + 1] += 0.01
X_decomp = discriminant_analysis.LinearDiscriminantAnalysis(n_components=n
).fit_transform(X2, y)
elif model == 'ISO_MAP':
X_decomp = manifold.Isomap(
n_neighbors, n_components=n).fit_transform(X)
elif model == 'LLE':
# method = 'standard', 'modified', 'hessian' 'ltsa'
if method is None:
method = 'standard'
        clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=n,
                                              method=method)
X_decomp = clf.fit_transform(X)
elif model == 'MDA':
clf = manifold.MDS(n_components=n, n_init=1, max_iter=100)
X_decomp = clf.fit_transform(X)
elif model == 'RTE':
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=n)
X_decomp = pca.fit_transform(X_transformed)
elif model == 'SE':
embedder = manifold.SpectralEmbedding(n_components=n, random_state=0,
eigen_solver="arpack")
X_decomp = embedder.fit_transform(X)
elif model == 'TSNE':
tsne = manifold.TSNE(n_components=n, init='pca', random_state=0)
X_decomp = tsne.fit_transform(X)
elif model == 'NCA':
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=n, random_state=0)
X_decomp = nca.fit_transform(X, y)
df = pd.DataFrame(X_decomp)
df.columns = [
'{}_{}'.format(
model,
x +
1) for x in range(
X_decomp.shape[1])]
df['flood'] = y
df['flood'] = df['flood'].astype(int)
df_1 = df[df['flood'] == 1]
df_0 = df[df['flood'] == 0]
if X_decomp.shape[1] == 1:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='r', marker='x',
label='0',
s=50)
elif X_decomp.shape[1] == 2:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='r',
label='0',
s=50)
elif X_decomp.shape[1] == 3:
ax = plt.figure().gca(projection='3d')
# df_1.plot.scatter(x='{}_1'.format(model), y='{}_2'.format(model), z='{}_3'.format(model), color='b', label='1', s=50, ax=threedee)
ax.scatter(df_1['{}_1'.format(model)],
df_1['{}_2'.format(model)],
df_1['{}_3'.format(model)],
color='b',
label='1',
s=50)
ax.scatter(df_0['{}_1'.format(model)],
df_0['{}_2'.format(model)],
df_0['{}_3'.format(model)],
color='r',
label='0',
s=50)
ax.set_xlabel('{}_1'.format(model))
ax.set_ylabel('{}_2'.format(model))
ax.set_zlabel('{}_3'.format(model))
return ax
def permutation_scikit(X, y, cv=False, plot=True):
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
if not cv:
clf = SVC(C=0.01, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma=0.032374575428176434,
kernel='poly', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
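        # NOTE: the tuned SVC above is immediately overridden by the simpler
        # linear-kernel SVC below; comment the next line out to use the tuned one.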
clf = SVC(kernel='linear')
# clf = LinearDiscriminantAnalysis()
cv = StratifiedKFold(4, shuffle=True)
# cv = KFold(4, shuffle=True)
n_classes = 2
score, permutation_scores, pvalue = permutation_test_score(
clf, X, y, scoring="f1", cv=cv, n_permutations=1000, n_jobs=-1, verbose=2)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
else:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=42)
param_grid = {
'C': np.logspace(-2, 3, 50), 'gamma': np.logspace(-2, 3, 50),
'kernel': ['rbf', 'poly', 'sigmoid']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)
print(grid.best_estimator_)
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test, grid_predictions))
print(classification_report(y_test, grid_predictions))
return
def grab_y_true_and_predict_from_sklearn_model(model, X, y, cv,
kfold_name='inner_kfold'):
from sklearn.model_selection import GridSearchCV
import xarray as xr
import numpy as np
if isinstance(model, GridSearchCV):
model = model.best_estimator_
ds_list = []
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_true = y[val]
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
y_true_da = xr.DataArray(y_true, dims=['sample'])
y_pred_da = xr.DataArray(y_pred, dims=['sample'])
y_prob_da = xr.DataArray(lr_probs, dims=['sample'])
ds = xr.Dataset()
ds['y_true'] = y_true_da
ds['y_pred'] = y_pred_da
ds['y_prob'] = y_prob_da
ds['sample'] = np.arange(0, len(X[val]))
ds_list.append(ds)
ds = xr.concat(ds_list, kfold_name)
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
return ds
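# A minimal usage sketch for the helper above (illustrative only; X, y are
# assumed to be array-like feature/target data as used throughout this module):
# >>> from sklearn.svm import SVC
# >>> from sklearn.model_selection import StratifiedKFold
# >>> cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
# >>> ds = grab_y_true_and_predict_from_sklearn_model(SVC(probability=True), X, y, cv)
# >>> # ds holds y_true, y_pred and y_prob per fold along the 'inner_kfold' dim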
def produce_ROC_curves_from_model(model, X, y, cv, kfold_name='inner_kfold'):
import numpy as np
import xarray as xr
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# TODO: collect all predictions and y_tests from this, also predict_proba
    # and save, then calculate everything elsewhere.
if isinstance(model, GridSearchCV):
model = model.best_estimator_
tprs = []
aucs = []
pr = []
pr_aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
precision, recall, _ = precision_recall_curve(y[val], lr_probs)
pr.append(recall)
average_precision = average_precision_score(y[val], y_pred)
pr_aucs.append(average_precision)
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
# mean_auc = auc(mean_fpr, mean_tpr)
# std_auc = np.std(aucs)
# std_tpr = np.std(tprs, axis=0)
tpr_da = xr.DataArray(tprs, dims=[kfold_name, 'fpr'])
auc_da = xr.DataArray(aucs, dims=[kfold_name])
ds = xr.Dataset()
ds['TPR'] = tpr_da
ds['AUC'] = auc_da
ds['fpr'] = mean_fpr
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
# variability for each tpr is ds['TPR'].std('kfold')
return ds
def cross_validation_with_holdout(X, y, model_name='SVC', features='pwv',
n_splits=3, test_ratio=0.25,
scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
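    # tss_score / hss_score are custom skill metrics (True Skill Statistic and
    # Heidke Skill Score, respectively), assumed to be defined elsewhere in this module.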
X = select_doy_from_feature_list(X, model_name, features)
if param_grid == 'light':
print(np.unique(X.feature.values))
# first take out the hold-out set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=seed,
stratify=y)
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
    # fit only on the training part; X_test/y_test are kept aside as the hold-out set
    gr_search.fit(X_train, y_train)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_holdout_{}_{}_{}_{}_{}_{}_{}.pkl'.format(
model_name, '+'.join(features), '+'.join(scorers), n_splits,
int(test_ratio*100), param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
# gr, _ = process_gridsearch_results(
# gr_search, model_name, split_dim='kfold', features=X.feature.values)
# remove_digits = str.maketrans('', '', digits)
# features = list(set([x.translate(remove_digits).split('_')[0]
# for x in X.feature.values]))
# # add more attrs, features etc:
# gr.attrs['features'] = features
return gr_search
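# A usage sketch (illustrative; X, y are assumed to come from the feature/target
# preparation routines in this module, e.g. the hourly features file above):
# >>> gr = cross_validation_with_holdout(X, y, model_name='SVC', features='pwv',
# ...                                    n_splits=3, test_ratio=0.25,
# ...                                    savepath=hydro_path)
# >>> gr.cv_results_   # per-fold scores for every hyper-parameter combination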
def select_doy_from_feature_list(X, model_name='RF', features='pwv'):
# first if RF chosen, replace the cyclic coords of DOY (sin and cos) with
# the DOY itself.
if isinstance(features, list):
feats = features.copy()
else:
feats = features
if model_name == 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(features, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(features, str):
feats = ['doy_sin']
feats.append('doy_cos')
X = select_features_from_X(X, feats)
return X
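# Illustrative examples of the doy handling above (assuming X has a 'feature' coord):
# >>> select_doy_from_feature_list(X, model_name='RF', features=['pwv', 'doy'])
# ...   # 'doy' is swapped for the raw 'DOY' feature (trees can split on it directly)
# >>> select_doy_from_feature_list(X, model_name='SVC', features=['pwv', 'doy'])
# ...   # 'doy' expands to the cyclic 'doy_sin' + 'doy_cos' pair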
def single_cross_validation(X_val, y_val, model_name='SVC', features='pwv',
n_splits=4, scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None, outer_split='1-1'):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X_val, model_name, features)
y = y_val
if param_grid == 'light':
print(np.unique(X.feature.values))
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(
n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
if outer_split == '1-1':
cv_type = 'holdout'
print('holdout cv is selected.')
else:
cv_type = 'nested'
print('nested cv {} out of {}.'.format(
outer_split.split('-')[0], outer_split.split('-')[1]))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}_{}_{}.pkl'.format(cv_type,
model_name, '+'.join(features), '+'.join(
scorers), n_splits,
outer_split, param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
return gr_search
def save_cv_params_to_file(cv_obj, path, name):
import pandas as pd
di = vars(cv_obj)
splitter_type = cv_obj.__repr__().split('(')[0]
di['splitter_type'] = splitter_type
(pd.DataFrame.from_dict(data=di, orient='index')
.to_csv(path / '{}.csv'.format(name), header=False))
print('{}.csv saved to {}.'.format(name, path))
return
def read_cv_params_and_instantiate(filepath):
import pandas as pd
from sklearn.model_selection import StratifiedKFold
df = pd.read_csv(filepath, header=None, index_col=0)
d = {}
for row in df.iterrows():
dd = pd.to_numeric(row[1], errors='ignore')
if dd.item() == 'True' or dd.item() == 'False':
dd = dd.astype(bool)
d[dd.to_frame().columns.item()] = dd.item()
s_type = d.pop('splitter_type')
if s_type == 'StratifiedKFold':
cv = StratifiedKFold(**d)
return cv
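# Round-trip sketch (illustrative):
# >>> from sklearn.model_selection import StratifiedKFold
# >>> cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
# >>> save_cv_params_to_file(cv, hydro_path, 'outer_cv_params')
# >>> cv2 = read_cv_params_and_instantiate(hydro_path / 'outer_cv_params.csv')
# note: only StratifiedKFold is re-instantiated here; other splitter types would
# need to be added to read_cv_params_and_instantiate.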
def nested_cross_validation_procedure(X, y, model_name='SVC', features='pwv',
outer_splits=4, inner_splits=2,
refit_scorer='roc_auc',
scorers=['f1', 'recall', 'tss', 'hss',
'roc_auc', 'precision',
'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1):
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.inspection import permutation_importance
from string import digits
import numpy as np
import xarray as xr
assert refit_scorer in scorers
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
# if model_name == 'RF':
# doy = X['sample'].dt.dayofyear
# sel_doy = [x for x in X.feature.values if 'doy_sin' in x]
# doy_X = doy.broadcast_like(X.sel(feature=sel_doy))
# doy_X['feature'] = [
# 'doy_{}'.format(x) for x in range(
# doy_X.feature.size)]
# no_doy = [x for x in X.feature.values if 'doy' not in x]
# X = X.sel(feature=no_doy)
# X = xr.concat([X, doy_X], 'feature')
# else:
# # first slice X for features:
# if isinstance(features, str):
# f = [x for x in X.feature.values if features in x]
# X = X.sel(feature=f)
# elif isinstance(features, list):
# fs = []
# for f in features:
# fs += [x for x in X.feature.values if f in x]
# X = X.sel(feature=fs)
if param_grid == 'light':
print(np.unique(X.feature.values))
# configure the cross-validation procedure
cv_inner = StratifiedKFold(n_splits=inner_splits, shuffle=True,
random_state=seed)
print('Inner CV StratifiedKfolds of {}.'.format(inner_splits))
# define the model and search space:
ml = ML_Classifier_Switcher()
if param_grid == 'light':
        print('diagnostic mode light.')
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv_inner, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=refit_scorer, return_train_score=True)
# gr.fit(X, y)
# configure the cross-validation procedure
cv_outer = StratifiedKFold(
n_splits=outer_splits, shuffle=True, random_state=seed)
# execute the nested cross-validation
scores_est_dict = cross_validate(gr_search, X, y,
scoring=scores_dict,
cv=cv_outer, n_jobs=n_jobs,
return_estimator=True, verbose=verbose)
# perm = []
# for i, (train, val) in enumerate(cv_outer.split(X, y)):
# gr_model = scores_est_dict['estimator'][i]
# gr_model.fit(X[train], y[train])
# r = permutation_importance(gr_model, X[val], y[val],scoring='f1',
# n_repeats=30, n_jobs=-1,
# random_state=0)
# perm.append(r)
# get the test scores:
test_keys = [x for x in scores_est_dict.keys() if 'test' in x]
ds = xr.Dataset()
for key in test_keys:
ds[key] = xr.DataArray(scores_est_dict[key], dims=['outer_kfold'])
preds_ds = []
gr_ds = []
for est in scores_est_dict['estimator']:
gr, _ = process_gridsearch_results(
est, model_name, split_dim='inner_kfold', features=X.feature.values)
# somehow save gr:
gr_ds.append(gr)
preds_ds.append(
grab_y_true_and_predict_from_sklearn_model(est, X, y, cv_inner))
# tpr_ds.append(produce_ROC_curves_from_model(est, X, y, cv_inner))
dss = xr.concat(preds_ds, 'outer_kfold')
gr_dss = xr.concat(gr_ds, 'outer_kfold')
dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
gr_dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
    # aggregate results:
dss = xr.merge([ds, dss])
dss = xr.merge([dss, gr_dss])
dss.attrs = gr_dss.attrs
dss.attrs['outer_kfold_splits'] = outer_splits
remove_digits = str.maketrans('', '', digits)
features = list(set([x.translate(remove_digits).split('_')[0]
for x in X.feature.values]))
# add more attrs, features etc:
dss.attrs['features'] = features
# rename major data_vars with model name:
# ys = [x for x in dss.data_vars if 'y_' in x]
# new_ys = [y + '_{}'.format(model_name) for y in ys]
# dss = dss.rename(dict(zip(ys, new_ys)))
# new_test_keys = [y + '_{}'.format(model_name) for y in test_keys]
# dss = dss.rename(dict(zip(test_keys, new_test_keys)))
# if isinstance(X.attrs['pwv_id'], list):
# dss.attrs['pwv_id'] = '-'.join(X.attrs['pwv_id'])
# else:
# dss.attrs['pwv_id'] = X.attrs['pwv_id']
# if isinstance(y.attrs['hydro_station_id'], list):
# dss.attrs['hs_id'] = '-'.join([str(x) for x in y.attrs['hydro_station_id']])
# else:
# dss.attrs['hs_id'] = y.attrs['hydro_station_id']
# dss.attrs['hydro_max_flow'] = y.attrs['max_flow']
# dss.attrs['neg_pos_ratio'] = y.attrs['neg_pos_ratio']
# save results to file:
if savepath is not None:
save_cv_results(dss, savepath=savepath)
return dss
# def ML_main_procedure(X, y, estimator=None, model_name='SVC', features='pwv',
# val_size=0.18, n_splits=None, test_size=0.2, seed=42, best_score='f1',
# savepath=None, plot=True):
# """split the X,y for train and test, either do HP tuning using HP_tuning
# with val_size or use already tuned (or not) estimator.
# models to play with = MLP, RF and SVC.
# n_splits = 2, 3, 4.
# features = pwv, pressure.
# best_score = f1, roc_auc, accuracy.
# can do loop on them. RF takes the most time to tune."""
# X = select_features_from_X(X, features)
# X_train, X_test, y_train, y_test = train_test_split(X, y,
# test_size=test_size,
# shuffle=True,
# random_state=seed)
# # do HP_tuning:
# if estimator is None:
# cvr, model = HP_tuning(X_train, y_train, model_name=model_name, val_size=val_size, test_size=test_size,
# best_score=best_score, seed=seed, savepath=savepath, n_splits=n_splits)
# else:
# model = estimator
# if plot:
# ax = plot_many_ROC_curves(model, X_test, y_test, name=model_name,
# ax=None)
# return ax
# else:
# return model
def plot_hyper_parameters_heatmaps_from_nested_CV_model(dss, path=hydro_path, model_name='MLP',
features='pwv+pressure+doy', save=True):
import matplotlib.pyplot as plt
ds = dss.sel(features=features).reset_coords(drop=True)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
ds = ds[[x for x in ds if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
cmap_hp_dict = {
'alpha': seq, 'activation': cat,
'hidden_layer_sizes': cat, 'learning_rate': cat,
'solver': cat, 'kernel': cat, 'C': seq,
'gamma': seq, 'degree': seq, 'coef0': seq,
'max_depth': seq, 'max_features': cat,
'min_samples_leaf': seq, 'min_samples_split': seq,
'n_estimators': seq
}
    # for SVC, degree and coef0 are only meaningful for the poly kernel:
if model_name == 'SVC':
ds['degree'] = ds['degree'].where(ds['kernel']=='poly')
ds['coef0'] = ds['coef0'].where(ds['kernel']=='poly')
# da = ds.to_arrray('hyper_parameters')
# fg = xr.plot.FacetGrid(
# da,
# col='hyper_parameters',
# sharex=False,
# sharey=False, figsize=(16, 10))
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 10))
for i, da in enumerate(ds):
df = ds[da].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df.index.name = 'Outer Split'
try:
df = df.astype(float).round(2)
except ValueError:
pass
cmap = cmap_hp_dict.get(da, 'Set1')
plot_heatmap_for_hyper_parameters_df(df, ax=axes[i], title=da, cmap=cmap)
fig.tight_layout()
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_heatmaps_for_hyper_parameters_data_splits(df1, df2, axes=None,
cmap='colorblind',
title=None, fig=None,
cbar_params=[.92, .12, .03, .75],
fontsize=12,
val_type='float'):
import pandas as pd
import seaborn as sns
import numpy as np
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
df1 = df1.astype(eval(val_type))
df2 = df2.astype(eval(val_type))
arr = pd.concat([df1, df2], axis=0).values.ravel()
    value_to_int = {j: i for i, j in enumerate(
        np.unique(arr))}  # map each unique value to an integer code
# try:
# sorted_v_to_i = dict(sorted(value_to_int.items()))
# except TypeError:
# sorted_v_to_i = value_to_int
# print(value_to_int)
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap_list = sns.color_palette(cmap, n)
if val_type == 'float':
# print([value_to_int.keys()])
cbar_ticklabels = ['{:.2g}'.format(x) for x in value_to_int.keys()]
elif val_type == 'int':
cbar_ticklabels = [int(x) for x in value_to_int.keys()]
elif val_type == 'str':
cbar_ticklabels = [x for x in value_to_int.keys()]
if 'nan' in value_to_int.keys():
cmap_list[-1] = (0.5, 0.5, 0.5)
new_value_to_int = {}
for key, val in value_to_int.items():
try:
new_value_to_int[str(int(float(key)))] = val
except ValueError:
new_value_to_int['NR'] = val
cbar_ticklabels = [x for x in new_value_to_int.keys()]
# u1 = np.unique(df1.replace(value_to_int)).astype(int)
# cmap1 = [cmap_list[x] for x in u1]
# u2 = np.unique(df2.replace(value_to_int)).astype(int)
# cmap2 = [cmap_list[x] for x in u2]
# prepare normalizer
## Prepare bins for the normalizer
norm_bins = np.sort([*value_to_int.values()]) + 0.5
norm_bins = np.insert(norm_bins, 0, np.min(norm_bins) - 1.0)
# print(norm_bins)
## Make normalizer and formatter
norm = matplotlib.colors.BoundaryNorm(norm_bins, n, clip=True)
# normalizer = Normalize(np.array([x for x in value_to_int.values()])[0],np.array([x for x in value_to_int.values()])[-1])
# im=cm.ScalarMappable(norm=normalizer)
if axes is None:
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False)
# divider = make_axes_locatable([axes[0], axes[1]])
# cbar_ax = divider.append_axes('right', size='5%', pad=0.05)
cbar_ax = fig.add_axes(cbar_params)
sns.heatmap(df1.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[0], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
sns.heatmap(df2.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[1], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
# else:
# ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
# ax=ax, linewidth=1, linecolor='k',
# square=False, cbar_kws={"shrink": .9})
if title is not None:
axes[0].set_title(title, fontsize=fontsize)
for ax in axes:
ax.set_xticklabels(ax.get_xticklabels(), ha='right', va='top', rotation=45)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize, direction='out', bottom=True,
left=True, length=2)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
# colorbar = axes[0].collections[0].colorbar
# diff = norm_bins[1:] - norm_bins[:-1]
# tickz = norm_bins[:-1] + diff / 2
colorbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=matplotlib.colors.ListedColormap(cmap_list)), ax=[axes[0], axes[1]],
shrink=1, pad=0.05, cax=cbar_ax)
# colorbar = plt.gca().images[-1].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.ax.set_yticklabels(cbar_ticklabels, fontsize=fontsize-2)
return axes
def plot_hyper_parameters_heatmap_data_splits_per_model(dss4, dss5, fontsize=14,
save=True, model_name='SVC',
features='pwv+pressure+doy'):
import matplotlib.pyplot as plt
# import seaborn as sns
fig, axes = plt.subplots(2, 5, sharex=True, sharey=False ,figsize=(16, 5))
ds4 = dss4.sel(features=features).reset_coords(drop=True)
ds5 = dss5.sel(features=features).reset_coords(drop=True)
ds4 = ds4.reindex(scorer=scorer_order)
ds5 = ds5.reindex(scorer=scorer_order)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
if model_name == 'MLP':
adj_dict=dict(
top=0.946,
bottom=0.145,
left=0.046,
right=0.937,
hspace=0.121,
wspace=0.652)
cb_st = 0.167
cb_mul = 0.193
else:
adj_dict=dict(
wspace = 0.477,
top=0.921,
bottom=0.17,
left=0.046,
right=0.937,
hspace=0.121)
cb_st = 0.18
cb_mul = 0.19
ds4 = ds4[[x for x in ds4 if x not in non_hp_vars]]
ds5 = ds5[[x for x in ds5 if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
hp_dict = {
'alpha': ['Reds', 'float'], 'activation': ['Set1_r', 'str'],
'hidden_layer_sizes': ['Paired', 'str'], 'learning_rate': ['Spectral_r', 'str'],
'solver': ['Dark2', 'str'], 'kernel': ['Dark2', 'str'], 'C': ['Blues', 'float'],
'gamma': ['Oranges', 'float'], 'degree': ['Greens', 'str'], 'coef0': ['Spectral', 'str'],
'max_depth': ['Blues', 'int'], 'max_features': ['Dark2', 'str'],
'min_samples_leaf': ['Greens', 'int'], 'min_samples_split': ['Reds', 'int'],
'n_estimators': ['Oranges', 'int']
}
    # for SVC, degree and coef0 are only meaningful for the poly kernel:
if model_name == 'SVC':
ds4['degree'] = ds4['degree'].where(ds4['kernel']=='poly')
ds4['coef0'] = ds4['coef0'].where(ds4['kernel']=='poly')
ds5['degree'] = ds5['degree'].where(ds5['kernel']=='poly')
ds5['coef0'] = ds5['coef0'].where(ds5['kernel']=='poly')
for i, (da4, da5) in enumerate(zip(ds4, ds5)):
df4 = ds4[da4].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df5 = ds5[da5].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df4.index.name = 'Outer Split'
df5.index.name = 'Outer Split'
# try:
# df4 = df4.astype(float).round(2)
# df5 = df5.astype(float).round(2)
# except ValueError:
# pass
cmap = hp_dict.get(da4, 'Set1')[0]
val_type = hp_dict.get(da4, 'int')[1]
cbar_params = [cb_st + cb_mul*float(i), .175, .01, .71]
plot_heatmaps_for_hyper_parameters_data_splits(df4,
df5,
axes=[axes[0, i], axes[1, i]],
fig=fig,
title=da4,
cmap=cmap,
cbar_params=cbar_params,
fontsize=fontsize,
val_type=val_type)
if i > 0 :
axes[0, i].set_ylabel('')
axes[0, i].yaxis.set_tick_params(labelleft=False)
axes[1, i].set_ylabel('')
axes[1, i].yaxis.set_tick_params(labelleft=False)
fig.tight_layout()
fig.subplots_adjust(**adj_dict)
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_heatmap_for_hyper_parameters_df(df, ax=None, cmap='colorblind',
title=None, fontsize=12):
import pandas as pd
import seaborn as sns
import numpy as np
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
    value_to_int = {j: i for i, j in enumerate(
        sorted(pd.unique(df.values.ravel())))}  # map each unique value to an integer code
# for key in value_to_int.copy().keys():
# try:
# if np.isnan(key):
# value_to_int['NA'] = value_to_int.pop(key)
# df = df.fillna('NA')
# except TypeError:
# pass
try:
sorted_v_to_i = dict(sorted(value_to_int.items()))
except TypeError:
sorted_v_to_i = value_to_int
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap = sns.color_palette(cmap, n)
if ax is None:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
linewidth=1, linecolor='k', square=False,
cbar_kws={"shrink": .9})
else:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
ax=ax, linewidth=1, linecolor='k',
square=False, cbar_kws={"shrink": .9})
if title is not None:
ax.set_title(title, fontsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
colorbar = ax.collections[0].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.set_ticklabels(list(value_to_int.keys()))
return ax
# def plot_ROC_curves_for_all_models_and_scorers(dss, save=False,
# fontsize=24, fig_split=1,
# feat=['pwv', 'pwv+pressure', 'pwv+pressure+doy']):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# import pandas as pd
# cmap = sns.color_palette('tab10', len(feat))
# sns.set_style('whitegrid')
# sns.set_style('ticks')
# if fig_split == 1:
# dss = dss.sel(scorer=['precision', 'recall', 'f1'])
# elif fig_split == 2:
# dss = dss.sel(scorer=['accuracy', 'tss', 'hss'])
# fg = xr.plot.FacetGrid(
# dss,
# col='model',
# row='scorer',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = dss['model'].isel(model=j).item()
# scorer = dss['scorer'].isel(scorer=i).item()
# chance_plot = [False for x in feat]
# chance_plot[-1] = True
# for k, f in enumerate(feat):
# # name = '{}-{}-{}'.format(modelname, scoring, feat)
# # model = dss.isel({'model': j, 'scoring': i}).sel(
# # {'features': feat})
# model = dss.isel({'model': j, 'scorer': i}
# ).sel({'features': f})
# # return model
# title = 'ROC of {} model ({})'.format(modelname.replace('SVC', 'SVM'), scorer)
# try:
# ax = plot_ROC_curve_from_dss_nested_CV(model, outer_dim='outer_split',
# plot_chance=[k],
# main_label=f,
# ax=ax,
# color=cmap[k], title=title,
# fontsize=fontsize)
# except ValueError:
# ax.grid('on')
# continue
# handles, labels = ax.get_legend_handles_labels()
# lh_ser = pd.Series(labels, index=handles).drop_duplicates()
# lh_ser = lh_ser.sort_values(ascending=False)
# hand = lh_ser.index.values
# labe = lh_ser.values
# ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
# fontsize=fontsize-7)
# ax.grid('on')
# if j >= 1:
# ax.set_ylabel('')
# if fig_split == 1:
# ax.set_xlabel('')
# ax.tick_params(labelbottom=False)
# else:
# if i <= 1:
# ax.set_xlabel('')
# # title = '{} station: {} total events'.format(
# # station.upper(), events)
# # if max_flow > 0:
# # title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# # station.upper(), events, max_flow)
# # fg.fig.suptitle(title, fontsize=fontsize)
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# filename = 'ROC_curves_nested_{}_figsplit_{}.png'.format(
# dss['outer_split'].size, fig_split)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
# return fg
def plot_hydro_ML_models_results_from_dss(dss, std_on='outer',
save=False, fontsize=16,
plot_type='ROC', split=1,
feat=['pwv', 'pressure+pwv', 'doy+pressure+pwv']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
cmap = sns.color_palette("colorblind", len(feat))
if split == 1:
dss = dss.sel(scoring=['f1', 'precision', 'recall'])
elif split == 2:
dss = dss.sel(scoring=['tss', 'hss', 'roc-auc', 'accuracy'])
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
chance_plot = [False for x in feat]
chance_plot[-1] = True
for k, f in enumerate(feat):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = dss.isel({'model': j, 'scoring': i}).sel(
# {'features': feat})
model = dss.isel({'model': j, 'scoring': i}
).sel({'features': f})
title = '{} of {} model ({})'.format(
plot_type, modelname, scoring)
try:
plot_ROC_PR_curve_from_dss(model, outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=[k],
main_label=f, plot_type=plot_type,
plot_std_legend=False, ax=ax,
color=cmap[k], title=title,
std_on=std_on, fontsize=fontsize)
except ValueError:
ax.grid('on')
continue
handles, labels = ax.get_legend_handles_labels()
hand = pd.Series(
labels, index=handles).drop_duplicates().index.values
labe = pd.Series(labels, index=handles).drop_duplicates().values
ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
fontsize=14)
ax.grid('on')
# title = '{} station: {} total events'.format(
# station.upper(), events)
# if max_flow > 0:
# title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# station.upper(), events, max_flow)
# fg.fig.suptitle(title, fontsize=fontsize)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.054,
left=0.039,
right=0.993,
hspace=0.173,
wspace=0.051)
if save:
filename = 'hydro_models_on_{}_{}_std_on_{}_{}.png'.format(
dss['inner_kfold'].size, dss['outer_kfold'].size,
std_on, plot_type)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
# def plot_hydro_ML_models_result(model_da, nsplits=2, station='drag',
# test_size=20, n_splits_plot=None, save=False):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# from sklearn.model_selection import train_test_split
# # TODO: add plot_roc_curve(model, X_other_station, y_other_station)
# # TODO: add pw_station, hs_id
# cmap = sns.color_palette("colorblind", 3)
# X, y = produce_X_y(station, hydro_pw_dict[station], neg_pos_ratio=1)
# events = int(y[y == 1].sum().item())
# model_da = model_da.sel(
# splits=nsplits,
# test_size=test_size).reset_coords(
# drop=True)
## just_pw = [x for x in X.feature.values if 'pressure' not in x]
## X_pw = X.sel(feature=just_pw)
# fg = xr.plot.FacetGrid(
# model_da,
# col='model',
# row='scoring',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = model_da['model'].isel(model=j).item()
# scoring = model_da['scoring'].isel(scoring=i).item()
# chance_plot = [False, False, True]
# for k, feat in enumerate(model_da['feature'].values):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = model_da.isel({'model': j, 'scoring': i}).sel({'feature': feat}).item()
# title = 'ROC of {} model ({})'.format(modelname, scoring)
# if not '+' in feat:
# f = [x for x in X.feature.values if feat in x]
# X_f = X.sel(feature=f)
# else:
# X_f = X
# X_train, X_test, y_train, y_test = train_test_split(
# X_f, y, test_size=test_size/100, shuffle=True, random_state=42)
#
# plot_many_ROC_curves(model, X_f, y, name=name,
# color=cmap[k], ax=ax,
# plot_chance=chance_plot[k],
# title=title, n_splits=n_splits_plot)
# fg.fig.suptitle('{} station: {} total_events, test_events = {}, n_splits = {}'.format(station.upper(), events, int(events* test_size/100), nsplits))
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# plt.savefig(savefig_path / 'try.png', bbox_inches='tight')
# return fg
def order_features_list(flist):
""" order the feature list in load_ML_run_results
so i don't get duplicates"""
import pandas as pd
import numpy as np
# first get all features:
li = [x.split('+') for x in flist]
flat_list = [item for sublist in li for item in sublist]
f = list(set(flat_list))
nums = np.arange(1, len(f)+1)
    # now assign a number for each entry:
inds = []
for x in flist:
for fe, num in zip(f, nums):
x = x.replace(fe, str(10**num))
inds.append(eval(x))
ser = pd.Series(inds)
ser.index = flist
ser1 = ser.drop_duplicates()
di = dict(zip(ser1.values, ser1.index))
new_flist = []
for ind, feat in zip(inds, flist):
new_flist.append(di.get(ind))
return new_flist
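# e.g. (illustrative): order_features_list(['pwv+doy', 'doy+pwv', 'pwv'])
# returns ['pwv+doy', 'pwv+doy', 'pwv'] - permutations of the same feature set
# collapse onto the spelling of their first occurrence.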
def smart_add_dataarray_to_ds_list(dsl, da_name='feature_importances'):
"""add data array to ds_list even if it does not exist, use shape of
data array that exists in other part of ds list"""
import numpy as np
import xarray as xr
# print(da_name)
fi = [x for x in dsl if da_name in x][0]
print(da_name, fi[da_name].shape)
fi = fi[da_name].copy(data=np.zeros(shape=fi[da_name].shape))
new_dsl = []
for ds in dsl:
if da_name not in ds:
ds = xr.merge([ds, fi], combine_attrs='no_conflicts')
new_dsl.append(ds)
return new_dsl
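# e.g. if only the RF result files contain 'feature_importances', the other
# models' datasets get a zero-filled array of the same shape so that the later
# xr.concat over all runs does not fail.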
def load_ML_run_results(path=hydro_ml_path, prefix='CVR',
change_DOY_to_doy=True):
from aux_gps import path_glob
import xarray as xr
# from aux_gps import save_ncfile
import pandas as pd
import numpy as np
print('loading hydro ML results for all models and features')
# print('loading hydro ML results for station {}'.format(pw_station))
model_files = path_glob(path, '{}_*.nc'.format(prefix))
model_files = sorted(model_files)
# model_files = [x for x in model_files if pw_station in x.as_posix()]
ds_list = [xr.load_dataset(x) for x in model_files]
if change_DOY_to_doy:
for ds in ds_list:
if 'DOY' in ds.features:
new_feats = [x.replace('DOY', 'doy') for x in ds['feature'].values]
ds['feature'] = new_feats
ds.attrs['features'] = [x.replace('DOY', 'doy') for x in ds.attrs['features']]
model_as_str = [x.as_posix().split('/')[-1].split('.')[0]
for x in model_files]
model_names = [x.split('_')[1] for x in model_as_str]
model_scores = [x.split('_')[3] for x in model_as_str]
model_features = [x.split('_')[2] for x in model_as_str]
if change_DOY_to_doy:
model_features = [x.replace('DOY', 'doy') for x in model_features]
new_model_features = order_features_list(model_features)
ind = pd.MultiIndex.from_arrays(
[model_names,
new_model_features,
model_scores],
names=(
'model',
'features',
'scoring'))
# ind1 = pd.MultiIndex.from_product([model_names, model_scores, model_features], names=[
# 'model', 'scoring', 'feature'])
# ds_list = [x[data_vars] for x in ds_list]
    # complete non-existent fields like best and fi for all ds:
data_vars = [x for x in ds_list[0] if x.startswith('test')]
# data_vars += ['AUC', 'TPR']
data_vars += [x for x in ds_list[0] if x.startswith('y_')]
bests = [[x for x in y if x.startswith('best')] for y in ds_list]
data_vars += list(set([y for x in bests for y in x]))
if 'RF' in model_names:
data_vars += ['feature_importances']
new_ds_list = []
for dvar in data_vars:
ds_list = smart_add_dataarray_to_ds_list(ds_list, dvar)
# # check if all data vars are in each ds and merge them:
new_ds_list = [xr.merge([y[x] for x in data_vars if x in y],
combine_attrs='no_conflicts') for y in ds_list]
# concat all
dss = xr.concat(new_ds_list, dim='dim_0')
dss['dim_0'] = ind
dss = dss.unstack('dim_0')
# dss.attrs['pwv_id'] = pw_station
# fix roc_auc to roc-auc in dss datavars
dss = dss.rename_vars({'test_roc_auc': 'test_roc-auc'})
# dss['test_roc_auc'].name = 'test_roc-auc'
print('calculating ROC, PR metrics.')
dss = calculate_metrics_from_ML_dss(dss)
print('Done!')
return dss
def plot_nested_CV_test_scores(dss, feats=None, fontsize=16,
save=True, wv_label='pwv'):
import seaborn as sns
import matplotlib.pyplot as plt
from aux_gps import convert_da_to_long_form_df
import numpy as np
import xarray as xr
def change_width(ax, new_value) :
for patch in ax.patches :
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None):
import numpy as np
def _show_on_single_plot(ax, exclude_bar_num=3):
for i, p in enumerate(ax.patches):
                if exclude_bar_num is None or i != exclude_bar_num:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="right",
fontsize=fs, fontweight=fw, zorder=20)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax, exclude_bar_num)
else:
_show_on_single_plot(axs, exclude_bar_num)
splits = dss['outer_split'].size
try:
assert 'best' in dss.attrs['comment']
best = True
except AssertionError:
best = False
except KeyError:
best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['test_score'].to_dataframe()
# df['scorer'] = df.index.get_level_values(3)
# df['model'] = df.index.get_level_values(0)
# df['features'] = df.index.get_level_values(1)
# df['outer_splits'] = df.index.get_level_values(2)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars='test_score', id_vars=[
# 'features', 'model', 'scorer', 'outer_splits'], var_name='test_score',
# value_name='score')
da = dst['test_score']
if len(feats) == 5:
da_empty = da.isel(features=0).copy(
data=np.zeros(da.isel(features=0).shape))
da_empty['features'] = 'empty'
da = xr.concat([da, da_empty], 'features')
da = da.reindex(features=['doy', 'pressure', 'pwv',
'empty', 'pwv+pressure', 'pwv+pressure+doy'])
da.name = 'feature groups'
df = convert_da_to_long_form_df(da, value_name='score',
var_name='feature groups')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = sns.color_palette('tab10', n_colors=len(feats))
if len(feats) == 5:
cmap = ['tab:purple', 'tab:brown', 'tab:blue', 'tab:blue',
'tab:orange', 'tab:green']
fg = sns.FacetGrid(data=df, row='model', col='scorer', height=4, aspect=0.9)
# fg.map_dataframe(sns.stripplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, alpha=1, zorder=1, palette=cmap)
# fg.map_dataframe(sns.pointplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, join=False, palette=cmap,
# markers="o", scale=.75, ci=None)
fg.map_dataframe(sns.barplot, x='feature groups', y="score", hue='features',
ci='sd', capsize=None, errwidth=2, errcolor='k',
palette=cmap, dodge=True)
# g = sns.catplot(x='test_score', y="score", hue='features',
# col="scorer", row='model', ci='sd',
# data=df, kind="bar", capsize=0.25,
# height=4, aspect=1.5, errwidth=1.5)
#fg.set_xticklabels(rotation=45)
# fg.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize)
fg.set_ylabels('score')
[x.grid(True) for x in fg.axes.flatten()]
handles, labels = fg.axes[0, 0].get_legend_handles_labels()
if len(feats) == 5:
del handles[3]
del labels[3]
show_values_on_bars(fg.axes, fs=fontsize-4, exclude_bar_num=3)
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
scorer = dss['scorer'].isel(scorer=j).item()
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
ax.set_xlabel('')
ax.set_ylim(0, 1)
change_width(ax, 0.110)
fg.set_xlabels(' ')
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=len(feats), fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'ML_scores_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ML_scores_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_holdout_test_scores(dss, feats='pwv+pressure+doy'):
import seaborn as sns
import matplotlib.pyplot as plt
def show_values_on_bars(axs, fs=12, fw='bold'):
import numpy as np
def _show_on_single_plot(ax):
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="center", fontsize=fs, fontweight=fw)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
df = dst['holdout_test_scores'].to_dataframe()
df['scorer'] = df.index.droplevel(1).droplevel(0)
df['model'] = df.index.droplevel(2).droplevel(1)
df['features'] = df.index.droplevel(2).droplevel(0)
df['model'] = df['model'].str.replace('SVC', 'SVM')
df = df.melt(value_vars='holdout_test_scores', id_vars=[
'features', 'model', 'scorer'], var_name='test_score')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
g = sns.catplot(x="model", y="value", hue='features',
col="scorer", ci='sd', row=None,
col_wrap=3,
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5, errwidth=0.8)
g.set_xticklabels(rotation=45)
[x.grid(True) for x in g.axes.flatten()]
show_values_on_bars(g.axes)
filename = 'ML_scores_models_holdout_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def prepare_test_df_to_barplot_from_dss(dss, feats='doy+pwv+pressure',
plot=True, splitfigs=True):
import seaborn as sns
import matplotlib.pyplot as plt
dvars = [x for x in dss if 'test_' in x]
scores = [x.split('_')[-1] for x in dvars]
dst = dss[dvars]
# dst['scoring'] = [x+'_inner' for x in dst['scoring'].values]
# for i, ds in enumerate(dst):
# dst[ds] = dst[ds].sel(scoring=scores[i]).reset_coords(drop=True)
if feats is None:
feats = ['pwv', 'pressure+pwv', 'doy+pressure+pwv']
dst = dst.sel(features=feats) # .reset_coords(drop=True)
dst = dst.rename_vars(dict(zip(dvars, scores)))
# dst = dst.drop('scoring')
df = dst.to_dataframe()
# dfu = df
df['inner score'] = df.index.droplevel(2).droplevel(1).droplevel(0)
df['features'] = df.index.droplevel(2).droplevel(2).droplevel(1)
df['model'] = df.index.droplevel(2).droplevel(0).droplevel(1)
df = df.melt(value_vars=scores, id_vars=[
'features', 'model', 'inner score'], var_name='outer score')
# return dfu
# dfu.columns = dfu.columns.droplevel(1)
# dfu = dfu.T
# dfu['score'] = dfu.index
# dfu = dfu.reset_index()
# df = dfu.melt(value_vars=['MLP', 'RF', 'SVC'], id_vars=['score'])
df1 = df[(df['inner score']=='f1') | (df['inner score']=='precision') | (df['inner score']=='recall')]
df2 = df[(df['inner score']=='hss') | (df['inner score']=='tss') | (df['inner score']=='roc-auc') | (df['inner score']=='accuracy')]
if plot:
sns.set(font_scale = 1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
if splitfigs:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df1, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_1.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df2, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_2.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def calculate_metrics_from_ML_dss(dss):
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
import xarray as xr
import numpy as np
import pandas as pd
mean_fpr = np.linspace(0, 1, 100)
# fpr = dss['y_true'].copy(deep=False).values
# tpr = dss['y_true'].copy(deep=False).values
# y_true = dss['y_true'].values
# y_prob = dss['y_prob'].values
ok = [x for x in dss['outer_kfold'].values]
ik = [x for x in dss['inner_kfold'].values]
m = [x for x in dss['model'].values]
sc = [x for x in dss['scoring'].values]
f = [x for x in dss['features'].values]
# r = [x for x in dss['neg_pos_ratio'].values]
ind = pd.MultiIndex.from_product(
[ok, ik, m, sc, f],
names=[
'outer_kfold',
'inner_kfold',
'model',
'scoring',
'features']) # , 'station'])
okn = [x for x in range(dss['outer_kfold'].size)]
ikn = [x for x in range(dss['inner_kfold'].size)]
mn = [x for x in range(dss['model'].size)]
scn = [x for x in range(dss['scoring'].size)]
fn = [x for x in range(dss['features'].size)]
ds_list = []
for i in okn:
for j in ikn:
for k in mn:
for n in scn:
for m in fn:
ds = xr.Dataset()
y_true = dss['y_true'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_prob = dss['y_prob'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_true = y_true.dropna('sample')
y_prob = y_prob.dropna('sample')
if y_prob.size == 0:
# in case of NaNs in the results:
fpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
fpr_da['sample'] = [
x for x in range(fpr_da.size)]
tpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
tpr_da['sample'] = [
x for x in range(tpr_da.size)]
prn_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
prn_da['sample'] = [
x for x in range(prn_da.size)]
rcll_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
rcll_da['sample'] = [
x for x in range(rcll_da.size)]
tpr_fpr = xr.DataArray(
np.nan*np.ones((100)), dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
np.nan*np.ones((100)), dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
pr_auc_da = xr.DataArray(np.nan)
roc_auc_da = xr.DataArray(np.nan)
no_skill_da = xr.DataArray(np.nan)
else:
no_skill = len(
y_true[y_true == 1]) / len(y_true)
no_skill_da = xr.DataArray(no_skill)
fpr, tpr, _ = roc_curve(y_true, y_prob)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_true, y_prob)
prn, rcll, _ = precision_recall_curve(
y_true, y_prob)
interp_prn = np.interp(
mean_fpr, rcll[::-1], prn[::-1])
interp_prn[0] = 1.0
pr_auc_score = auc(rcll, prn)
roc_auc_da = xr.DataArray(roc_auc)
pr_auc_da = xr.DataArray(pr_auc_score)
prn_da = xr.DataArray(prn, dims=['sample'])
prn_da['sample'] = [x for x in range(len(prn))]
rcll_da = xr.DataArray(rcll, dims=['sample'])
rcll_da['sample'] = [
x for x in range(len(rcll))]
fpr_da = xr.DataArray(fpr, dims=['sample'])
fpr_da['sample'] = [x for x in range(len(fpr))]
tpr_da = xr.DataArray(tpr, dims=['sample'])
tpr_da['sample'] = [x for x in range(len(tpr))]
tpr_fpr = xr.DataArray(
interp_tpr, dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
interp_prn, dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
ds['fpr'] = fpr_da
ds['tpr'] = tpr_da
ds['roc-auc'] = roc_auc_da
ds['pr-auc'] = pr_auc_da
ds['prn'] = prn_da
ds['rcll'] = rcll_da
ds['TPR'] = tpr_fpr
ds['PRN'] = prn_rcll
ds['no_skill'] = no_skill_da
ds_list.append(ds)
ds = xr.concat(ds_list, 'dim_0')
ds['dim_0'] = ind
ds = ds.unstack()
ds.attrs = dss.attrs
ds['fpr'].attrs['long_name'] = 'False positive rate'
ds['tpr'].attrs['long_name'] = 'True positive rate'
ds['prn'].attrs['long_name'] = 'Precision'
ds['rcll'].attrs['long_name'] = 'Recall'
ds['roc-auc'].attrs['long_name'] = 'ROC or FPR-TPR Area under curve'
    ds['pr-auc'].attrs['long_name'] = 'Precision-Recall Area under curve'
ds['PRN'].attrs['long_name'] = 'Precision-Recall'
ds['TPR'].attrs['long_name'] = 'TPR-FPR (ROC)'
dss = xr.merge([dss, ds], combine_attrs='no_conflicts')
return dss
#
# def load_ML_models(path=hydro_ml_path, station='drag', prefix='CVM', suffix='.pkl'):
# from aux_gps import path_glob
# import joblib
# import matplotlib.pyplot as plt
# import seaborn as sns
# import xarray as xr
# import pandas as pd
# model_files = path_glob(path, '{}_*{}'.format(prefix, suffix))
# model_files = sorted(model_files)
# model_files = [x for x in model_files if station in x.as_posix()]
# m_list = [joblib.load(x) for x in model_files]
# model_files = [x.as_posix().split('/')[-1].split('.')[0] for x in model_files]
# # fix roc-auc:
# model_files = [x.replace('roc_auc', 'roc-auc') for x in model_files]
# print('loading {} station only.'.format(station))
# model_names = [x.split('_')[3] for x in model_files]
## model_pw_stations = [x.split('_')[1] for x in model_files]
## model_hydro_stations = [x.split('_')[2] for x in model_files]
# model_nsplits = [x.split('_')[6] for x in model_files]
# model_scores = [x.split('_')[5] for x in model_files]
# model_features = [x.split('_')[4] for x in model_files]
# model_test_sizes = []
# for file in model_files:
# try:
# model_test_sizes.append(int(file.split('_')[7]))
# except IndexError:
# model_test_sizes.append(20)
## model_pwv_hs_id = list(zip(model_pw_stations, model_hydro_stations))
## model_pwv_hs_id = ['_'.join(x) for x in model_pwv_hs_id]
## filename = 'CVR_{}_{}_{}_{}_{}.nc'.format(
##     name, features, refitted_scorer, ikfolds, okfolds)
# # transform model_dict to dataarray:
# tups = [tuple(x) for x in zip(model_names, model_scores, model_nsplits, model_features, model_test_sizes)] #, model_pwv_hs_id)]
# ind = pd.MultiIndex.from_tuples((tups), names=['model', 'scoring', 'splits', 'feature', 'test_size']) #, 'station'])
# da = xr.DataArray(m_list, dims='dim_0')
# da['dim_0'] = ind
# da = da.unstack('dim_0')
# da['splits'] = da['splits'].astype(int)
# da['test_size'].attrs['units'] = '%'
# return da
def plot_heatmaps_for_all_models_and_scorings(dss, var='roc-auc'): # , save=True):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
# assert station == dss.attrs['pwv_id']
cmaps = {'roc-auc': sns.color_palette("Blues", as_cmap=True),
'pr-auc': sns.color_palette("Greens", as_cmap=True)}
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(10, 20))
dss = dss.mean('inner_kfold', keep_attrs=True)
vmin, vmax = dss[var].min(), 1
norm = plt.Normalize(vmin=vmin, vmax=vmax)
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
model = dss[var].isel(
{'model': j, 'scoring': i}).reset_coords(drop=True)
df = model.to_dataframe()
title = '{} model ({})'.format(modelname, scoring)
df = df.unstack()
mean = df.mean()
mean.name = 'mean'
df = df.append(mean).T.droplevel(0)
ax = sns.heatmap(df, annot=True, cmap=cmaps[var], cbar=False,
ax=ax, norm=norm)
ax.set_title(title)
ax.vlines([4], 0, 10, color='r', linewidth=2)
if j > 0:
ax.set_ylabel('')
if i < 2:
ax.set_xlabel('')
cax = fg.fig.add_axes([0.1, 0.025, .8, .015])
fg.fig.colorbar(ax.get_children()[0], cax=cax, orientation="horizontal")
fg.fig.suptitle('{}'.format(
dss.attrs[var].upper()), fontweight='bold')
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.099,
left=0.169,
right=0.993,
hspace=0.173,
wspace=0.051)
# if save:
# filename = 'hydro_models_heatmaps_on_{}_{}_{}.png'.format(
# station, dss['outer_kfold'].size, var)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_ROC_from_dss(dss, feats=None, fontsize=16, save=True, wv_label='pwv',
best=False):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
sns.set(font_scale=1.0)
cmap = sns.color_palette('tab10', n_colors=3)
splits = dss['outer_split'].size
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['TPR'].to_dataframe()
# if 'neg_sample' in dss.dims:
# fpr_lnum = 5
# model_lnum = 0
# scorer_lnum = 4
# features_lnum = 1
# else:
# fpr_lnum = 4
# model_lnum = 0
# scorer_lnum = 3
# features_lnum = 1
# df['FPR'] = df.index.get_level_values(fpr_lnum)
# df['model'] = df.index.get_level_values(model_lnum)
# df['scorer'] = df.index.get_level_values(scorer_lnum)
# df['features'] = df.index.get_level_values(features_lnum)
df = convert_da_to_long_form_df(dst['TPR'], var_name='score')
# df = df.melt(value_vars='TPR', id_vars=[
# 'features', 'model', 'scorer', 'FPR'], var_name='score')
if best is not None:
if best == 'compare_negs':
df1 = df.copy()[df['neg_sample'] == 1]
df2 = df.copy()
df2.drop('neg_sample', axis=1, inplace=True)
df1.drop('neg_sample', axis=1, inplace=True)
df1['neg_group'] = 1
df2['neg_group'] = 25
df = pd.concat([df1, df2])
col = 'neg_group'
titles = ['Neg=1', 'Neg=25']
else:
col=None
else:
col = 'scorer'
df['model'] = df['model'].str.replace('SVC', 'SVM')
fg = sns.FacetGrid(df, col=col, row='model', aspect=1)
fg.map_dataframe(sns.lineplot, x='FPR', y='value',
hue='features', ci='sd', palette=cmap, n_boot=None,
estimator='mean')
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
auc_model = dst.sel(model=model)
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
if j == 0 and best is not None:
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).isel(neg_sample=0).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
ax = fg.axes[i, j]
ax.plot([0, 1], [0, 1], color='tab:red', linestyle='--', lw=2,
label='chance')
if best is not None:
if best == 'compare_negs':
title = '{} | {}'.format(model, titles[j])
else:
title = '{}'.format(model)
else:
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
handles, labels = ax.get_legend_handles_labels()
hands = handles[0:3]
# labes = labels[0:3]
new_labes = []
for auc, auc_sd in zip(auc_mean, auc_std):
l = r'{:.2}$\pm${:.1}'.format(auc, auc_sd)
new_labes.append(l)
ax.legend(handles=hands, labels=new_labes, loc='lower right',
title='AUCs', prop={'size': fontsize-4})
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.grid(True)
# return handles, labels
fg.set_ylabels('True Positive Rate', fontsize=fontsize)
fg.set_xlabels('False Positive Rate', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
if best is not None:
if best == 'compare_negs':
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=2, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.865,
bottom=0.079,
left=0.144,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=1, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.825,
bottom=0.079,
left=0.184,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.915)
if save:
if best is not None:
filename = 'ROC_plots_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ROC_plots_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_permutation_importances_from_dss(dss, feat_dim='features',
outer_dim='outer_split',
features='pwv+pressure+doy',
fix_xticklabels=True,split=1,
axes=None, save=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
sns.set_style('whitegrid')
sns.set_style('ticks')
model = dss.attrs['model']
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
# dss = dss.mean(outer_dim)
dss = dss.sel({outer_dim: split})
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [2, 5, 5]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(sorted(feats)):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['PI_mean'].sel(
feature=fe).reset_coords(
drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
dsf = dsf.reindex(feature=sorted_feat)
print([x for x in dsf.feature.values])
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True)
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f})'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 10}, loc='upper left')
axes[i].set_ylabel('Scores')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24, -24+n)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
fig.tight_layout()
fig.suptitle('permutation importance scores for {} model split #{}'.format(model, split))
fig.subplots_adjust(top=0.904)
if save:
filename = 'permutation_importances_{}_split_{}_all_scorers_{}.png'.format(model, split, features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_from_dss(
dss,
feat_dim='features', outer_dim='outer_split',
features='pwv+pressure+doy', fix_xticklabels=True,
axes=None, save=True, ylim=[0, 12], fontsize=16):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sns.set_theme(style='ticks', font_scale=1.5)
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
dss = dss.mean(outer_dim)
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [5, 5, 2]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe).reset_coords(
drop=True)
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
# sorted_feat = [x for x in dsf.feature.values]
print(sorted_feat)
dsf = dsf.reindex(feature=sorted_feat)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True) * 100
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
axes[i].set_title(title, fontsize=fontsize)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 12}, loc='upper center')
axes[i].set_ylabel('Feature importances [%]')
axes[i].grid(axis='y', zorder=1)
if ylim is not None:
[ax.set_ylim(*ylim) for ax in axes]
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[2].xaxis.set_ticklabels('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[2].tick_params(labelsize=fontsize)
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
if save:
filename = 'RF_feature_importances_all_scorers_{}.png'.format(features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances(
dss,
feat_dim='features',
features='pwv+pressure+doy',
scoring='f1', fix_xticklabels=True,
axes=None, save=True):
# use dss.sel(model='RF') first as input
import matplotlib.pyplot as plt
import numpy as np
dss = dss.sel({feat_dim: features})
tests_ds = dss[[x for x in dss if 'test' in x]]
tests_ds = tests_ds.sel(scoring=scoring)
score_ds = tests_ds['test_{}'.format(scoring)]
max_score = score_ds.idxmax('outer_kfold').values
feats = features.split('+')
fn = len(feats)
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': [1, 4, 4]})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe,
outer_kfold=max_score).reset_coords(
drop=True)
dsf = dsf.to_dataset('scoring').to_dataframe(
).reset_index(drop=True) * 100
title = '{} ({})'.format(f.upper(), scoring)
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 8})
axes[i].set_ylabel('Feature importance [%]')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24,0)
axes[1].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
if save:
fig.tight_layout()
filename = 'RF_feature_importances_{}.png'.format(scoring)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_for_all_scorings(dss,
features='doy+pwv+pressure',
model='RF', splitfigs=True):
import matplotlib.pyplot as plt
# station = dss.attrs['pwv_id'].upper()
dss = dss.sel(model=model).reset_coords(drop=True)
fns = len(features.split('+'))
scores = dss['scoring'].values
scores1 = ['f1', 'precision', 'recall']
scores2 = ['hss', 'tss', 'accuracy','roc-auc']
if splitfigs:
fig, axes = plt.subplots(len(scores1), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores1):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_1.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
fig, axes = plt.subplots(len(scores2), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores2):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_2.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
fig, axes = plt.subplots(len(scores), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return dss
def plot_ROC_curve_from_dss_nested_CV(dss, outer_dim='outer_split',
plot_chance=True, color='tab:blue',
fontsize=14, plot_legend=True,
title=None,
ax=None, main_label=None):
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).values
mean_auc = dss['roc_auc_score'].mean().item()
    if np.isnan(mean_auc):
        raise ValueError('mean roc_auc_score is NaN')
std_auc = dss['roc_auc_score'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
if main_label is None:
main_label = r'Mean ROC (AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)
textstr = '\n'.join(['{}'.format(
main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=3, alpha=.8, label=main_label)
std_tpr = dss[field].std(outer_dim).values
n = dss[outer_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8, zorder=206)
stdlabel = r'$\pm$ 1 Std. dev.'
stdstr = '\n'.join(['{}'.format(stdlabel), r'({} outer splits)'.format(n)])
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=stdstr)
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
# ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_title(title, fontsize=fontsize)
return ax
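

# Illustrative usage sketch for plot_ROC_curve_from_dss_nested_CV (not part of the
# original pipeline): the model/features/scorer values below are assumptions, and the
# nested CV result files are assumed to exist under hydro_ml_path.
def example_plot_single_nested_roc(model='SVC', features='pwv+pressure+doy',
                                   scorer='f1', neg=1, splits=4):
    # load the concatenated nested CV test results and select one slice:
    dss = load_nested_CV_test_results_from_all_models(best=True, neg=neg,
                                                      splits=splits)
    dst = dss.sel(model=model, features=features, scorer=scorer)
    # plot the mean ROC over the outer splits with a +/- 1 std band:
    ax = plot_ROC_curve_from_dss_nested_CV(dst, outer_dim='outer_split',
                                           main_label=features)
    return ax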
def plot_ROC_PR_curve_from_dss(
dss,
outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=True,
ax=None,
color='b',
title=None,
std_on='inner',
main_label=None,
fontsize=14,
plot_type='ROC',
plot_std_legend=True):
"""plot classifier metrics, plot_type=ROC or PR"""
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
if plot_type == 'ROC':
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['roc-auc'].mean().item()
        if np.isnan(mean_auc):
            raise ValueError('mean roc-auc is NaN')
std_auc = dss['roc-auc'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
elif plot_type == 'PR':
mean_fpr = dss['RCLL'].values
mean_tpr = dss['PRN'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['pr-auc'].mean().item()
        if np.isnan(mean_auc):
            raise ValueError('mean pr-auc is NaN')
std_auc = dss['pr-auc'].std().item()
no_skill = dss['no_skill'].mean(outer_dim).mean(inner_dim).item()
field = 'PRN'
xlabel = 'Recall'
ylabel = 'Precision'
# plot mean ROC:
if main_label is None:
main_label = r'Mean {} (AUC={:.2f}$\pm${:.2f})'.format(
plot_type, mean_auc, std_auc)
else:
textstr = '\n'.join(['Mean ROC {}'.format(
main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=2, alpha=.8, label=main_label)
if std_on == 'inner':
std_tpr = dss[field].mean(outer_dim).std(inner_dim).values
n = dss[inner_dim].size
elif std_on == 'outer':
std_tpr = dss[field].mean(inner_dim).std(outer_dim).values
n = dss[outer_dim].size
elif std_on == 'all':
std_tpr = dss[field].stack(
dumm=[inner_dim, outer_dim]).std('dumm').values
n = dss[outer_dim].size * dss[inner_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
if plot_type == 'ROC':
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
elif plot_type == 'PR':
ax.plot([0, 1], [no_skill, no_skill], linestyle='--', color='r',
lw=2, label='No Skill', alpha=.8)
# plot ROC STD range:
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=r'$\pm$ 1 std. dev. ({} {} splits)'.format(n, std_on))
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
# handles, labels = ax.get_legend_handles_labels()
# if not plot_std_legend:
# if len(handles) == 7:
# handles = handles[:-2]
# labels = labels[:-2]
# else:
# handles = handles[:-1]
# labels = labels[:-1]
# ax.legend(handles=handles, labels=labels, loc="lower right",
# fontsize=fontsize)
return ax
def load_cv_splits_from_pkl(savepath):
import joblib
from aux_gps import path_glob
file = path_glob(savepath, 'CV_inds_*.pkl')[0]
n_splits = int(file.as_posix().split('/')[-1].split('_')[2])
shuffle = file.as_posix().split('/')[-1].split('.')[0].split('=')[-1]
cv_dict = joblib.load(file)
spl = len([x for x in cv_dict.keys()])
assert spl == n_splits
print('loaded {} with {} splits.'.format(file, n_splits))
return cv_dict
def save_cv_splits_to_dict(X, y, cv, train_key='train', test_key='test',
savepath=None):
import joblib
cv_dict = {}
for i, (train, test) in enumerate(cv.split(X, y)):
cv_dict[i+1] = {train_key: train, test_key: test}
    # check for completeness:
all_train = [x['train'] for x in cv_dict.values()]
flat_train = set([item for sublist in all_train for item in sublist])
all_test = [x['test'] for x in cv_dict.values()]
flat_test = set([item for sublist in all_test for item in sublist])
assert flat_test == flat_train
if savepath is not None:
filename = 'CV_inds_{}_splits_shuffle={}.pkl'.format(cv.n_splits, cv.shuffle)
joblib.dump(cv_dict, savepath / filename)
print('saved {} to {}.'.format(filename, savepath))
return cv_dict
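

# Illustrative sketch of the CV-splits round trip (save_cv_splits_to_dict /
# load_cv_splits_from_pkl above). X, y and the savepath are assumptions here;
# in the pipeline they come from combine_pos_neg_from_nc_file and hydro_ml_path.
def example_cv_splits_roundtrip(X, y, savepath=hydro_ml_path, n_splits=4, seed=42):
    from sklearn.model_selection import StratifiedKFold
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    # dump the train/test indices of each split to a pkl file:
    cv_dict = save_cv_splits_to_dict(X, y, cv, savepath=savepath)
    # read them back (the loader asserts the split count parsed from the filename):
    loaded = load_cv_splits_from_pkl(savepath)
    assert sorted(loaded.keys()) == sorted(cv_dict.keys())
    return loaded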
def plot_many_ROC_curves(model, X, y, name='', color='b', ax=None,
plot_chance=True, title=None, n_splits=None):
from sklearn.metrics import plot_roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import numpy as np
from sklearn.model_selection import StratifiedKFold
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
# just plot the ROC curve for X, y, no nsplits and stats:
if n_splits is None:
viz = plot_roc_curve(model, X, y, color=color, ax=ax, name=name)
else:
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
# y_score = model.fit(X[train], y[train]).predict_proba(X[val])[:, 1]
y_pred = model.predict(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
# viz = plot_roc_curve(model, X[val], y[val],
# name='ROC fold {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
# fpr = viz.fpr
# tpr = viz.tpr
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
# scores.append(f1_score(y[val], y_pred))
# scores = np.array(scores)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color=color,
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (
mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title=title)
ax.legend(loc="lower right")
return ax
def HP_tuning(X, y, model_name='SVC', val_size=0.18, n_splits=None,
test_size=None,
best_score='f1', seed=42, savepath=None):
    """do HP tuning with the ML_Classifier_Switcher object and return a Dataset of
    results. note that the X, y are already after the split to val/test"""
    from sklearn.model_selection import GridSearchCV
    from sklearn.model_selection import StratifiedKFold
# first get the features from X:
features = list(set(['_'.join(x.split('_')[0:2])
for x in X['feature'].values]))
ml = ML_Classifier_Switcher()
sk_model = ml.pick_model(model_name)
param_grid = ml.param_grid
if n_splits is None and val_size is not None:
n_splits = int((1 // val_size) - 1)
elif val_size is not None and n_splits is not None:
        raise ValueError('Both val_size and n_splits are defined, choose only one of them.')
print('StratifiedKfolds of {}.'.format(n_splits))
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
gr = GridSearchCV(estimator=sk_model, param_grid=param_grid, cv=cv,
n_jobs=-1, scoring=['f1', 'roc_auc', 'accuracy'], verbose=1,
refit=best_score, return_train_score=True)
gr.fit(X, y)
if best_score is not None:
ds, best_model = process_gridsearch_results(gr, model_name,
features=features, pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
else:
ds = process_gridsearch_results(gr, model_name, features=features,
pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
best_model = None
if savepath is not None:
save_cv_results(ds, best_model=best_model, savepath=savepath)
return ds, best_model
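

# Illustrative usage sketch for HP_tuning (not the authoritative workflow): assumes
# X, y were produced by produce_X_y/produce_X_y_from_list below and carry the attrs
# that HP_tuning expects (X.attrs['pwv_id'], y.attrs['hydro_station_id']).
def example_hp_tuning(X, y, model_name='SVC', savepath=None):
    # grid search the model over 4 stratified folds, refit on the f1 scorer:
    ds, best_model = HP_tuning(X, y, model_name=model_name, n_splits=4,
                               val_size=None, best_score='f1',
                               savepath=savepath)
    return ds, best_model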
def save_gridsearchcv_object(GridSearchCV, savepath, filename):
import joblib
print('{} was saved to {}'.format(filename, savepath))
joblib.dump(GridSearchCV, savepath / filename)
return
def run_RF_feature_importance_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
da = holdout_test(model_name='RF', return_RF_FI=True, features=feat)
feat_list.append(da)
daa = xr.concat(feat_list, 'features')
daa['features'] = feats
return daa
def load_nested_CV_test_results_from_all_models(path=hydro_ml_path, best=False,
neg=1, splits=4,
permutation=False):
from aux_gps import path_glob
import xarray as xr
if best:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_best_hp_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_best_hp_neg_{}_{}a.nc'.format(neg, splits)
else:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_neg_{}_{}a.nc'.format(neg, splits)
files = path_glob(path, file_str)
print(files)
models = [x.as_posix().split('/')[-1].split('_')[4] for x in files]
print('loading CV test results only for {} models'.format(', '.join(models)))
dsl = [xr.load_dataset(x) for x in files]
if not permutation:
dsl = [x[['mean_score', 'std_score', 'test_score', 'roc_auc_score', 'TPR']] for x in dsl]
dss = xr.concat(dsl, 'model')
dss['model'] = models
return dss
# def plot_all_permutation_test_results(dss, feats=None):
# import xarray as xr
# fg = xr.plot.FacetGrid(
# dss,
# col='scorer',
# row='model',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# model = dss['model'].isel(model=i).item()
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# scorer = dss['scorer'].isel(scorer=j).item()
# ax = plot_single_permutation_test_result(dss, feats=feats,
# scorer=scorer,
# model=model,
# ax=ax)
# fg.fig.tight_layout()
# return fg
def plot_permutation_test_results_from_dss(dss, feats=None, fontsize=14,
save=True, wv_label='pwv'):
# ax=None, scorer='f1', model='MLP'):
import matplotlib.pyplot as plt
import seaborn as sns
from PW_from_gps_figures import get_legend_labels_handles_title_seaborn_histplot
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
try:
splits = dss['outer_split'].size
except KeyError:
splits = 5
try:
assert 'best' in dss.attrs['comment']
best = True
except AssertionError:
best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.reindex(scorer=scorer_order)
# dss = dss.mean('outer_split')
cmap = sns.color_palette('tab10', n_colors=3)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst[['permutation_score', 'true_score', 'pvalue']].to_dataframe()
# df['permutations'] = df.index.get_level_values(2)
# df['scorer'] = df.index.get_level_values(3)
# df['features'] = df.index.get_level_values(0)
# df['model'] = df.index.get_level_values(1)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars=['permutation_score', 'true_score', 'pvalue'], id_vars=[
# 'features', 'model', 'scorer'], var_name='scores')
df = convert_da_to_long_form_df(dst[['permutation_score', 'true_score', 'pvalue']], var_name='scores')
df_p = df[df['scores'] == 'permutation_score']
df_pval = df[df['scores'] == 'pvalue']
# if ax is None:
# fig, ax = plt.subplots(figsize=(6, 8))
fg = sns.FacetGrid(df_p, col='scorer', row='model', legend_out=True,
sharex=False)
fg.map_dataframe(sns.histplot, x="value", hue="features",
legend=True, palette=cmap,
stat='density', kde=True,
element='bars', bins=10)
# pvals = dst.sel(scorer=scorer, model=model)[
# 'pvalue'].reset_coords(drop=True)
# pvals = pvals.values
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
# new_labels = []
# for pval, label in zip(pvals, labels):
# label += ' (p={:.1})'.format(pval)
# new_labels.append(label)
# ax.legend(handles, new_labels, title=title)
df_t = df[df['scores'] == 'true_score']
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
df_model = df_t[df_t['model'] == model]
df_pval_model = df_pval[df_pval['model'] == model]
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
df1 = df_model[df_model['scorer'] == scorer]
df2 = df_pval_model[df_pval_model['scorer'] == scorer]
ax = fg.axes[i, j]
ymax = ax.get_ylim()[-1] - 0.2
plabels = []
for k, feat in enumerate(feats):
val = df1[df1['features']==feat]['value'].unique().item()
pval = df2[df2['features']==feat]['value'].unique().item()
plabels.append('pvalue: {:.2g}'.format(pval))
# print(i, val, feat, scorer, model)
ax.axvline(x=val, ymin=0, ymax=ymax, linestyle='--', color=cmap[k],
label=feat)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles, labels=plabels,
prop={'size': fontsize-4}, loc='upper left')
if 'hss' in scorer or 'tss' in scorer:
ax.set_xlim(-0.35, 1)
else:
ax.set_xlim(0.15, 1)
# ax.set_xticks([0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1])
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
if model == 'SVC':
model = 'SVM'
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
# ax.set_xlim(-0.3, 1)
fg.set_ylabels('Density', fontsize=fontsize)
fg.set_xlabels('Score', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'permutation_test_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'permutation_test_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def run_CV_nested_tests_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'nested4',
verbose=False, model_name='SVC', params=None,
savepath=None, drop_hours=None, PI=30, Ptest=None,
suffix=None, sample_from_negatives=1):
"""returns the nested CV test results for all scorers, features and models,
if model is chosen, i.e., model='MLP', returns just this model results
and its hyper-parameters per each outer split"""
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
from aux_gps import save_ncfile
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
print('Running CV on feature {}'.format(feat))
ds = CV_test_after_GridSearchCV(path=path, gr_path=gr_path,
model_name=model_name, params=params,
features=feat, PI=PI, Ptest=Ptest,
verbose=verbose, drop_hours=drop_hours,
sample_from_negatives=sample_from_negatives)
feat_list.append(ds)
dsf = xr.concat(feat_list, 'features')
dsf['features'] = feats
dss = dsf
dss.attrs['model'] = model_name
if Ptest is not None:
filename = 'nested_CV_test_results_{}_all_features_permutation_tests'.format(model_name)
else:
filename = 'nested_CV_test_results_{}_all_features_with_hyper_params'.format(model_name)
if params is not None:
dss.attrs['comment'] = 'using best hyper parameters for all features and outer splits'
filename += '_best_hp'
filename += '_neg_{}'.format(sample_from_negatives)
if suffix is not None:
filename += '_{}'.format(suffix)
filename += '.nc'
if savepath is not None:
save_ncfile(dss, savepath, filename)
return dss
def run_holdout_test_on_all_models_and_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
models = ['MLP', 'SVC', 'RF']
model_list = []
model_list2 = []
for model in models:
feat_list = []
feat_list2 = []
for feat in feats:
best, roc = holdout_test(path=path, gr_path=gr_path,
model_name=model, features=feat)
best.index.name = 'scorer'
ds = best[['mean_score', 'std_score', 'holdout_test_scores']].to_xarray()
roc.index.name = 'FPR'
roc_da = roc.to_xarray().to_array('scorer')
feat_list.append(ds)
feat_list2.append(roc_da)
dsf = xr.concat(feat_list, 'features')
dsf2 = xr.concat(feat_list2, 'features')
dsf['features'] = feats
dsf2['features'] = feats
model_list.append(dsf)
model_list2.append(dsf2)
dss = xr.concat(model_list, 'model')
rocs = xr.concat(model_list2, 'model')
dss['model'] = models
rocs['model'] = models
dss['roc'] = rocs
return dss
def prepare_X_y_for_holdout_test(features='pwv+doy', model_name='SVC',
path=hydro_path, drop_hours=None,
negative_samples=1):
# combine X,y and split them according to test ratio and seed:
X, y = combine_pos_neg_from_nc_file(path, negative_sample_num=negative_samples)
# re arange X features according to model:
feats = features.split('+')
if model_name == 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(feats, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(feats, str):
feats = ['doy_sin']
feats.append('doy_cos')
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(select_features_from_X(X1, feats))
X = Xs
else:
X = select_features_from_X(X, feats)
if drop_hours is not None:
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(drop_hours_in_pwv_pressure_features(X1, drop_hours,
verbose=True))
X = Xs
else:
X = drop_hours_in_pwv_pressure_features(X, drop_hours, verbose=True)
return X, y
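

# Illustrative sketch for prepare_X_y_for_holdout_test: for the RF model the cyclic
# 'doy' feature is swapped for the raw 'DOY' column, otherwise 'doy_sin'/'doy_cos'
# are used. Assumes the positive/negative sample files exist under hydro_path.
def example_prepare_X_y(features='pwv+pressure+doy', model_name='RF'):
    X, y = prepare_X_y_for_holdout_test(features=features, model_name=model_name,
                                        path=hydro_path, negative_samples=1)
    return X, y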
def CV_test_after_GridSearchCV(path=hydro_path, gr_path=hydro_ml_path/'nested4',
model_name='SVC', features='pwv', params=None,
verbose=False, drop_hours=None, PI=None,
Ptest=None, sample_from_negatives=1):
"""do cross_validate with all scorers on all gridsearchcv folds,
reads the nested outer splits CV file in gr_path"""
import xarray as xr
import numpy as np
# cv = read_cv_params_and_instantiate(gr_path/'CV_outer.csv')
cv_dict = load_cv_splits_from_pkl(gr_path)
if verbose:
print(cv_dict)
param_df_dict = load_one_gridsearchcv_object(path=gr_path,
cv_type='nested',
features=features,
model_name=model_name,
verbose=verbose)
Xs, ys = prepare_X_y_for_holdout_test(features, model_name, path,
drop_hours=drop_hours,
negative_samples=sample_from_negatives)
bests = []
for i, negative_sample in enumerate(np.arange(1, sample_from_negatives + 1)):
print('running with negative sample #{} out of {}'.format(
negative_sample, sample_from_negatives))
if isinstance(Xs, list):
X = Xs[i]
y = ys[i]
else:
X = Xs
y = ys
if Ptest is not None:
print('Permutation Test is in progress!')
ds = run_permutation_classifier_test(X, y, 5, param_df_dict, Ptest=Ptest,
params=params,
model_name=model_name, verbose=verbose)
return ds
if params is not None:
if verbose:
print('running with custom hyper parameters: ', params)
outer_bests = []
outer_rocs = []
fis = []
pi_means = []
pi_stds = []
n_splits = len([x for x in cv_dict.keys()])
for split, tt in cv_dict.items():
X_train = X[tt['train']]
y_train = y[tt['train']]
X_test = X[tt['test']]
y_test = y[tt['test']]
outer_split = '{}-{}'.format(split, n_splits)
# for i, (train_index, test_index) in enumerate(cv.split(X, y)):
# X_train = X[train_index]
# y_train = y[train_index]
# X_test = X[test_index]
# y_test = y[test_index]
# outer_split = '{}-{}'.format(i+1, cv.n_splits)
best_params_df = param_df_dict.get(outer_split)
if params is not None:
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_params_df.index:
best_params_df.at[ind, key] = value
else:
best_params_df[key] = value
if model_name == 'RF':
if PI is not None:
bdf, roc, fi, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
else:
bdf, roc, fi = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
fis.append(fi)
else:
if PI is not None:
bdf, roc, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
else:
bdf, roc = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
if PI is not None:
pi_means.append(pi_mean)
pi_stds.append(pi_std)
bdf.index.name = 'scorer'
roc.index.name = 'FPR'
if 'hidden_layer_sizes' in bdf.columns:
bdf['hidden_layer_sizes'] = bdf['hidden_layer_sizes'].astype(str)
bdf_da = bdf.to_xarray()
roc_da = roc.to_xarray().to_array('scorer')
roc_da.name = 'TPR'
outer_bests.append(bdf_da)
outer_rocs.append(roc_da)
best_da = xr.concat(outer_bests, 'outer_split')
roc_da = xr.concat(outer_rocs, 'outer_split')
best = xr.merge([best_da, roc_da])
best['outer_split'] = np.arange(1, n_splits + 1)
if model_name == 'RF':
fi_da = xr.concat(fis, 'outer_split')
best['feature_importances'] = fi_da
if PI is not None:
pi_mean_da = xr.concat(pi_means, 'outer_split')
pi_std_da = xr.concat(pi_stds, 'outer_split')
best['PI_mean'] = pi_mean_da
best['PI_std'] = pi_std_da
bests.append(best)
if len(bests) == 1:
return bests[0]
else:
best_ds = xr.concat(bests, 'neg_sample')
best_ds['neg_sample'] = np.arange(1, sample_from_negatives + 1)
return best_ds
def run_permutation_classifier_test(X, y, cv, best_params_df, Ptest=100,
model_name='SVC', verbose=False, params=None):
from sklearn.model_selection import permutation_test_score
import xarray as xr
import numpy as np
def run_one_permutation_test(X=X, y=y, cv=cv, bp_df=best_params_df,
model_name=model_name, n_perm=Ptest,
verbose=verbose):
true_scores = []
pvals = []
perm_scores = []
for scorer in bp_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
b_params = bp_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, b_params))
true, perm_scrs, pval = permutation_test_score(sk_model, X, y,
cv=cv,
n_permutations=Ptest,
scoring=scorers(scorer),
random_state=0,
n_jobs=-1)
true_scores.append(true)
pvals.append(pval)
perm_scores.append(perm_scrs)
true_da = xr.DataArray(true_scores, dims=['scorer'])
true_da['scorer'] = [x for x in bp_df.index.values]
true_da.name = 'true_score'
pval_da = xr.DataArray(pvals, dims=['scorer'])
pval_da['scorer'] = [x for x in bp_df.index.values]
pval_da.name = 'pvalue'
perm_da = xr.DataArray(perm_scores, dims=['scorer', 'permutations'])
perm_da['scorer'] = [x for x in bp_df.index.values]
perm_da['permutations'] = np.arange(1, Ptest+1)
perm_da.name = 'permutation_score'
ds = xr.merge([true_da, pval_da, perm_da])
return ds
ml = ML_Classifier_Switcher()
if params is not None:
best_p_df = best_params_df['1-{}'.format(len(best_params_df))]
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_p_df.index:
best_p_df.at[ind, key] = value
else:
best_p_df[key] = value
dss = run_one_permutation_test(bp_df=best_p_df)
else:
if verbose:
print('Picking {} model with best params'.format(model_name))
splits = []
        for i, df in enumerate(best_params_df.values()):
            if verbose:
                print('running on split #{}'.format(i+1))
            # run the permutation test with this outer split's best params:
            ds = run_one_permutation_test(bp_df=df)
splits.append(ds)
dss = xr.concat(splits, dim='outer_split')
dss['outer_split'] = np.arange(1, len(best_params_df)+ 1)
return dss
def run_test_on_CV_split(X_train, y_train, X_test, y_test, param_df,
model_name='SVC', verbose=False, PI=None,
Ptest=None):
import numpy as np
import xarray as xr
import pandas as pd
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.inspection import permutation_importance
best_df = param_df.copy()
ml = ML_Classifier_Switcher()
if verbose:
print('Picking {} model with best params'.format(model_name))
# print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
pi_mean_list = []
pi_std_list = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
# print(X_train['feature'])
# input('press any key')
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
if PI is not None:
pi = permutation_importance(sk_model, X_test, y_test,
n_repeats=PI,
scoring=scorers(scorer),
random_state=0, n_jobs=-1)
pi_mean = xr.DataArray(pi['importances_mean'], dims='feature')
pi_std = xr.DataArray(pi['importances_std'], dims='feature')
pi_mean.name = 'PI_mean'
pi_std.name = 'PI_std'
pi_mean['feature'] = X_train['feature']
pi_std['feature'] = X_train['feature']
pi_mean_list.append(pi_mean)
pi_std_list.append(pi_std)
if PI is not None:
pi_mean_da = xr.concat(pi_mean_list, 'scorer')
pi_std_da = xr.concat(pi_std_list, 'scorer')
pi_mean_da['scorer'] = [x for x in best_df.index.values]
pi_std_da['scorer'] = [x for x in best_df.index.values]
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['test_score'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if hasattr(sk_model, 'feature_importances_'):
fi = xr.concat(fi_list, 'scorer')
fi['scorer'] = [x for x in best_df.index.values]
if PI is not None:
return best_df, roc_df, fi, pi_mean_da, pi_std_da
else:
return best_df, roc_df, fi
elif PI is not None:
return best_df, roc_df, pi_mean_da, pi_std_da
else:
return best_df, roc_df
def holdout_test(path=hydro_path, gr_path=hydro_ml_path/'holdout',
model_name='SVC', features='pwv', return_RF_FI=False,
verbose=False):
"""do a holdout test with best model from gridsearchcv
with all scorers"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import xarray as xr
import pandas as pd
import numpy as np
# process gridsearchcv results:
best_df, test_ratio, seed = load_one_gridsearchcv_object(path=gr_path,
cv_type='holdout',
features=features,
model_name=model_name,
verbose=False)
print('Using random seed of {} and {}% test ratio'.format(seed, test_ratio))
ts = int(test_ratio) / 100
X, y = prepare_X_y_for_holdout_test(features, model_name, path)
# split using test_size and seed:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=ts,
random_state=int(seed),
stratify=y)
if verbose:
print('y train pos/neg:{}, {}'.format((y_train==1).sum().item(),(y_train==0).sum().item()))
print('y test pos/neg:{}, {}'.format((y_test==1).sum().item(),(y_test==0).sum().item()))
# pick model and set the params to best from gridsearchcv:
ml = ML_Classifier_Switcher()
print('Picking {} model with best params'.format(model_name))
print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['holdout_test_scores'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if fi_list and return_RF_FI:
da = xr.concat(fi_list, 'scorer')
da['scorer'] = best_df.index.values
da.name = 'RF_feature_importances'
return da
return best_df, roc_df
def load_one_gridsearchcv_object(path=hydro_ml_path, cv_type='holdout', features='pwv',
model_name='SVC', verbose=True):
"""load one gridsearchcv obj with model_name and features and run read_one_gridsearchcv_object"""
from aux_gps import path_glob
import joblib
# first filter for model name:
if verbose:
print('loading GridsearchCVs results for {} model with {} cv type'.format(model_name, cv_type))
model_files = path_glob(path, 'GRSRCHCV_{}_*.pkl'.format(cv_type))
model_files = [x for x in model_files if model_name in x.as_posix()]
# now select features:
if verbose:
print('loading GridsearchCVs results with {} features'.format(features))
model_features = [x.as_posix().split('/')[-1].split('_')[3] for x in model_files]
feat_ind = get_feature_set_from_list(model_features, features)
# also get the test ratio and seed number:
if len(feat_ind) > 1:
if verbose:
print('found {} GR objects.'.format(len(feat_ind)))
files = sorted([model_files[x] for x in feat_ind])
outer_splits = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-3] for x in files]
grs = [joblib.load(x) for x in files]
best_dfs = [read_one_gridsearchcv_object(x) for x in grs]
di = dict(zip(outer_splits, best_dfs))
return di
else:
file = model_files[feat_ind]
seed = file.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
outer_splits = file.as_posix().split('/')[-1].split('.')[0].split('_')[-3]
# load and produce best_df:
gr = joblib.load(file)
best_df = read_one_gridsearchcv_object(gr)
return best_df, outer_splits, seed
def get_feature_set_from_list(model_features_list, features, sep='+'):
"""select features from model_features_list,
return the index in the model_features_list and the entry itself"""
# first find if features is a single or multiple features:
if isinstance(features, str) and sep not in features:
try:
ind = [i for i, e in enumerate(model_features_list) if e == features]
# ind = model_features_list.index(features)
except ValueError:
raise ValueError('{} is not in {}'.format(features, ', '.join(model_features_list)))
elif isinstance(features, str) and sep in features:
features_split = features.split(sep)
mf = [x.split(sep) for x in model_features_list]
bool_list = [set(features_split) == (set(x)) for x in mf]
ind = [i for i, x in enumerate(bool_list) if x]
# print(len(ind))
# ind = ind[0]
# feat = model_features_list[ind]
# feat = model_features_list[ind]
return ind
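

# Minimal, self-contained sketch of how get_feature_set_from_list matches
# '+'-joined feature sets (order-insensitive, since sets are compared):
def example_get_feature_set_from_list():
    model_features = ['pwv', 'pwv+pressure', 'doy+pwv+pressure']
    ind = get_feature_set_from_list(model_features, 'pwv+pressure+doy')
    # ind is [2] because {'pwv', 'pressure', 'doy'} == {'doy', 'pwv', 'pressure'}
    return ind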
def read_one_gridsearchcv_object(gr):
"""read one gridsearchcv multimetric object and
get the best params, best mean/std scores"""
import pandas as pd
# first get all the scorers used:
scorers = [x for x in gr.scorer_.keys()]
# now loop over the scorers:
best_params = []
best_mean_scores = []
best_std_scores = []
for scorer in scorers:
df_mean = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["mean_test_{}".format(scorer)], columns=[scorer])], axis=1)
df_std = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["std_test_{}".format(scorer)], columns=[scorer])], axis=1)
# best index = highest score:
best_ind = df_mean[scorer].idxmax()
best_mean_scores.append(df_mean.iloc[best_ind][scorer])
best_std_scores.append(df_std.iloc[best_ind][scorer])
best_params.append(df_mean.iloc[best_ind].to_frame().T.iloc[:, :-1])
best_df = pd.concat(best_params)
best_df['mean_score'] = best_mean_scores
best_df['std_score'] = best_std_scores
best_df.index = scorers
return best_df
# # param grid dict:
# params = gr.param_grid
# # scorer names:
# scoring = [x for x in gr.scoring.keys()]
# # df:
# df = pd.DataFrame().from_dict(gr.cv_results_)
# # produce multiindex from param_grid dict:
# param_names = [x for x in params.keys()]
# # unpack param_grid vals to list of lists:
# pro = [[y for y in x] for x in params.values()]
# ind = pd.MultiIndex.from_product((pro), names=param_names)
# df.index = ind
# best_params = []
# best_mean_scores = []
# best_std_scores = []
# for scorer in scoring:
# best_params.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].index[0])
# best_mean_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].iloc[0])
# best_std_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['std_test_{}'.format(scorer)].iloc[0])
# best_df = pd.DataFrame(best_params, index=scoring, columns=param_names)
# best_df['mean_score'] = best_mean_scores
# best_df['std_score'] = best_std_scores
# return best_df, best_df_1
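

# Illustrative sketch for read_one_gridsearchcv_object above (the pkl path is an
# assumption; in the pipeline such files are produced by save_gridsearchcv_object
# and are normally loaded via load_one_gridsearchcv_object):
def example_summarize_gridsearchcv(gr_pkl_path):
    import joblib
    gr = joblib.load(gr_pkl_path)
    best_df = read_one_gridsearchcv_object(gr)
    # best_df is indexed by scorer and holds the best params plus mean/std CV scores:
    print(best_df[['mean_score', 'std_score']])
    return best_df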
def process_gridsearch_results(GridSearchCV, model_name,
split_dim='inner_kfold', features=None,
pwv_id=None, hs_id=None, test_size=None):
    """takes a fitted GridSearchCV object with cv_results_ and xarray it into a
    Dataset, gathering the best results from all scorers together"""
    import xarray as xr
    import pandas as pd
    import numpy as np
params = GridSearchCV.param_grid
scoring = GridSearchCV.scoring
results = GridSearchCV.cv_results_
# for scorer in scoring:
# for sample in ['train', 'test']:
# sample_score_mean = results['mean_{}_{}'.format(sample, scorer)]
# sample_score_std = results['std_{}_{}'.format(sample, scorer)]
# best_index = np.nonzero(results['rank_test_{}'.format(scorer)] == 1)[0][0]
# best_score = results['mean_test_{}'.format(scorer)][best_index]
names = [x for x in params.keys()]
# unpack param_grid vals to list of lists:
pro = [[y for y in x] for x in params.values()]
ind = pd.MultiIndex.from_product((pro), names=names)
# result_names = [x for x in GridSearchCV.cv_results_.keys() if 'split'
# not in x and 'time' not in x and 'param' not in x and
# 'rank' not in x]
result_names = [
x for x in results.keys() if 'param' not in x]
ds = xr.Dataset()
for da_name in result_names:
da = xr.DataArray(results[da_name])
ds[da_name] = da
ds = ds.assign(dim_0=ind).unstack('dim_0')
for dim in ds.dims:
if ds[dim].dtype == 'O':
try:
ds[dim] = ds[dim].astype(str)
except ValueError:
ds = ds.assign_coords({dim: [str(x) for x in ds[dim].values]})
if ('True' in ds[dim]) and ('False' in ds[dim]):
ds[dim] = ds[dim] == 'True'
# get all splits data and concat them along number of splits:
all_splits = [x for x in ds.data_vars if 'split' in x]
train_splits = [x for x in all_splits if 'train' in x]
test_splits = [x for x in all_splits if 'test' in x]
# loop over scorers:
trains = []
tests = []
for scorer in scoring:
train_splits_scorer = [x for x in train_splits if scorer in x]
trains.append(xr.concat([ds[x]
for x in train_splits_scorer], split_dim))
test_splits_scorer = [x for x in test_splits if scorer in x]
tests.append(xr.concat([ds[x] for x in test_splits_scorer], split_dim))
splits_scorer = np.arange(1, len(train_splits_scorer) + 1)
train_splits = xr.concat(trains, 'scoring')
test_splits = xr.concat(tests, 'scoring')
# splits = [x for x in range(len(train_splits))]
# train_splits = xr.concat([ds[x] for x in train_splits], 'split')
# test_splits = xr.concat([ds[x] for x in test_splits], 'split')
# replace splits data vars with newly dataarrays:
ds = ds[[x for x in ds.data_vars if x not in all_splits]]
ds['split_train_score'] = train_splits
ds['split_test_score'] = test_splits
ds[split_dim] = splits_scorer
if isinstance(scoring, list):
ds['scoring'] = scoring
elif isinstance(scoring, dict):
ds['scoring'] = [x for x in scoring.keys()]
ds.attrs['name'] = 'CV_results'
ds.attrs['param_names'] = names
ds.attrs['model_name'] = model_name
ds.attrs['{}_splits'.format(split_dim)] = ds[split_dim].size
if GridSearchCV.refit:
if hasattr(GridSearchCV.best_estimator_, 'feature_importances_'):
f_import = xr.DataArray(
GridSearchCV.best_estimator_.feature_importances_,
dims=['feature'])
f_import['feature'] = features
ds['feature_importances'] = f_import
ds['best_score'] = GridSearchCV.best_score_
# ds['best_model'] = GridSearchCV.best_estimator_
ds.attrs['refitted_scorer'] = GridSearchCV.refit
for name in names:
if isinstance(GridSearchCV.best_params_[name], tuple):
GridSearchCV.best_params_[name] = ','.join(
map(str, GridSearchCV.best_params_[name]))
ds['best_{}'.format(name)] = GridSearchCV.best_params_[name]
return ds, GridSearchCV.best_estimator_
else:
return ds, None
def save_cv_results(cvr, savepath=hydro_path):
from aux_gps import save_ncfile
features = '+'.join(cvr.attrs['features'])
# pwv_id = cvr.attrs['pwv_id']
# hs_id = cvr.attrs['hs_id']
# neg_pos_ratio = cvr.attrs['neg_pos_ratio']
ikfolds = cvr.attrs['inner_kfold_splits']
okfolds = cvr.attrs['outer_kfold_splits']
name = cvr.attrs['model_name']
refitted_scorer = cvr.attrs['refitted_scorer'].replace('_', '-')
# filename = 'CVR_{}_{}_{}_{}_{}_{}_{}_{}.nc'.format(pwv_id, hs_id,
# name, features, refitted_scorer, ikfolds, okfolds, neg_pos_ratio)
filename = 'CVR_{}_{}_{}_{}_{}.nc'.format(
name, features, refitted_scorer, ikfolds, okfolds)
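    # e.g. (illustrative) filename: 'CVR_SVC_pwv+pressure+doy_f1_4_4.nc'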
save_ncfile(cvr, savepath, filename)
return
def scikit_fit_predict(X, y, seed=42, with_pressure=True, n_splits=7,
plot=True):
# step1: CV for train/val (80% from 80-20 test). display results with
# model and scores(AUC, f1), use StratifiedKFold
# step 2: use validated model with test (20%) and build ROC curve
# step 3: add features (pressure) but check for correlation
# check permutations with scikit learn
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import f1_score
from sklearn.metrics import plot_roc_curve
from sklearn.svm import SVC
from numpy import interp
from sklearn.metrics import auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
if not with_pressure:
just_pw = [x for x in X.feature.values if 'pressure' not in x]
X = X.sel(feature=just_pw)
X_tt, X_test, y_tt, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=seed)
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
# cv = LeaveOneOut()
classifier = SVC(kernel='rbf', probability=False,
random_state=seed)
# classifier = LinearDiscriminantAnalysis()
# clf = QuadraticDiscriminantAnalysis()
scores = []
fig, ax = plt.subplots()
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X_tt, y_tt)):
# for i in range(100):
# X_train, X_val, y_train, y_val = train_test_split(
# X_tt, y_tt, shuffle=True, test_size=0.5, random_state=i)
# clf.fit(X_train, y_train)
classifier.fit(X_tt[train], y_tt[train])
# viz = plot_roc_curve(clf, X_val, y_val,
# name='ROC run {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
viz = plot_roc_curve(classifier, X_tt[val], y_tt[val],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax)
interp_tpr = interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
# aucs.append(viz.roc_auc)
# y_pred = clf.predict(X_val)
y_pred = classifier.predict(X_tt[val])
aucs.append(roc_auc_score(y_tt[val], y_pred))
# scores.append(clf.score(X_val, y_val))
scores.append(f1_score(y_tt[val], y_pred))
scores = np.array(scores)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title="Receiver operating characteristic example")
ax.legend(loc="lower right")
ax.set_title(
'ROC curve for KFold={}, with pressure anomalies.'.format(n_splits))
if not with_pressure:
ax.set_title(
'ROC curve for KFold={}, without pressure anomalies.'.format(n_splits))
y_test_predict = classifier.predict(X_test)
print('final test predict score:')
print(f1_score(y_test, y_test_predict))
if plot:
plt.figure()
plt.hist(scores, bins=15, edgecolor='k')
return scores
# clf.fit(X,y)
def produce_X_y_from_list(pw_stations=['drag', 'dsea', 'elat'],
hs_ids=[48125, 48199, 60170],
pressure_station='bet-dagan', max_flow=0,
window=25, neg_pos_ratio=1, path=work_yuval,
ims_path=ims_path, hydro_path=hydro_path,
concat_Xy=False):
if isinstance(hs_ids, int):
hs_ids = [hs_ids for x in range(len(pw_stations))]
kwargs = locals()
[kwargs.pop(x) for x in ['pw_stations', 'hs_ids', 'concat_Xy']]
Xs = []
ys = []
for pw_station, hs_id in list(zip(pw_stations, hs_ids)):
X, y = produce_X_y(pw_station, hs_id, **kwargs)
Xs.append(X)
ys.append(y)
if concat_Xy:
print('concatenating pwv stations {}, with hydro_ids {}.'.format(
pw_stations, hs_ids))
X, y = concat_X_y(Xs, ys)
return X, y
else:
return Xs, ys
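# Usage sketch (untested; relies on the default stations, hydro ids and paths
# defined above):
# X, y = produce_X_y_from_list(concat_Xy=True)    # one concatenated X, y
# Xs, ys = produce_X_y_from_list(concat_Xy=False)  # per-station lists of X, y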
def concat_X_y(Xs, ys):
import xarray as xr
import pandas as pd
X_attrs = [x.attrs for x in Xs]
X_com_attrs = dict(zip(pd.DataFrame(X_attrs).T.index.values,
pd.DataFrame(X_attrs).T.values.tolist()))
y_attrs = [x.attrs for x in ys]
y_com_attrs = dict(zip(pd.DataFrame(y_attrs).T.index.values,
pd.DataFrame(y_attrs).T.values.tolist()))
for X in Xs:
feat = [x.replace('_' + X.attrs['pwv_id'], '')
for x in X.feature.values]
X['feature'] = feat
X = xr.concat(Xs, 'sample')
X.attrs = X_com_attrs
y = xr.concat(ys, 'sample')
y.attrs = y_com_attrs
return X, y
def produce_X_y(pw_station='drag', hs_id=48125, pressure_station='bet-dagan',
window=25, seed=42,
max_flow=0, neg_pos_ratio=1, path=work_yuval,
ims_path=ims_path, hydro_path=hydro_path):
import xarray as xr
from aux_gps import anomalize_xr
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
# call preprocess_hydro_station
hdf, y_meta = preprocess_hydro_station(
hs_id, hydro_path, max_flow=max_flow)
# load PWV and other features and combine them to fdf:
pw = xr.open_dataset(path / 'GNSS_PW_anom_hourly_50_hour_dayofyear.nc')
fdf = pw[pw_station].to_dataframe(name='pwv_{}'.format(pw_station))
# add Day of year to fdf:
doy = fdf.index.dayofyear
    # encode day-of-year cyclically (period ~366 days) as sin/cos with
    # amplitude ~1:
fdf['doy_sin'] = np.sin(doy * np.pi / 183)
fdf['doy_cos'] = np.cos(doy * np.pi / 183)
if pressure_station is not None:
p = xr.load_dataset(
ims_path /
'IMS_BD_hourly_ps_1964-2020.nc')[pressure_station]
p_attrs = p.attrs
p_attrs = {'pressure_{}'.format(
key): val for key, val in p_attrs.items()}
p = p.sel(time=slice('1996', None))
p = anomalize_xr(p, freq='MS')
fdf['pressure_{}'.format(pressure_station)] = p.to_dataframe()
    # check that the last date of hdf is later than the first date of fdf,
# i.e., there is at least one overlapping event in the data:
if hdf.index[-1] < fdf.index[0]:
raise KeyError('Data not overlapping, hdf for {} stops at {} and fdf starts at {}'.format(
hs_id, hdf.index[-1], fdf.index[0]))
# finally, call add_features_and_produce_X_y
X, y = add_features_and_produce_X_y(hdf, fdf, window_size=window,
seed=seed,
neg_pos_ratio=neg_pos_ratio)
# add meta data:
gps = produce_geo_gnss_solved_stations(plot=False)
pwv_attrs = gps.loc[pw_station, :][['lat', 'lon', 'alt', 'name']].to_dict()
pwv_attrs = {'pwv_{}'.format(key): val for key, val in pwv_attrs.items()}
X.attrs = pwv_attrs
if pressure_station is not None:
X.attrs.update(p_attrs)
y.attrs = y_meta
y.attrs['hydro_station_id'] = hs_id
y.attrs['neg_pos_ratio'] = neg_pos_ratio
# calculate distance to hydro station:
lat1 = X.attrs['pwv_lat']
lon1 = X.attrs['pwv_lon']
lat2 = y.attrs['lat']
lon2 = y.attrs['lon']
y.attrs['max_flow'] = max_flow
distance = calculate_distance_between_two_latlons_israel(
lat1, lon1, lat2, lon2)
X.attrs['distance_to_hydro_station_in_km'] = distance / 1000.0
y.attrs['distance_to_pwv_station_in_km'] = distance / 1000.0
X.attrs['pwv_id'] = pw_station
return X, y
# def produce_X_y(station='drag', hs_id=48125, lag=25, anoms=True,
# neg_pos_ratio=2, add_pressure=False,
# path=work_yuval, hydro_path=hydro_path, with_ends=False,
# seed=42,
# verbose=True, return_xarray=False, pressure_anoms=None):
# import pandas as pd
# import numpy as np
# import xarray as xr
#
# def produce_da_from_list(event_list, feature='pwv'):
# X_da = xr.DataArray(event_list, dims=['sample', 'feature'])
# X_da['feature'] = ['{}_{}'.format(feature, x) for x in np.arange(0, 24, 1)]
# X_df = pd.concat(event_list)
# X_da['sample'] = [x for x in X_df.index[::24]]
# return X_da
#
# df = preprocess_hydro_pw(
# pw_station=station,
# hs_id=hs_id,
# path=path,
# hydro_path=hydro_path,
# with_tide_ends=with_ends, anoms=anoms,
# pressure_anoms=pressure_anoms,
# add_pressure=add_pressure)
# if pressure_anoms is not None:
# station = pressure_anoms.name
# # first produce all the positives:
# # get the tides datetimes:
# y_pos = df[df['tides'] == 1]['tides']
# # get the datetimes of 24 hours before tide event (not inclusive):
# y_lag_pos = y_pos.index - pd.Timedelta(lag, unit='H')
# masks = [(df.index > start) & (df.index < end)
# for start, end in zip(y_lag_pos, y_pos.index)]
# # also drop event if less than 24 hour before available:
# pw_pos_list = []
# pressure_pos_list = []
# ind = []
# bad_ind = []
# for i, tide in enumerate(masks):
# if len(df['tides'][tide]) == (lag - 1):
# pw_pos_list.append(df[station][tide])
# pressure_pos_list.append(df['pressure'][tide])
# ind.append(i)
# else:
# bad_ind.append(i)
# # get the indices of the dropped events:
# # ind = [x[0] for x in pw_pos_list]
# if bad_ind:
# if verbose:
# print('{} are without full 24 hours before record.'.format(
# ','.join([x for x in df.iloc[bad_ind].index.strftime('%Y-%m-%d:%H:00:00')])))
# # drop the events in y so len(y) == in each x from tides_list:
# y_pos_arr = y_pos.iloc[ind].values
# # now get the negative y's with neg_pos_ratio (set to 1 if the same pos=neg):
# y_neg_arr = np.zeros(y_pos_arr.shape[0] * neg_pos_ratio)
# cnt = 0
# pw_neg_list = []
# pressure_neg_list = []
# np.random.seed(seed)
# while cnt < len(y_neg_arr):
# # get a random date from df:
# r = np.random.randint(low=0, high=len(df))
# # slice -24 to 24 range with t=0 being the random date:
# # update: extend the range to -72 hours to 72 hours:
# lag_factor = 72 / lag
# slice_range = int(lag * lag_factor)
# sliced = df.iloc[r - slice_range:r + slice_range]
# # if tides inside this date range, continue:
# if y_pos.iloc[ind].index in sliced.index:
# if verbose:
# print('found positive tide in randomly sliced 48 window')
# continue
# # now if no 24 items exist, also continue:
# negative = df.iloc[r - lag:r - 1][station]
# if len(negative) != (lag-1):
# if verbose:
# print('didnt find full {} hours sliced negative'.format(lag-1))
# continue
# # else, append to pw_neg_list and increase cnt
# pw_neg_list.append(negative)
# pressure_neg_list.append(df.iloc[r - lag:r - 1]['pressure'])
# cnt += 1
# # lastly, assemble for X, y using np.columnstack:
# y = np.concatenate([y_pos_arr, y_neg_arr])
# X = np.stack([[x.values for x in pw_pos_list] +
# [x.values for x in pw_neg_list]])
# X = X.squeeze()
# pw_pos_da = produce_da_from_list(pw_pos_list, feature='pwv')
# pw_neg_da = produce_da_from_list(pw_neg_list, feature='pwv')
# pr_pos_da = produce_da_from_list(pressure_pos_list, feature='pressure')
# pr_neg_da = produce_da_from_list(pressure_neg_list, feature='pressure')
# if return_xarray:
# y = xr.DataArray(y, dims='sample')
# X_pwv = xr.concat([pw_pos_da, pw_neg_da], 'sample')
# X_pressure = xr.concat([pr_pos_da, pr_neg_da], 'sample')
# X = xr.concat([X_pwv, X_pressure], 'feature')
# X.name = 'X'
# y['sample'] = X['sample']
# y.name = 'y'
# X.attrs['PWV_station'] = station
# X.attrs['hydro_station_id'] = hs_id
# y.attrs = X.attrs
# return X, y
# else:
# return X, y
def plot_Xpos_Xneg_mean_std(X_pos_da, X_neg_da):
import matplotlib.pyplot as plt
from PW_from_gps_figures import plot_field_with_fill_between
fig, ax = plt.subplots(figsize=(8, 6))
posln = plot_field_with_fill_between(X_pos_da, ax=ax, mean_dim='event',
dim='time', color='b', marker='s')
negln = plot_field_with_fill_between(X_neg_da, ax=ax, mean_dim='event',
dim='time', color='r', marker='o')
ax.legend(posln+negln, ['Positive tide events', 'Negative tide events'])
ax.grid()
return fig
def preprocess_hydro_station(hs_id=48125, hydro_path=hydro_path, max_flow=0,
with_tide_ends=False):
"""load hydro station tide events with max_flow and round it up to
hourly sample rate, with_tide_ends, puts the value 2 at the datetime of
tide end. regardless 1 is the datetime for tide event."""
import xarray as xr
import pandas as pd
import numpy as np
# first load tides data:
all_tides = xr.open_dataset(hydro_path / 'hydro_tides.nc')
# get all tides for specific station without nans:
sta_slice = [x for x in all_tides.data_vars if str(hs_id) in x]
sta_slice = [
x for x in sta_slice if 'max_flow' in x or 'tide_end' in x or 'tide_max' in x]
if not sta_slice:
raise KeyError('hydro station {} not found in database'.format(hs_id))
tides = all_tides[sta_slice].dropna('tide_start')
max_flow_tide = tides['TS_{}_max_flow'.format(hs_id)]
max_flow_attrs = max_flow_tide.attrs
tide_starts = tides['tide_start'].where(
~tides.isnull()).where(max_flow_tide > max_flow).dropna('tide_start')['tide_start']
tide_ends = tides['TS_{}_tide_end'.format(hs_id)].where(
~tides.isnull()).where(max_flow_tide > max_flow).dropna('tide_start')['TS_{}_tide_end'.format(hs_id)]
max_flows = max_flow_tide.where(
max_flow_tide > max_flow).dropna('tide_start')
# round all tide_starts to hourly:
ts = tide_starts.dt.round('1H')
max_flows = max_flows.sel(tide_start=ts, method='nearest')
max_flows['tide_start'] = ts
ts_end = tide_ends.dt.round('1H')
time_dt = pd.date_range(
start=ts.min().values,
end=ts_end.max().values,
freq='1H')
df = pd.DataFrame(data=np.zeros(time_dt.shape), index=time_dt)
df.loc[ts.values, 0] = 1
df.loc[ts.values, 1] = max_flows.loc[ts.values]
df.columns = ['tides', 'max_flow']
df = df.fillna(0)
if with_tide_ends:
df.loc[ts_end.values, :] = 2
return df, max_flow_attrs
def add_features_and_produce_X_y(hdf, fdf, window_size=25, seed=42,
neg_pos_ratio=1, plot=False):
"""hdf is the hydro events df and fdf is the features df in 'H' freq.
This function checks the fdf for window-sized data and hour before
each positive event.
returns the combined df (hdf+fdf) the positive events labels and features.
"""
import pandas as pd
import numpy as np
import xarray as xr
    # first, prepend window_size hours of zeros to hdf so the earliest event
    # can still have a full feature window:
st = hdf.index[0] - pd.Timedelta(window_size, unit='H')
en = hdf.index[0]
dts = pd.date_range(st, en - pd.Timedelta(1, unit='H'), freq='H')
mdf = pd.DataFrame(
np.zeros(window_size),
index=dts,
columns=['tides'])
hdf = pd.concat([hdf, mdf], axis=0)
# check for hourly sample rate and concat:
if not pd.infer_freq(fdf.index) == 'H':
        raise ValueError('please resample fdf to hourly frequency first')
feature = [x for x in fdf.columns]
df = pd.concat([hdf, fdf], axis=1)
# get the tides(positive events) datetimes:
y_pos = df[df['tides'] == 1]['tides']
# get the datetimes of 24 hours before tide event (not inclusive):
y_lag_pos = y_pos.index - pd.Timedelta(window_size, unit='H')
masks = [(df.index > start) & (df.index < end)
for start, end in zip(y_lag_pos, y_pos.index)]
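    # each mask selects the window_size-hour block that precedes one positive
    # (flood) event, excluding the event hour itself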
# first check how many full periods of data the feature has:
avail = [window_size - 1 - df[feature][masks[x]].isnull().sum()
for x in range(len(masks))]
adf = pd.DataFrame(avail, index=y_pos.index, columns=feature)
if plot:
adf.plot(kind='bar')
# produce the positive events datetimes for which all the features have
# window sized data and hour before the event:
good_dts = adf[adf.loc[:, feature] == window_size - 1].dropna().index
# y array of positives (1's):
y_pos_arr = y_pos.loc[good_dts].values
# now produce the feature list itself:
good_inds_for_masks = [adf.index.get_loc(x) for x in good_dts]
good_masks = [masks[x] for x in good_inds_for_masks]
feature_pos_list = [df[feature][x].values for x in good_masks]
dts_pos_list = [df[feature][x].index[-1] +
pd.Timedelta(1, unit='H') for x in good_masks]
# TODO: add diagnostic mode for how and where are missing features
# now get the negative y's with neg_pos_ratio
# (set to 1 if the same pos=neg):
y_neg_arr = np.zeros(y_pos_arr.shape[0] * neg_pos_ratio)
cnt = 0
feature_neg_list = []
dts_neg_list = []
np.random.seed(seed)
while cnt < len(y_neg_arr):
# get a random date from df:
r = np.random.randint(low=0, high=len(df))
# slice -24 to 24 range with t=0 being the random date:
# update: extend the range to -72 hours to 72 hours:
window_factor = 72 / window_size
slice_range = int(window_size * window_factor)
sliced = df.iloc[r - slice_range:r + slice_range]
# if tides inside this date range, continue:
# try:
if not (y_pos.loc[good_dts].index.intersection(sliced.index)).empty:
# print('#')
continue
# except TypeError:
# return y_pos, good_dts, sliced
# now if no 24 items exist, also continue:
negative = df.iloc[r - window_size:r - 1][feature].dropna().values
if len(negative) != (window_size - 1):
# print('!')
continue
# get the negative datetimes (last record)
neg_dts = df.iloc[r - window_size:r -
1][feature].dropna().index[-1] + pd.Timedelta(1, unit='H')
# else, append to pw_neg_list and increase cnt
feature_neg_list.append(negative)
dts_neg_list.append(neg_dts)
cnt += 1
# print(cnt)
# lastly, assemble for X, y using np.columnstack:
y = np.concatenate([y_pos_arr, y_neg_arr])
# TODO: add exception where no features exist, i.e., there is no
# pw near flood events at all...
Xpos_da = xr.DataArray(feature_pos_list, dims=['sample', 'window', 'feat'])
Xpos_da['window'] = np.arange(0, window_size - 1)
Xpos_da['feat'] = adf.columns
Xpos_da['sample'] = dts_pos_list
Xneg_da = xr.DataArray(feature_neg_list, dims=['sample', 'window', 'feat'])
Xneg_da['window'] = np.arange(0, window_size - 1)
Xneg_da['feat'] = adf.columns
Xneg_da['sample'] = dts_neg_list
X = xr.concat([Xpos_da, Xneg_da], 'sample')
# if feature_pos_list[0].shape[1] > 0 and feature_neg_list[0].shape[1] > 0:
# xpos = [x.ravel() for x in feature_pos_list]
# xneg = [x.ravel() for x in feature_neg_list]
# X = np.column_stack([[x for x in xpos] +
# [x for x in xneg]])
y_dts = np.stack([[x for x in dts_pos_list]+[x for x in dts_neg_list]])
y_dts = y_dts.squeeze()
X_da = X.stack(feature=['feat', 'window'])
feature = ['_'.join([str(x), str(y)]) for x, y in X_da.feature.values]
X_da['feature'] = feature
y_da = xr.DataArray(y, dims=['sample'])
y_da['sample'] = y_dts
# feats = []
# for f in feature:
# feats.append(['{}_{}'.format(f, x) for x in np.arange(0, window_size
# - 1, 1)])
# X_da['feature'] = [item for sublist in feats for item in sublist]
return X_da, y_da
# def preprocess_hydro_pw(pw_station='drag', hs_id=48125, path=work_yuval,
# ims_path=ims_path,
# anoms=True, hydro_path=hydro_path, max_flow=0,
# with_tide_ends=False, pressure_anoms=None,
# add_pressure=False):
# import xarray as xr
# import pandas as pd
# import numpy as np
# from aux_gps import anomalize_xr
# # df.columns = ['tides']
# # now load pw:
# if anoms:
# pw = xr.load_dataset(path / 'GNSS_PW_anom_hourly_50_hour_dayofyear.nc')[pw_station]
# else:
# pw = xr.load_dataset(path / 'GNSS_PW_hourly_thresh_50.nc')[pw_station]
# if pressure_anoms is not None:
# pw = pressure_anoms
# pw_df = pw.dropna('time').to_dataframe()
# # now align the both dataframes:
# pw_df['tides'] = df['tides']
# pw_df['max_flow'] = df['max_flow']
# if add_pressure:
# pressure = xr.load_dataset(ims_path / 'IMS_BP_israeli_hourly.nc')['JERUSALEM-CENTRE']
# pressure = anomalize_xr(pressure, freq='MS')
# pr_df = pressure.dropna('time').to_dataframe()
# pw_df['pressure'] = pr_df
# pw_df = pw_df.fillna(0)
# return pw_df
def loop_over_gnss_hydro_and_aggregate(sel_hydro, pw_anom=False,
pressure_anoms=None,
max_flow_thresh=None,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=5,
ndays_forward=1,
plot=True, plot_all=False):
import xarray as xr
import matplotlib.pyplot as plt
from aux_gps import path_glob
filename = 'PW_tide_sites_{}_{}.nc'.format(ndays, ndays_forward)
if pw_anom:
filename = 'PW_tide_sites_anom_{}_{}.nc'.format(ndays, ndays_forward)
gnss_stations = []
if (hydro_path / filename).is_file():
print('loading {}...'.format(filename))
ds = xr.load_dataset(hydro_path / filename)
else:
if pw_anom:
file = path_glob(work_yuval, 'GNSS_PW_anom_*.nc')[-1]
gnss_pw = xr.open_dataset(file)
else:
gnss_pw = xr.open_dataset(
work_yuval / 'GNSS_PW_thresh_50_homogenized.nc')
just_pw = [x for x in gnss_pw.data_vars if '_error' not in x]
gnss_pw = gnss_pw[just_pw]
da_list = []
for i, gnss_sta in enumerate(just_pw):
            print('processing station {}'.format(gnss_sta))
sliced = sel_hydro[~sel_hydro[gnss_sta].isnull()]
hydro_ids = [x for x in sliced.id.values]
if not hydro_ids:
print(
'skipping {} station since no close hydro stations...'.format(gnss_sta))
continue
else:
try:
if pressure_anoms is not None:
pname = pressure_anoms.name
dass = aggregate_get_ndays_pw_hydro(
pressure_anoms,
hydro_ids,
max_flow_thresh=max_flow_thresh,
ndays=ndays, ndays_forward=ndays_forward,
plot=plot_all)
gnss_stations.append(gnss_sta)
dass.name = '{}_{}'.format(pname, i)
else:
dass = aggregate_get_ndays_pw_hydro(
gnss_pw[gnss_sta],
hydro_ids,
max_flow_thresh=max_flow_thresh,
ndays=ndays, ndays_forward=ndays_forward,
plot=plot_all)
da_list.append(dass)
except ValueError as e:
print('skipping {} because {}'.format(gnss_sta, e))
continue
ds = xr.merge(da_list)
ds.to_netcdf(hydro_path / filename, 'w')
if plot:
names = [x for x in ds.data_vars]
fig, ax = plt.subplots()
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
if pressure_anoms is not None:
names = [x.split('_')[0] for x in ds.data_vars]
names = [x + ' ({})'.format(y)
for x, y in zip(names, gnss_stations)]
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
if pw_anom:
title = 'Mean PWV anomalies for tide stations near all GNSS stations'
ylabel = 'PWV anomalies [mm]'
else:
title = 'Mean PWV for tide stations near all GNSS stations'
ylabel = 'PWV [mm]'
if max_flow_thresh is not None:
title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
if pressure_anoms is not None:
ylabel = 'Surface pressure anomalies [hPa]'
title = 'Mean surface pressure anomaly in {} for all tide stations near GNSS stations'.format(
pname)
ax.set_title(title)
ax.set_ylabel(ylabel)
return ds
def aggregate_get_ndays_pw_hydro(pw_da, hs_ids, max_flow_thresh=None,
hydro_path=hydro_path, ndays=5,
ndays_forward=1, plot=True):
import xarray as xr
import matplotlib.pyplot as plt
das = []
max_flows_list = []
pw_ndays_list = []
if not isinstance(hs_ids, list):
hs_ids = [int(hs_ids)]
else:
hs_ids = [int(x) for x in hs_ids]
used_ids = []
events = []
for sid in hs_ids:
        print('processing hydro station {}'.format(sid))
try:
max_flows, pw_ndays, da = get_n_days_pw_hydro_all(pw_da, sid,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
ndays=ndays, ndays_forward=ndays_forward,
return_max_flows=True,
plot=False)
das.append(da)
pw_ndays_list.append(pw_ndays)
max_flows_list.append(max_flows)
used_ids.append(sid)
events.append(max_flows.size)
except KeyError as e:
print('{}, skipping...'.format(e))
continue
except ValueError as e:
print('{}, skipping...'.format(e))
continue
pw_ndays = xr.concat(pw_ndays_list, 'time')
dass = xr.concat(das, 'station')
dass['station'] = used_ids
dass.name = pw_da.name
dass.attrs['hydro_stations'] = len(used_ids)
dass.attrs['total_events'] = sum(events)
if plot:
fig, ax = plt.subplots(figsize=(20, 4))
color = 'tab:blue'
pw_ndays.plot.line(marker='.', linewidth=0., color=color, ax=ax)
ax.tick_params(axis='y', labelcolor=color)
ax.set_ylabel('PW [mm]', color=color)
ax2 = ax.twinx()
color = 'tab:red'
for mf in max_flows_list:
mf.plot.line(marker='X', linewidth=0., color=color, ax=ax2)
ax2.tick_params(axis='y', labelcolor=color)
ax.grid()
ax2.set_title(
'PW in station {} {} days before tide events ({} total)'.format(
pw_da.name, ndays, sum(events)))
ax2.set_ylabel('max_flow [m^3/sec]', color=color)
fig.tight_layout()
fig, ax = plt.subplots()
for sid in used_ids:
dass.sel(
station=sid).mean('tide_start').plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.set_ylabel('PW [mm]')
ax.grid()
fmt = list(zip(used_ids, events))
ax.legend(['station #{} ({} events)'.format(x, y) for x, y in fmt])
ax.set_title(
'Mean PW for tide stations near {} station'.format(pw_da.name))
if max_flow_thresh is not None:
ax.set_title(
'Mean PW for tide stations (above {} m^3/sec) near {} station'.format(
max_flow_thresh, pw_da.name))
return dass
def produce_pwv_days_before_tide_events(pw_da, hs_df, days_prior=1, drop_thresh=0.5,
days_after=1, plot=False, verbose=0,
max_gap='12H', rolling=12):
"""
    Take PWV and hydro tide dates from one station and round the tide dates
    to the PWV sampling frequency. Keep only tide dates from the first
    available PWV date onwards. Drop an event if there is no PWV data in the
    days_prior window before it, or if less than drop_thresh of a day of PWV
    data is available in that window; otherwise interpolate missing PWV
    points with a spline.
    Return a dataframe of PWV around each kept event, together with the lists
    of PWV slices after and prior to each event.
Parameters
----------
    pw_da : xarray.DataArray
        PWV series of one GNSS station.
    hs_df : pandas.DataFrame
        hydro tide dataframe for one station, indexed by tide start time.
    days_prior : int, optional
        days of PWV to keep before each event. The default is 1.
    drop_thresh : float, optional
        minimum fraction of a day of PWV data required before an event,
        otherwise the event is dropped. The default is 0.5.
    days_after : int, optional
        days of PWV to keep after each event. The default is 1.
    plot : bool, optional
        plot the mean PWV around the kept events. The default is False.
    verbose : int, optional
        verbosity level; 1 prints the reason each event is dropped.
        The default is 0.
    max_gap : str, optional
        maximum gap length to interpolate over. The default is '12H'.
    rolling : int, optional
        rolling-mean window (in samples) applied to the PWV series.
        The default is 12.
    Returns
    -------
    df : pandas.DataFrame
        PWV around each kept event, one column per event.
    pwv_after_list : list of xarray.DataArray
        PWV slices from each event start to days_after later.
    pwv_prior_list : list of xarray.DataArray
        PWV slices from days_prior before each event up to the event.
"""
import pandas as pd
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
if rolling is not None:
pw_da = pw_da.rolling(time=rolling, center=True).mean(keep_attrs=True)
if drop_thresh is None:
drop_thresh = 0
# first infer time freq of pw_da:
freq = xr.infer_freq(pw_da['time'])
if freq == '5T':
pts_per_day = 288
timedelta = pd.Timedelta(5, unit='min')
if freq == '1H' or freq == 'H':
pts_per_day = 24
timedelta = pd.Timedelta(1, unit='H')
# get the minimum dt of the pwv station:
min_dt = pw_da.dropna('time').time.min().values
# round the hs_df to 5 mins, and find the closest min_dt:
hs_df.index = hs_df.index.round(freq)
hs_df = hs_df[~hs_df.index.duplicated(keep='first')]
hs_df = hs_df.sort_index()
min_ind = hs_df.index.get_loc(min_dt, method='nearest')
    # slice the tides data accordingly:
hs_df = hs_df.iloc[min_ind:].dropna()
# loop over each tide start and grab the datetimes
pwv_prior_list = []
pwv_after_list = []
# se_list = []
tot_events = hs_df.index.size
event_cnt = 0
dropped_thresh = 0
dropped_no_data = 0
for ts in hs_df.index:
dt_prior = ts - pd.Timedelta(days_prior, unit='d')
dt_after = ts + pd.Timedelta(days_after, unit='d')
after_da = pw_da.sel(time=slice(ts, dt_after))
prior_da = pw_da.sel(time=slice(dt_prior, ts - timedelta))
if prior_da.dropna('time').size == 0:
if verbose == 1:
print('{} found no prior data for PWV {} days prior'.format(
ts.strftime('%Y-%m-%d %H:%M'), days_prior))
dropped_no_data += 1
continue
elif prior_da.dropna('time').size < pts_per_day*drop_thresh:
if verbose == 1:
print('{} found less than {} a day prior data for PWV {} days prior'.format(
ts.strftime('%Y-%m-%d %H:%M'), drop_thresh, days_prior))
dropped_thresh += 1
continue
if max_gap is not None:
prior_da = prior_da.interpolate_na(
'time', method='spline', max_gap=max_gap, keep_attrs=True)
event_cnt += 1
# if rolling is not None:
# after_da = after_da.rolling(time=rolling, center=True, keep_attrs=True).mean(keep_attrs=True)
# prior_da = prior_da.rolling(time=rolling, center=True, keep_attrs=True).mean(keep_attrs=True)
# after_da.name = pw_da.name + '_{}'.format(i)
pwv_after_list.append(after_da)
pwv_prior_list.append(prior_da)
# se = da.reset_index('time', drop=True).to_dataframe()[da.name]
# se_list.append(se)
se_list = []
for i, (prior, after) in enumerate(zip(pwv_prior_list, pwv_after_list)):
# return prior, after
# df_p = prior.to_dataframe()
# df_a = after.to_dataframe()
# return df_p, df_a
da = xr.concat([prior, after], 'time')
# print(da)
se = da.reset_index('time', drop=True).to_dataframe()
se.columns = [da.name + '_{}'.format(i)]
# print(se)
# [da.name + '_{}'.format(i)]
se_list.append(se)
df = pd.concat(se_list, axis=1)
df = df.iloc[:-1]
df.index = np.arange(-days_prior, days_after, 1/pts_per_day)
if verbose >= 0:
print('total events with pwv:{} , dropped due to no data: {}, dropped due to thresh:{}, left events: {}'.format(
tot_events, dropped_no_data, dropped_thresh, event_cnt))
if plot:
ax = df.T.mean().plot()
ax.grid()
ax.axvline(color='k', linestyle='--')
ax.set_xlabel('Days before tide event')
ax.set_ylabel('PWV anomalies [mm]')
ax.set_title('GNSS station: {} with {} events'.format(
pw_da.name.upper(), event_cnt))
better = df.copy()
better.index = pd.to_timedelta(better.index, unit='d')
better = better.resample('15S').interpolate(
method='cubic').T.mean().resample('5T').mean()
better = better.reset_index(drop=True)
better.index = np.linspace(-days_prior, days_after, better.index.size)
better.plot(ax=ax)
# fig, ax = plt.subplots(figsize=(20, 7))
# [pwv.plot.line(ax=ax) for pwv in pwv_list]
return df, pwv_after_list, pwv_prior_list
def get_n_days_pw_hydro_all(pw_da, hs_id, max_flow_thresh=None,
hydro_path=hydro_path, ndays=5, ndays_forward=1,
return_max_flows=False, plot=True):
"""calculate the mean of the PW ndays before all tide events in specific
hydro station. can use max_flow_thresh to get only event with al least
this max_flow i.e., big tide events"""
# important, DO NOT dropna pw_da!
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
def get_n_days_pw_hydro_one_event(pw_da, tide_start, ndays=ndays, ndays_forward=0):
freq = pd.infer_freq(pw_da.time.values)
# for now, work with 5 mins data:
if freq == '5T':
points = int(ndays) * 24 * 12
points_forward = int(ndays_forward) * 24 * 12
elif freq == '10T':
points = int(ndays) * 24 * 6
points_forward = int(ndays_forward) * 24 * 6
elif freq == 'H':
points = int(ndays) * 24
points_forward = int(ndays_forward) * 24
lag = pd.timedelta_range(end=0, periods=points, freq=freq)
forward_lag = pd.timedelta_range(
start=0, periods=points_forward, freq=freq)
lag = lag.union(forward_lag)
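        # 'lag' holds the time offsets relative to the tide start: 'points'
        # steps back and 'points_forward' steps forward at the PWV frequency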
time_arr = pd.to_datetime(pw_da.time.values)
tide_start = pd.to_datetime(tide_start).round(freq)
ts_loc = time_arr.get_loc(tide_start)
# days = pd.Timedelta(ndays, unit='D')
# time_slice = [tide_start - days, tide_start]
# pw = pw_da.sel(time=slice(*time_slice))
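        # select the window by integer position around the tide start; pw_da
        # keeps its full regular time grid (see note above about not dropping
        # NaNs), so positions map to fixed time offsets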
pw = pw_da.isel(time=slice(ts_loc - points,
ts_loc + points_forward - 1))
return pw, lag
# first load tides data:
all_tides = xr.open_dataset(hydro_path / 'hydro_tides.nc')
# get all tides for specific station without nans:
sta_slice = [x for x in all_tides.data_vars if str(hs_id) in x]
if not sta_slice:
raise KeyError('hydro station {} not found in database'.format(hs_id))
tides = all_tides[sta_slice].dropna('tide_start')
tide_starts = tides['tide_start'].where(
~tides.isnull()).dropna('tide_start')['tide_start']
# get max flow tides data:
mf = [x for x in tides.data_vars if 'max_flow' in x]
max_flows = tides[mf].dropna('tide_start').to_array('max_flow').squeeze()
# also get tide end and tide max data:
# te = [x for x in tides.data_vars if 'tide_end' in x]
# tide_ends = tides[te].dropna('tide_start').to_array('tide_end').squeeze()
# tm = [x for x in tides.data_vars if 'tide_max' in x]
# tide_maxs = tides[tm].dropna('tide_start').to_array('tide_max').squeeze()
# slice minmum time for convenience:
min_pw_time = pw_da.dropna('time').time.min().values
tide_starts = tide_starts.sel(tide_start=slice(min_pw_time, None))
max_flows = max_flows.sel(tide_start=slice(min_pw_time, None))
# filter if hydro station data ends before gnss pw:
if tide_starts.size == 0:
raise ValueError('tides data end before gnss data begin')
if max_flow_thresh is not None:
# pick only big events:
max_flows = max_flows.where(
max_flows > max_flow_thresh).dropna('tide_start')
tide_starts = tide_starts.where(
max_flows > max_flow_thresh).dropna('tide_start')
pw_list = []
for ts in tide_starts.values:
# te = tide_ends.sel(tide_start=ts).values
# tm = tide_maxs.sel(tide_start=ts).values
pw, lag = get_n_days_pw_hydro_one_event(
pw_da, ts, ndays=ndays, ndays_forward=ndays_forward)
pw.attrs['ts'] = ts
pw_list.append(pw)
# filter events that no PW exists:
pw_list = [x for x in pw_list if x.dropna('time').size > 0]
da = xr.DataArray([x.values for x in pw_list], dims=['tide_start', 'lag'])
da['tide_start'] = [x.attrs['ts'] for x in pw_list] # tide_starts
da['lag'] = lag
# da.name = pw_da.name + '_tide_events'
da.attrs = pw_da.attrs
if max_flow_thresh is not None:
da.attrs['max_flow_minimum'] = max_flow_thresh
pw_ndays = xr.concat(pw_list, 'time')
if plot:
fig, ax = plt.subplots(figsize=(20, 4))
color = 'tab:blue'
pw_ndays.plot.line(marker='.', linewidth=0., color=color, ax=ax)
ax.tick_params(axis='y', labelcolor=color)
ax.set_ylabel('PW [mm]', color=color)
ax2 = ax.twinx()
color = 'tab:red'
max_flows.plot.line(marker='X', linewidth=0., color=color, ax=ax2)
ax2.tick_params(axis='y', labelcolor=color)
ax.grid()
ax2.set_title(
'PW in station {} {} days before tide events'.format(
pw_da.name, ndays))
ax2.set_ylabel('max_flow [m^3/sec]', color=color)
fig.tight_layout()
fig, ax = plt.subplots()
da.mean('tide_start').plot.line(marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.set_ylabel('PW [mm]')
ax.grid()
ax.set_title(
'Mean PW for {} tide events near {} station'.format(
da.tide_start.size, pw_da.name))
if max_flow_thresh is not None:
ax.set_title(
'Mean PW for {} tide events (above {} m^3/sec) near {} station'.format(
da.tide_start.size, max_flow_thresh, pw_da.name))
if return_max_flows:
return max_flows, pw_ndays, da
else:
return da
def calculate_distance_between_two_latlons_israel(lat1, lon1, lat2, lon2):
import geopandas as gpd
import numpy as np
import pandas as pd
points = np.array(([lat1, lon1], [lat2, lon2]))
df = pd.DataFrame(points, columns=['lat', 'lon'])
pdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon, df.lat),
crs={'init': 'epsg:4326'})
pdf_meters = pdf.to_crs({'init': 'epsg:6991'})
# distance in meters:
distance = pdf_meters.geometry[0].distance(pdf_meters.geometry[1])
return distance
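# Usage sketch (untested; the coordinates below are arbitrary example values):
# dist_m = calculate_distance_between_two_latlons_israel(31.8, 35.2, 32.1, 34.8)
# dist_km = dist_m / 1000.0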
def get_hydro_near_GNSS(radius=5, n=5, hydro_path=hydro_path,
gis_path=gis_path, plot=True):
import pandas as pd
import geopandas as gpd
from pathlib import Path
import xarray as xr
import matplotlib.pyplot as plt
df = pd.read_csv(Path().cwd() / 'israeli_gnss_coords.txt',
delim_whitespace=True)
df = df[['lon', 'lat']]
gnss = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon, df.lat),
crs={'init': 'epsg:4326'})
gnss = gnss.to_crs({'init': 'epsg:2039'})
hydro_meta = read_hydro_metadata(hydro_path, gis_path, plot=False)
hydro_meta = hydro_meta.to_crs({'init': 'epsg:2039'})
for index, row in gnss.iterrows():
# hdict[index] = hydro_meta.geometry.distance(row['geometry'])
hydro_meta[index] = hydro_meta.geometry.distance(row['geometry'])
hydro_meta[index] = hydro_meta[index].where(
hydro_meta[index] <= radius * 1000)
gnss_list = [x for x in gnss.index]
# get only stations within desired radius
mask = ~hydro_meta.loc[:, gnss_list].isnull().all(axis=1)
sel_hydro = hydro_meta.copy()[mask] # pd.concat(hydro_list)
# filter unexisting stations:
tides = xr.load_dataset(hydro_path / 'hydro_tides.nc')
to_remove = []
for index, row in sel_hydro.iterrows():
sid = row['id']
try:
tides['TS_{}_max_flow'.format(sid)]
except KeyError:
            print('{} hydro station non-existent in database'.format(sid))
to_remove.append(index)
sel_hydro.drop(to_remove, axis=0, inplace=True)
if plot:
isr = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr.crs = {'init': 'epsg:4326'}
gnss = gnss.to_crs({'init': 'epsg:4326'})
sel_hydro = sel_hydro.to_crs({'init': 'epsg:4326'})
ax = isr.plot(figsize=(10, 16))
sel_hydro.plot(ax=ax, color='yellow', edgecolor='black')
gnss.plot(ax=ax, color='green', edgecolor='black', alpha=0.7)
for x, y, label in zip(gnss.lon, gnss.lat, gnss.index):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
plt.legend(['hydro-tide stations', 'GNSS stations'], loc='upper left')
plt.suptitle(
'hydro-tide stations within {} km of a GNSS station'.format(radius), fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.95)
# for x, y, label in zip(sel_hydro.lon, sel_hydro.lat,
# sel_hydro.id):
# ax.annotate(label, xy=(x, y), xytext=(3, 3),
# textcoords="offset points")
return sel_hydro
def read_hydro_metadata(path=hydro_path, gis_path=gis_path, plot=True):
import pandas as pd
import geopandas as gpd
import xarray as xr
    df = pd.read_excel(path / 'hydro_stations_metadata.xlsx', header=4)
# drop last row:
df.drop(df.tail(1).index, inplace=True) # drop last n rows
df.columns = [
'id',
'name',
'active',
'agency',
'type',
'X',
'Y',
'area']
    # 'active' values are in Hebrew: 'פעילה' = active, 'לא פעילה' = inactive,
    # 'לא פעילה זמנית' = temporarily inactive
    df.loc[df['active'] == 'פעילה', 'active'] = 1
    df.loc[df['active'] == 'לא פעילה', 'active'] = 0
    df.loc[df['active'] == 'לא פעילה זמנית', 'active'] = 0
df['active'] = df['active'].astype(float)
df = df[~df.X.isnull()]
df = df[~df.Y.isnull()]
# now, geopandas part:
geo_df = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.X, df.Y),
crs={'init': 'epsg:2039'})
# geo_df.crs = {'init': 'epsg:2039'}
geo_df = geo_df.to_crs({'init': 'epsg:4326'})
isr_dem = xr.open_rasterio(gis_path / 'israel_dem.tif')
alt_list = []
for index, row in geo_df.iterrows():
lat = row.geometry.y
lon = row.geometry.x
alt = isr_dem.sel(band=1, x=lon, y=lat, method='nearest').values.item()
alt_list.append(float(alt))
geo_df['alt'] = alt_list
geo_df['lat'] = geo_df.geometry.y
geo_df['lon'] = geo_df.geometry.x
isr = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr.crs = {'init': 'epsg:4326'}
geo_df = gpd.sjoin(geo_df, isr, op='within')
if plot:
ax = isr.plot()
geo_df.plot(ax=ax, edgecolor='black', legend=True)
return geo_df
def read_tides(path=hydro_path):
from aux_gps import path_glob
import pandas as pd
import xarray as xr
from aux_gps import get_unique_index
files = path_glob(path, 'tide_report*.xlsx')
df_list = []
for file in files:
df = pd.read_excel(file, header=4)
df.drop(df.columns[len(df.columns) - 1], axis=1, inplace=True)
df.columns = [
'id',
'name',
'hydro_year',
'tide_start_hour',
'tide_start_date',
'tide_end_hour',
'tide_end_date',
'tide_duration',
'tide_max_hour',
'tide_max_date',
'max_height',
'max_flow[m^3/sec]',
'tide_vol[MCM]']
df = df[~df.hydro_year.isnull()]
df['id'] = df['id'].astype(int)
df['tide_start'] = pd.to_datetime(
df['tide_start_date'], dayfirst=True) + pd.to_timedelta(
df['tide_start_hour'].add(':00'), unit='m', errors='coerce')
# tides are in local Israeli winter clock (no DST):
# dst = np.zeros(df['tide_start'].shape)
# df['tide_start'] = df['tide_start'].dt.tz_localize('Asia/Jerusalem', ambiguous=dst).dt.tz_convert('UTC')
df['tide_start'] = df['tide_start'] - pd.Timedelta(2, unit='H')
df['tide_end'] = pd.to_datetime(
df['tide_end_date'], dayfirst=True) + pd.to_timedelta(
df['tide_end_hour'].add(':00'),
unit='m',
errors='coerce')
# also to tide ends:
df['tide_end'] = df['tide_end'] - pd.Timedelta(2, unit='H')
# df['tide_end'] = df['tide_end'].dt.tz_localize('Asia/Jerusalem', ambiguous=dst).dt.tz_convert('UTC')
df['tide_max'] = pd.to_datetime(
df['tide_max_date'], dayfirst=True) + pd.to_timedelta(
df['tide_max_hour'].add(':00'),
unit='m',
errors='coerce')
# also to tide max:
# df['tide_max'] = df['tide_max'].dt.tz_localize('Asia/Jerusalem', ambiguous=dst).dt.tz_convert('UTC')
df['tide_max'] = df['tide_max'] - pd.Timedelta(2, unit='H')
df['tide_duration'] = pd.to_timedelta(
df['tide_duration'] + ':00', unit='m', errors='coerce')
        # entries containing '<' (below-threshold readings) are set to 0
        # before casting to float:
        df.loc[df['max_flow[m^3/sec]'].str.contains('<', na=False),
               'max_flow[m^3/sec]'] = 0
        df.loc[df['tide_vol[MCM]'].str.contains('<', na=False),
               'tide_vol[MCM]'] = 0
df['max_flow[m^3/sec]'] = df['max_flow[m^3/sec]'].astype(float)
df['tide_vol[MCM]'] = df['tide_vol[MCM]'].astype(float)
to_drop = ['tide_start_hour', 'tide_start_date', 'tide_end_hour',
'tide_end_date', 'tide_max_hour', 'tide_max_date']
df = df.drop(to_drop, axis=1)
df_list.append(df)
df = pd.concat(df_list)
dfs = [x for _, x in df.groupby('id')]
ds_list = []
meta_df = read_hydro_metadata(path, gis_path, False)
for df in dfs:
st_id = df['id'].iloc[0]
st_name = df['name'].iloc[0]
        print('processing station number: {}, {}'.format(st_id, st_name))
meta = meta_df[meta_df['id'] == st_id]
ds = xr.Dataset()
df.set_index('tide_start', inplace=True)
attrs = {}
attrs['station_name'] = st_name
if not meta.empty:
attrs['lon'] = meta.lon.values.item()
attrs['lat'] = meta.lat.values.item()
attrs['alt'] = meta.alt.values.item()
attrs['drainage_basin_area'] = meta.area.values.item()
attrs['active'] = meta.active.values.item()
attrs['units'] = 'm'
max_height = df['max_height'].to_xarray()
max_height.name = 'TS_{}_max_height'.format(st_id)
max_height.attrs = attrs
max_flow = df['max_flow[m^3/sec]'].to_xarray()
max_flow.name = 'TS_{}_max_flow'.format(st_id)
attrs['units'] = 'm^3/sec'
max_flow.attrs = attrs
attrs['units'] = 'MCM'
tide_vol = df['tide_vol[MCM]'].to_xarray()
tide_vol.name = 'TS_{}_tide_vol'.format(st_id)
tide_vol.attrs = attrs
attrs.pop('units')
# tide_start = df['tide_start'].to_xarray()
# tide_start.name = 'TS_{}_tide_start'.format(st_id)
# tide_start.attrs = attrs
tide_end = df['tide_end'].to_xarray()
tide_end.name = 'TS_{}_tide_end'.format(st_id)
tide_end.attrs = attrs
tide_max = df['tide_max'].to_xarray()
tide_max.name = 'TS_{}_tide_max'.format(st_id)
tide_max.attrs = attrs
ds['{}'.format(max_height.name)] = max_height
ds['{}'.format(max_flow.name)] = max_flow
ds['{}'.format(tide_vol.name)] = tide_vol
# ds['{}'.format(tide_start.name)] = tide_start
ds['{}'.format(tide_end.name)] = tide_end
ds['{}'.format(tide_max.name)] = tide_max
ds_list.append(ds)
dsu = [get_unique_index(x, dim='tide_start') for x in ds_list]
print('merging...')
ds = xr.merge(dsu)
ds.attrs['time'] = 'UTC'
filename = 'hydro_tides.nc'
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
ds.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done!')
return ds
def plot_hydro_events(hs_id, path=hydro_path, field='max_flow', min_flow=10):
import xarray as xr
import matplotlib.pyplot as plt
tides = xr.open_dataset(path/'hydro_tides.nc')
sta_slice = [x for x in tides.data_vars if str(hs_id) in x]
tide = tides[sta_slice]['TS_{}_{}'.format(hs_id, field)]
tide = tide.dropna('tide_start')
fig, ax = plt.subplots()
tide.plot.line(linewidth=0., marker='x', color='r', ax=ax)
if min_flow is not None:
tide[tide > min_flow].plot.line(
linewidth=0., marker='x', color='b', ax=ax)
print('min flow of {} m^3/sec: {}'.format(min_flow,
tide[tide > min_flow].dropna('tide_start').size))
return tide
def text_process_hydrographs(path=hydro_path, gis_path=gis_path):
from aux_gps import path_glob
files = path_glob(path, 'hydro_flow*.txt')
for i, file in enumerate(files):
print(file)
with open(file, 'r') as f:
big_list = f.read().splitlines()
# for small_list in big_list:
# flat_list = [item for sublist in l7 for item in sublist]
big = [x.replace(',', ' ') for x in big_list]
big = big[6:]
big = [x.replace('\t', ',') for x in big]
filename = 'hydro_graph_{}.txt'.format(i)
with open(path / filename, 'w') as fs:
for item in big:
fs.write('{}\n'.format(item))
print('{} saved to {}'.format(filename, path))
return
def read_hydrographs(path=hydro_path):
from aux_gps import path_glob
import pandas as pd
import xarray as xr
from aux_gps import get_unique_index
files = path_glob(path, 'hydro_graph*.txt')
df_list = []
for file in files:
print(file)
df = pd.read_csv(file, header=0, sep=',')
df.columns = [
'id',
'name',
'time',
'tide_height[m]',
'flow[m^3/sec]',
'data_type',
'flow_type',
'record_type',
'record_code']
# make sure the time is in UTC since database is in ISR winter clock (no DST)
        df['time'] = pd.to_datetime(
            df['time'], dayfirst=True) - pd.Timedelta(2, unit='H')
import os
import argparse
import pickle
import itertools
import awkward as ak
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from src.data import read_nanoaod
def read_data(paths, ds_predictions, pn_predictions):
dfs = []
for path in paths:
valid_jets = read_nanoaod(path)
jet_pt = ak.to_pandas(valid_jets.pt)
gen_jet_pt = ak.to_pandas(valid_jets.matched_gen.pt)
gen_jet_eta = ak.to_pandas(valid_jets.matched_gen.eta)
parton_flavour = ak.to_pandas(valid_jets.matched_gen.partonFlavour)
hadron_flavour = ak.to_pandas(valid_jets.matched_gen.hadronFlavour)
df = pd.concat((jet_pt, gen_jet_pt, gen_jet_eta, parton_flavour, hadron_flavour), axis=1)
df.columns = ['Jet_pt', 'GenJet_pt', 'GenJet_eta', 'GenJet_partonFlavour', 'GenJet_hadronFlavour']
flavour = df.GenJet_hadronFlavour.where(df.GenJet_hadronFlavour != 0, other=np.abs(df.GenJet_partonFlavour))
df = df.drop(columns=['GenJet_partonFlavour', 'GenJet_hadronFlavour'])
df['flavour'] = flavour
dfs.append(df)
df = pd.concat(dfs, axis=0)
df['response'] = df.Jet_pt / df.GenJet_pt
df['ds_response'] = ds_predictions.flatten() * df.Jet_pt / df.GenJet_pt
df['pn_response'] = pn_predictions.flatten() * df.Jet_pt / df.GenJet_pt
return df
def plot_distrs(dataframe, fig_dir):
"""Plot distributions of response in a few representative bins."""
binning = np.linspace(0.5, 1.5, num=101)
pt_bins = [(30, 40), (100, 110), (1000, 1100)]
eta_bins = [(0., 2.5), (2.5, 5)]
ref_histograms, ds_histograms, pn_histograms = {}, {}, {}
for (ipt, pt_bin), (ieta, eta_bin) in itertools.product(
enumerate(pt_bins), enumerate(eta_bins)
):
df_bin = dataframe[
(dataframe.GenJet_pt >= pt_bin[0]) & (dataframe.GenJet_pt < pt_bin[1])
& (np.abs(dataframe.GenJet_eta) >= eta_bin[0])
& (np.abs(dataframe.GenJet_eta) < eta_bin[1])
]
for label, selection in [
('uds', (df_bin.flavour <= 3) & (df_bin.flavour != 0)),
('b', df_bin.flavour == 5),
('g', df_bin.flavour == 21)
]:
h, _ = np.histogram(df_bin.response[selection], bins=binning)
ref_histograms[ipt, ieta, label] = h
h, _ = np.histogram(df_bin.ds_response[selection], bins=binning)
ds_histograms[ipt, ieta, label] = h
h, _ = np.histogram(df_bin.pn_response[selection], bins=binning)
pn_histograms[ipt, ieta, label] = h
for ipt, ieta, flavour in itertools.product(
range(len(pt_bins)), range(len(eta_bins)), ['uds', 'b', 'g']
):
fig = plt.figure()
ax = fig.add_subplot()
ax.hist(
binning[:-1], weights=ref_histograms[ipt, ieta, flavour],
bins=binning, histtype='step', label='Standard')
ax.hist(
binning[:-1], weights=ds_histograms[ipt, ieta, flavour],
bins=binning, histtype='step', label='Deep Sets')
ax.hist(
binning[:-1], weights=pn_histograms[ipt, ieta, flavour],
bins=binning, histtype='step', label='ParticleNet')
ax.axvline(1., ls='dashed', lw=0.8, c='gray')
ax.margins(x=0)
ax.set_xlabel(
r'$p_\mathrm{T}^\mathrm{corr}\//\/p_\mathrm{T}^\mathrm{gen}$')
ax.set_ylabel('Jets')
ax.legend(loc='upper right')
ax.text(
1., 1.002,
r'${}$, ${:g} < p_\mathrm{{T}}^\mathrm{{gen}} < {:g}$ GeV, '
r'${:g} < |\eta^\mathrm{{gen}}| < {:g}$'.format(
flavour, pt_bins[ipt][0], pt_bins[ipt][1],
eta_bins[ieta][0], eta_bins[ieta][1]
),
ha='right', va='bottom', transform=ax.transAxes
)
ax.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True
)
fig.savefig(os.path.join(
fig_dir, f'{flavour}_pt{ipt + 1}_eta{ieta + 1}.png'
))
plt.close(fig)
def bootstrap_median(x, num=30):
"""Compute errors on median with bootstrapping."""
if len(x) == 0:
return np.nan
medians = []
for _ in range(num):
x_resampled = np.random.choice(x, len(x))
medians.append(np.median(x_resampled))
return np.std(medians)
def compare_flavours(dataframe, fig_dir):
"""Plot median response as a function of jet flavour."""
pt_cut = 30
for ieta, eta_bin in enumerate([(0, 2.5), (2.5, 5)], start=1):
df_pteta = dataframe[
(np.abs(dataframe.GenJet_eta) >= eta_bin[0])
& (np.abs(dataframe.GenJet_eta) < eta_bin[1])
]
ref_median, ref_median_error = [], []
ds_median, ds_median_error = [], []
pn_median, pn_median_error = [], []
flavours = [('g', {21}), ('uds', {1, 2, 3}), ('c', {4}), ('b', {5})]
for _, pdg_ids in flavours:
df = df_pteta[df_pteta.flavour.isin(pdg_ids)]
ref_median.append(df.response.median())
ref_median_error.append(bootstrap_median(df.response))
ds_median.append(df.ds_response.median())
ds_median_error.append(bootstrap_median(df.ds_response))
pn_median.append(df.pn_response.median())
pn_median_error.append(bootstrap_median(df.pn_response))
fig = plt.figure()
ax = fig.add_subplot()
ax.errorbar(
np.arange(len(flavours)) - 0.04, ref_median, yerr=ref_median_error,
marker='o', ms=3, lw=0, elinewidth=0.8, label='Standard'
)
ax.errorbar(
np.arange(len(flavours)), ds_median, yerr=ds_median_error,
marker='^', ms=3, lw=0, elinewidth=0.8, label='Deep Sets'
)
ax.errorbar(
np.arange(len(flavours)) + 0.04, pn_median, yerr=pn_median_error,
marker='s', ms=3, lw=0, elinewidth=0.8, label='ParticleNet'
)
ax.set_xlim(-0.5, len(flavours) - 0.5)
ax.axhline(1, ls='dashed', lw=0.8, c='gray')
ax.set_xticks(np.arange(len(flavours)))
ax.set_xticklabels([f[0] for f in flavours])
ax.legend()
ax.set_ylabel('Median response')
ax.text(
1., 1.002,
r'$p_\mathrm{{T}}^\mathrm{{gen}} > {:g}$ GeV, '
r'${:g} < |\eta^\mathrm{{gen}}| < {:g}$'.format(
pt_cut, eta_bin[0], eta_bin[1]
),
ha='right', va='bottom', transform=ax.transAxes
)
ax.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True
)
fig.savefig(os.path.join(fig_dir, f'eta{ieta}.png'))
plt.close(fig)
def plot_median_response(outdir, flavour_label, bins, bin_centers, eta_bin, ieta):
"""Plot median response as a function of pt."""
ref_median = bins.response.median().to_numpy()
ref_median_error = np.empty_like(ref_median)
for i, (_, df) in enumerate(bins):
ref_median_error[i] = bootstrap_median(df.response.to_numpy())
ds_median = bins.ds_response.median().to_numpy()
ds_median_error = np.empty_like(ref_median)
for i, (_, df) in enumerate(bins):
ds_median_error[i] = bootstrap_median(df.ds_response.to_numpy())
pn_median = bins.pn_response.median().to_numpy()
pn_median_error = np.empty_like(ref_median)
for i, (_, df) in enumerate(bins):
pn_median_error[i] = bootstrap_median(df.pn_response.to_numpy())
fig = plt.figure()
fig.suptitle('Median ' + flavour_label + '-jet response w.r.t. gen p$_{T}$')
ax = fig.add_subplot()
vals = np.geomspace(0.5, 50, 20)
shift = np.sqrt(vals[:-1] * vals[1:])
ax.errorbar(
bin_centers - shift, ref_median, yerr=ref_median_error,
ms=3, fmt='o', elinewidth=0.8, label='Standard'
)
ax.errorbar(
bin_centers, ds_median, yerr=ds_median_error,
ms=3, fmt='^', elinewidth=0.8, label='Deep Sets'
)
ax.errorbar(
bin_centers + shift, pn_median, yerr=pn_median_error,
ms=3, fmt='s', elinewidth=0.8, label='ParticleNet'
)
ax.axhline(1, ls='dashed', c='gray', alpha=.7)
ax.set_xlabel('$p^\\mathrm{{gen}}_{T}$')
ax.set_ylabel('Median response')
ax.text(
1., 1.002,
'{}${:g} < |\\eta^\\mathrm{{gen}}| < {:g}$'.format(
f'${flavour_label}$, ' if flavour_label != 'all' else '',
eta_bin[0], eta_bin[1]
),
ha='right', va='bottom', transform=ax.transAxes
)
ax.legend(loc='upper right')
ax.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True
)
ax.set_xscale('log')
fig.savefig(os.path.join(outdir, f'{flavour_label}_eta{ieta}.png'))
plt.close(fig)
def bootstrap_iqr(x, num=30):
"""Compute errors on IQR with bootstrapping."""
if len(x) == 0:
return np.nan
iqrs = []
for _ in range(num):
x_resampled = np.random.choice(x, len(x))
quantiles = np.percentile(x_resampled, [25, 75])
iqrs.append(quantiles[1] - quantiles[0])
return np.std(iqrs)
def compute_iqr(groups):
"""Compute IQR from series GroupBy."""
q = groups.quantile([0.25, 0.75])
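    # the quantiles come back interleaved per group (0.25, 0.75, 0.25, ...),
    # so odd rows (75th) minus even rows (25th) give each group's IQR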
iqr = q[1::2].values - q[0::2].values
return iqr
def plot_resolution(outdir, flavour_label, bins, bin_centers, eta_bin, ieta):
ref_median = bins.response.median().to_numpy()
ref_iqr = compute_iqr(bins.response)
ref_iqr_error = np.empty_like(ref_iqr)
for i, (_, df) in enumerate(bins):
ref_iqr_error[i] = bootstrap_iqr(df.response.to_numpy())
ds_median = bins.ds_response.median().to_numpy()
ds_iqr = compute_iqr(bins.ds_response)
ds_iqr_error = np.empty_like(ref_iqr)
for i, (_, df) in enumerate(bins):
ds_iqr_error[i] = bootstrap_iqr(df.ds_response.to_numpy())
pn_median = bins.pn_response.median().to_numpy()
pn_iqr = compute_iqr(bins.pn_response)
pn_iqr_error = np.empty_like(ref_iqr)
for i, (_, df) in enumerate(bins):
pn_iqr_error[i] = bootstrap_iqr(df.pn_response.to_numpy())
fig = plt.figure()
gs = mpl.gridspec.GridSpec(2, 1, hspace=0.02, height_ratios=[4, 1])
axes_upper = fig.add_subplot(gs[0, 0])
axes_lower = fig.add_subplot(gs[1, 0])
axes_upper.errorbar(
bin_centers, ref_iqr / ref_median, yerr=ref_iqr_error / ref_median,
ms=3, marker='o', lw=0, elinewidth=0.8, label='Standard'
)
axes_upper.errorbar(
bin_centers, ds_iqr / ds_median, yerr=ds_iqr_error / ds_median,
ms=3, marker='^', lw=0, elinewidth=0.8, label='Deep Sets'
)
axes_upper.errorbar(
bin_centers, pn_iqr / pn_median, yerr=pn_iqr_error / pn_median,
ms=3, marker='s', lw=0, elinewidth=0.8, label='ParticleNet'
)
axes_lower.plot(
bin_centers, (ds_iqr / ds_median) / (ref_iqr / ref_median),
ms=3, marker='^', lw=0, color='tab:orange'
)
axes_lower.plot(
bin_centers, (pn_iqr / pn_median) / (ref_iqr / ref_median),
ms=3, marker='s', lw=0, color='tab:green'
)
axes_upper.set_ylim(0, None)
axes_lower.set_ylim(0.85, 1.02)
for axes in [axes_upper, axes_lower]:
axes.set_xscale('log')
axes.set_xlim(binning[0], binning[-1])
axes_upper.xaxis.set_major_formatter(mpl.ticker.NullFormatter())
axes_upper.xaxis.set_minor_formatter(mpl.ticker.NullFormatter())
axes_upper.legend()
axes_upper.text(
1, 1.002,
'{}${:g} < |\\eta^\\mathrm{{gen}}| < {:g}$'.format(
f'${flavour_label}$, ' if flavour_label != 'all' else '',
eta_bin[0], eta_bin[1]
),
ha='right', va='bottom', transform=axes_upper.transAxes
)
axes_upper.set_ylabel('IQR / median for response')
axes_lower.set_ylabel('Ratio')
axes_lower.set_xlabel(r'$p_\mathrm{T}^\mathrm{gen}$')
axes_upper.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True
)
axes_lower.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True
)
fig.align_ylabels()
fig.savefig(os.path.join(outdir, f'{flavour_label}_eta{ieta}_iqr.png'))
plt.close(fig)
def plot_median_residual(outdir, bin_centers, flavour_labels, bins, eta_bin, ieta):
"""Plot difference in median response between flavours as a function of pt."""
ref_median_1 = bins[0].response.median().to_numpy()
ref_median_error_1 = np.empty_like(ref_median_1)
for i, (_, df) in enumerate(bins[0]):
ref_median_error_1[i] = bootstrap_median(df.response.to_numpy())
ds_median_1 = bins[0].ds_response.median().to_numpy()
ds_median_error_1 = np.empty_like(ref_median_1)
for i, (_, df) in enumerate(bins[0]):
ds_median_error_1[i] = bootstrap_median(df.ds_response.to_numpy())
pn_median_1 = bins[0].pn_response.median().to_numpy()
pn_median_error_1 = np.empty_like(ref_median_1)
for i, (_, df) in enumerate(bins[0]):
pn_median_error_1[i] = bootstrap_median(df.pn_response.to_numpy())
ref_median_2 = bins[1].response.median().to_numpy()
ref_median_error_2 = np.empty_like(ref_median_2)
for i, (_, df) in enumerate(bins[1]):
ref_median_error_2[i] = bootstrap_median(df.response.to_numpy())
ds_median_2 = bins[1].ds_response.median().to_numpy()
ds_median_error_2 = np.empty_like(ref_median_2)
for i, (_, df) in enumerate(bins[1]):
ds_median_error_2[i] = bootstrap_median(df.ds_response.to_numpy())
pn_median_2 = bins[1].pn_response.median().to_numpy()
pn_median_error_2 = np.empty_like(ref_median_2)
for i, (_, df) in enumerate(bins[1]):
pn_median_error_2[i] = bootstrap_median(df.pn_response.to_numpy())
diff = ref_median_1 - ref_median_2
err = np.sqrt(ref_median_error_1 ** 2 + ref_median_error_2 ** 2)
ds_diff = ds_median_1 - ds_median_2
ds_err = np.sqrt(ds_median_error_1 ** 2 + ds_median_error_2 ** 2)
pn_diff = pn_median_1 - pn_median_2
pn_err = np.sqrt(pn_median_error_1 ** 2 + pn_median_error_2 ** 2)
fig = plt.figure()
ax = fig.add_subplot()
fig.suptitle('Median response residuals w.r.t. gen p$_{T}$')
vals = np.geomspace(0.5, 50, 20)
shift = np.sqrt(vals[:-1] * vals[1:])
ax.errorbar(
bin_centers - shift, diff, yerr=err,
ms=3, fmt='o', elinewidth=0.8, label='Standard'
)
ax.errorbar(
bin_centers, ds_diff, yerr=ds_err,
ms=3, fmt='^', elinewidth=0.8, label='Deep Sets'
)
ax.errorbar(
bin_centers + shift, pn_diff, yerr=pn_err,
ms=3, fmt='s', lw=0, elinewidth=0.8, label='ParticleNet'
)
ax.axhline(0, ls='dashed', c='gray', alpha=.7)
ax.set_xlabel('$p^\\mathrm{{gen}}_{T}$')
ax.set_ylabel('$R_{' + flavour_labels[0] + '}-R_{' + flavour_labels[1] + '}$')
ax.text(
1., 1.002,
'${:g} < |\\eta^\\mathrm{{gen}}| < {:g}$'.format(eta_bin[0], eta_bin[1]),
ha='right', va='bottom', transform=ax.transAxes
)
ax.set_xscale('log')
ax.legend(loc='upper right')
ax.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True
)
fig.savefig(os.path.join(outdir, f'{flavour_labels[0]}-{flavour_labels[1]}_eta{ieta}.png'))
plt.close(fig)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description=__doc__)
arg_parser.add_argument('-d', '--deepsets', required=True, help='Deep Sets results directory')
arg_parser.add_argument('-p', '--particlenet', required=True, help='ParticleNet results directory')
arg_parser.add_argument('-o', '--outdir', required=True, help='Where to store plots')
args = arg_parser.parse_args()
try:
os.mkdir(args.outdir)
except FileExistsError:
pass
with open(os.path.join(args.deepsets, 'predictions.pkl'), 'rb') as f:
ds_predictions, ds_test_files = pickle.load(f)
with open(os.path.join(args.particlenet, 'predictions.pkl'), 'rb') as f:
pn_predictions, pn_test_files = pickle.load(f)
if pn_test_files != ds_test_files:
raise RuntimeError('Test files are different.')
df = read_data(ds_test_files, ds_predictions, pn_predictions)
for subdir in ['distributions', 'flavours', 'response', 'resolution', 'residual']:
try:
os.makedirs(os.path.join(args.outdir, subdir))
except FileExistsError:
pass
plot_distrs(df, os.path.join(args.outdir, 'distributions'))
compare_flavours(df, os.path.join(args.outdir, 'flavours'))
binning = np.geomspace(30, 3000, 20)
bin_centers = np.sqrt(binning[:-1] * binning[1:])
for (ieta, eta_bin), (flavour_label, flavour_ids) in itertools.product(
enumerate([(0, 2.5), (2.5, 5)], start=1),
[
('uds', {1, 2, 3}), ('b', {5}), ('g', {21}),
('all', {0, 1, 2, 3, 4, 5, 21})
]
):
df_bin = df[
(np.abs(df.GenJet_eta) >= eta_bin[0])
& (np.abs(df.GenJet_eta) < eta_bin[1])
& df.flavour.isin(flavour_ids)
]
bins = df_bin.groupby(pd.cut(df_bin.GenJet_pt, binning))
plot_median_response(
os.path.join(args.outdir, 'response'),
flavour_label, bins, bin_centers, eta_bin, ieta
)
plot_resolution(
os.path.join(args.outdir, 'resolution'),
flavour_label, bins, bin_centers, eta_bin, ieta
)
for (ieta, eta_bin), flavours in itertools.product(
enumerate([(0, 2.5), (2.5, 5)], start=1),
itertools.combinations([('uds', {1, 2, 3}), ('b', {5}), ('g', {21})], r=2),
):
bins = []
for i, flavour_ids in enumerate([flavours[0][1], flavours[1][1]]):
df_bin = df[
(np.abs(df.GenJet_eta) >= eta_bin[0])
& (np.abs(df.GenJet_eta) < eta_bin[1])
& df.flavour.isin(flavour_ids)
]
            bins.append(df_bin.groupby(pd.cut(df_bin.GenJet_pt, binning)))
        plot_median_residual(
            os.path.join(args.outdir, 'residual'),
            bin_centers, (flavours[0][0], flavours[1][0]), bins, eta_bin, ieta
        )
import glob
import os
import sys
from pprint import pprint
import pandas as pd
from ..constants import (DATA_DIR, DTYPES, RAW_DATA_DIR, USE_VAR_LIST,
USE_VAR_LIST_DICT, USE_VAR_LIST_DICT_REVERSE)
from ..download.nppes import nppes_month_list
from ..utils.utils import coerce_dtypes, month_name_to_month_num
def get_filepaths_from_dissemination_zips(folder):
'''
Each dissemination folder contains a large / bulk data file of the format
npidata_20050523-yearmonthday.csv, sometimes
deep in a subdirectory. This identifies the likeliest candidate and maps
in a dictionary to the main zip folder
'''
zip_paths = os.path.join(folder, 'NPPES_Data_Dissemination*')
stub = os.path.join(folder, 'NPPES_Data_Dissemination_')
folders = [x for x
in glob.glob(zip_paths)
if not x.endswith('.zip')]
folders = [x for x in folders if 'Weekly' not in x]
possbl = list(set(glob.glob(zip_paths + '/**/*npidata_*', recursive=True)))
possbl = [x for x in possbl if 'Weekly' not in x]
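    # keys are (year, month-number) tuples parsed from the folder name;
    # values are the paths to the bulk npidata csv files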
paths = {(x.partition(stub)[2].split('/')[0].split('_')[1],
str(month_name_to_month_num(
x.partition(stub)[2].split('/')[0].split('_')[0]))): x
for x in possbl if 'eader' not in x}
assert len(folders) == len(paths)
return paths
def get_weekly_dissemination_zips(folder):
'''
Each weekly update folder contains a large / bulk data file of the format
npidata_pfile_20200323-20200329, representing the week covered
Will need to later add functionality for weekly updates for ploc2 files
'''
zip_paths = os.path.join(folder, 'NPPES_Data_Dissemination*')
stub = os.path.join(folder, 'NPPES_Data_Dissemination_')
folders = [x for x
in glob.glob(zip_paths)
if not x.endswith('.zip')]
folders = [x for x in folders if 'Weekly' in x]
possbl = list(set(glob.glob(zip_paths + '/**/*npidata_*', recursive=True)))
possbl = [x for x in possbl if 'Weekly' in x]
paths = {(x.partition(stub)[2].split('/')[0].split('_')[0],
x.partition(stub)[2].split('/')[0].split('_')[1]): x
for x in possbl if 'eader' not in x}
assert len(folders) == len(paths)
return paths
def which_weekly_dissemination_zips_are_updates(folder):
"""
Will need to later add functionality for weekly updates for ploc2 files
"""
last_monthly = max([pd.to_datetime(val.split('-')[1]
.split('.csv')[0]
.replace(' Jan 2013/', '')
.replace('npidata_', ''))
for key, val in
get_filepaths_from_dissemination_zips(folder).items()])
updates = [(x, val) for x, val
in get_weekly_dissemination_zips(folder).items()
if pd.to_datetime(x[1]) > last_monthly]
return updates
def get_secondary_loc_filepaths_from_dissemination_zips(folder):
zip_paths = os.path.join(folder, 'NPPES_Data_Dissemination*')
stub = os.path.join(folder, 'NPPES_Data_Dissemination_')
possbl = list(set(glob.glob(zip_paths + '/**/pl_pfile_*', recursive=True)))
possbl = [x for x in possbl if 'Weekly' not in x]
paths = {(x.partition(stub)[2].split('/')[0].split('_')[1],
str(month_name_to_month_num(
x.partition(stub)[2].split('/')[0].split('_')[0]))): x
for x in possbl if 'eader' not in x}
return paths
def get_filepaths_from_single_variable_files(variable, folder, noisily=True):
'''
Returns a dictionary of the path to each single variable file, for
each month and year
'''
files = glob.glob(os.path.join(folder, '%s*' % variable))
file_dict = {(x.split(variable)[1].split('.')[0][:4],
x.split(variable)[1].split('.')[0][4:]): x
for x in files}
if noisily:
print('For variable %s, there are %s files:'
% (variable, len(file_dict)))
pprint(sorted(list(file_dict.keys())))
return file_dict
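# Hedged sketch of the single-variable lookup: filenames are expected to look like
# '<variable><YYYY><M>.csv', so keys become ('YYYY', 'M') string pairs. 'ploczip' is just
# a variable name already used elsewhere in this module.
def _example_single_variable_lookup(folder):
    file_dict = get_filepaths_from_single_variable_files('ploczip', folder, noisily=False)
    return sorted(file_dict.keys())  # e.g. [('2019', '10'), ('2019', '7'), ...]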
def convert_dtypes(df):
'''
Note: should move to generic version found in utils
'''
# weird fix required for bug in select_dtypes
ints_init = (df.dtypes
.reset_index()[df.dtypes.reset_index()[0] == int]['index']
.values.tolist())
current_dtypes = {x: 'int' for x in ints_init}
for t in ['int', 'object', ['float32', 'float64'], 'datetime', 'string']:
current_dtypes.update({x: t for x in df.select_dtypes(t).columns})
dissem_file = (not set(current_dtypes.keys()).issubset(DTYPES.keys()))
for col in df.columns:
final_dtype = (DTYPES[col] if not dissem_file
else DTYPES[{**USE_VAR_LIST_DICT_REVERSE,
**{'seq': 'seq'}}[col]])
if (current_dtypes[col] != final_dtype and
final_dtype not in current_dtypes[col]):
try:
df = df.assign(**{col: coerce_dtypes(df[col],
current_dtypes[col],
final_dtype)})
except ValueError as err:
if final_dtype == 'string':
newcol = coerce_dtypes(df[col], current_dtypes[col], 'str')
newcol = coerce_dtypes(newcol, 'str', 'string')
else:
raise ValueError("{0}".format(err))
return df
def column_details(variable, dissem_file, dta_file):
'''
Generates column list to get from the raw data; dissem files
have long string names and are wide, whereas NBER files have
short names and are long
'''
diss_var = USE_VAR_LIST_DICT[variable]
multi = True if isinstance(diss_var, list) else False
tvar = ['npi', 'seq']
if not dissem_file:
if multi:
if str.isupper(variable) and not dta_file:
def collist(col): return col.upper() == variable or col in tvar
elif str.isupper(variable) and dta_file:
collist = tvar + [variable.lower()]
else:
collist = tvar + [variable]
else:
collist = ['npi', variable]
d_use = {} if not variable == 'ploczip' else {'ploczip': str}
else:
diss_vars = diss_var if multi else [diss_var]
collist = (['NPI'] + diss_var if multi else ['NPI'] + [diss_var])
d_use = {x: object for x in diss_vars if DTYPES[variable] == 'string'}
return collist, d_use
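# Illustration of the two layouts column_details() reconciles: NBER-style files are long
# (npi/seq plus the short variable name) while dissemination files are wide with the long
# official names from USE_VAR_LIST_DICT. The first return value is either a column list
# or a callable suitable for pandas' `usecols`. Any key of USE_VAR_LIST_DICT works here.
def _example_column_details(variable):
    nber_cols, _ = column_details(variable, dissem_file=False, dta_file=False)
    dissem_cols, _ = column_details(variable, dissem_file=True, dta_file=False)
    return nber_cols, dissem_cols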
def locate_file(folder, year, month, variable):
    '''
    Find the file for a given (year, month, variable): prefer a pre-built
    single-variable file, fall back to the raw dissemination (or secondary
    practice-location) file, and return None if neither exists.
    '''
paths1 = get_filepaths_from_single_variable_files(variable, folder, False)
if not variable.startswith('ploc2'):
paths2 = get_filepaths_from_dissemination_zips(folder)
else:
paths2 = get_secondary_loc_filepaths_from_dissemination_zips(folder)
try:
return paths1[(year, month)]
except KeyError:
try:
return paths2[(year, month)]
except KeyError:
return None
def read_and_process_df(folder, year, month, variable):
'''
Locates and reads in year-month-variable df from disk,
checks and converts dtypes, makes consistent variable names,
and adds a standardized month column
'''
file_path = locate_file(folder, '%s' % year, '%s' % month, variable)
if file_path:
df = process_filepath_to_df(file_path, variable)
df['month'] = pd.to_datetime('%s-%s' % (year, month))
return df
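# Hedged sketch of the per-month flow: locate the raw file, normalise it to
# npi(/seq)/<variable> columns, and stamp a 'month' timestamp so the monthly frames can be
# concatenated by process_variable(). The (2019, 7) pair is an arbitrary example.
def _example_read_one_month(folder, variable):
    df = read_and_process_df(folder, 2019, 7, variable)
    if df is not None:
        assert {'npi', 'month', variable}.issubset(df.columns)
    return df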
def read_and_process_weekly_updates(folder, variable):
"""
"""
filepaths = which_weekly_dissemination_zips_are_updates(folder)
if filepaths:
updates = pd.concat(
[process_filepath_to_df(f[1], variable).assign(
week=pd.to_datetime(f[0][0]))
for f in filepaths])
updates['month'] = (pd.to_datetime(updates.week.dt.year.astype(str)
+ '-'
+ updates.week.dt.month.astype(str) + '-' + '1'))
updates = (updates.dropna()
.groupby(['npi', 'month'])
.max()
.reset_index()
.merge(updates)
.drop(columns='week'))
return updates
def process_filepath_to_df(file_path, variable):
"""
"""
is_dissem_file = len(file_path.split('/')) > 6
is_dta_file = os.path.splitext(file_path)[1] == '.dta'
is_pl_file = ('pl_pfile_' in file_path) and is_dissem_file
collist, d_use = column_details(variable, is_dissem_file, is_dta_file)
df = (pd.read_csv(file_path, usecols=collist, dtype=d_use)
if file_path.endswith('.csv')
else pd.read_stata(file_path, columns=collist))
if is_pl_file:
df = (pd.concat([df, df.groupby('NPI').cumcount() + 1], axis=1)
.rename(columns={0: 'seq'}))
if (not is_dissem_file
and variable not in df.columns
and variable.lower() in df.columns):
df = df.rename(columns={variable.lower(): variable})
df = convert_dtypes(df)
df = reformat(df, variable, is_dissem_file)
return df
def reformat(df, variable, is_dissem_file):
    '''
    Standardize raw dissemination output: reshape wide multi-column variables into
    long (npi, seq) format and rename columns to the short variable names.
    '''
multi = True if isinstance(USE_VAR_LIST_DICT[variable], list) else False
if is_dissem_file and multi:
stb = list(set([x.split('_')[0] for x in USE_VAR_LIST_DICT[variable]]))
assert len(stb) == 1
stb = stb[0] + '_'
df = pd.wide_to_long(df, [stb], i="NPI", j="seq").dropna()
df = df.reset_index().rename(columns={'NPI': 'npi', stb: variable})
elif is_dissem_file:
df = df.rename(columns={x: {**USE_VAR_LIST_DICT_REVERSE,
**{'seq': 'seq'}}[x]
for x in df.columns})
return df
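# Tiny synthetic illustration of the wide-to-long reshape used for multi-column
# dissemination variables (the 'PLIC_' stub is made up; real stubs come from
# USE_VAR_LIST_DICT):
def _example_wide_to_long():
    wide = pd.DataFrame({'NPI': [1], 'PLIC_1': ['A'], 'PLIC_2': ['B']})
    long_df = pd.wide_to_long(wide, ['PLIC_'], i='NPI', j='seq').dropna()
    return long_df.reset_index()  # one row per (NPI, seq) with a single 'PLIC_' column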
def process_variable(folder, variable, searchlist, final_weekly_updates=True):
    '''
    Build the full monthly panel for `variable` over every (year, month) in
    `searchlist`, optionally folding in the most recent weekly update files.
    '''
# searchlist = [x for x in searchlist if x != (2011, 3)]
df_list = []
for (year, month) in searchlist:
print(year, month)
if variable == "PTAXGROUP":
try:
df = read_and_process_df(folder, year, month, variable)
except ValueError as err:
assert year < 2012
else:
df = read_and_process_df(folder, year, month, variable)
df_list.append(df)
df = pd.concat(df_list, axis=0) if df_list else None
if df_list and final_weekly_updates:
u = read_and_process_weekly_updates(folder, variable)
if isinstance(u, pd.DataFrame):
df = df.merge(u, on=['npi', 'month'], how='outer', indicator=True)
if (df._merge == "right_only").sum() != 0:
df.loc[df._merge == "right_only",
'%s_x' % variable] = df['%s_y' % variable]
if (df._merge == "both").sum() != 0:
df.loc[df._merge == "both",
'%s_x' % variable] = df['%s_y' % variable]
df = (df.drop(columns=['_merge', '%s_y' % variable])
.rename(columns={'%s_x' % variable: variable}))
assert (df[['npi', 'month']].drop_duplicates().shape[0]
== df.shape[0])
return df
def sanitize_csv_for_update(df, variable):
df['month'] = pd.to_datetime(df.month)
if DTYPES[variable] == 'datetime64[ns]':
df[variable] = pd.to_datetime(df[variable])
elif variable == 'ploctel':
# Still not sure the ploctel update is working
df[variable] = df[variable].astype(str)
df.loc[df.ploctel.str.endswith('.0'),
'ploctel'] = df.ploctel.str[:-2]
df[variable] = df[variable].astype(DTYPES[variable])
else:
df[variable] = df[variable].astype(DTYPES[variable])
return df
def main_process_variable(variable, update):
# Should figure out NPPES_Data_Dissemination_March_2011 because it's weird;
# deleting for now
if not update:
print(f'Making new: {variable}')
searchlist = [x for x in nppes_month_list() if x != (2011, 3)]
df = process_variable(RAW_DATA_DIR, variable, searchlist)
df.to_csv(os.path.join(DATA_DIR, '%s.csv' % variable),
index=False)
else:
print(f'Updating: {variable}')
df = pd.read_csv(os.path.join(DATA_DIR, '%s.csv' % variable))
df = sanitize_csv_for_update(df, variable)
last_month = max(list(df.month.value_counts().index))
searchlist = [x for x in nppes_month_list() if
(pd.to_datetime('%s-%s-01' % (x[0], x[1]))
                       >= pd.to_datetime(last_month))]
import logging
from neo4j import GraphDatabase
from py2neo import Graph, NodeMatcher, RelationshipMatch, Node
import pandas as pd
from tqdm import tqdm
from curami.commons import common_utils, file_utils
from curami.commons.models import Curation, RelationshipType
from curami.commons.config_params import NEO4J_URL, NEO4J_USERNAME, NEO4J_PASSWORD
# db_url = 'bolt://neo4j:7687'
db_url = "bolt://localhost:7687"
# db_url = "bolt://scooby.ebi.ac.uk:7687"
userName = "neo4j"
password = "<PASSWORD>"
# Neo4j 4.0.3
# neo4j 1.7.4 4.0.0
# neobolt 1.7.4 1.7.17
class Neo4jConnector:
def __init__(self):
self.driver = GraphDatabase.driver(NEO4J_URL, auth=(NEO4J_USERNAME, NEO4J_PASSWORD))
def get_user(self, username):
user_password = ''
with self.driver.session() as session:
results = session.run("MATCH (a:User { username: $username }) RETURN a.password", username=username)
user_password = results.single()["a.password"]
return user_password
def create_user(self, username, password_hash):
with self.driver.session() as session:
results = session.run("CREATE (a:User{ username: $username, password: $password }) RETURN a",
username=username, password=password_hash)
return results.single()
def get_suggested_curations(self, search_term, page, size, user):
search_query = (
"WHERE a.name = '" + search_term + "' OR b.name = '" + search_term + "' ") if search_term else ""
with self.driver.session() as session:
results = session.run("MATCH(a:Attribute)-[r:LOOKS_SIMILAR]->(b:Attribute) RETURN COUNT(r) as total_count")
for result in results:
total_records = result["total_count"]
results = session.run("MATCH(a:Attribute)-[r:LOOKS_SIMILAR]->(b:Attribute) " + search_query +
"UNWIND [a.count, b.count] AS count " +
"RETURN r.owner as owner, r.score as score, r.class as class, TYPE(r) as type, " +
"a.name as attribute_name, a.count as attribute_count, "
"a.quality as attribute_quality, " +
"b.name as curation_name, b.count as curation_count, "
"b.quality as curation_quality, " +
"min(count) as min_count " +
"ORDER BY min_count DESC SKIP $skip LIMIT $limit", skip=(page - 1) * size,
limit=size)
curations = []
for result in results:
attribute_1 = result["attribute_name"]
attribute_2 = result["curation_name"]
curation = Curation(attribute_1, attribute_2)
curation.attribute_1.count = result["attribute_count"]
curation.attribute_1.quality = result["attribute_quality"]
curation.attribute_2.count = result["curation_count"]
curation.attribute_2.quality = result["curation_quality"]
status, rel_type, attribute_curated = self.get_manual_curations(attribute_1, attribute_2, user)
if status != 0:
curation.attribute_curated = attribute_curated
curation.type = rel_type
curation.status = status
curations.append(curation)
return curations
def get_manual_curations(self, attribute_1, attribute_2, user):
attribute_names = [attribute_1, attribute_2]
with self.driver.session() as session:
results = session.run(
"MATCH (a:Attribute)-[r]->(b:Attribute) WHERE a.name in $attributes AND r.owner = $owner " +
"RETURN a.name AS attribute_name, b.name as curation_name, r.owner AS owner, r.score as score, r.class as class, TYPE(r) as type",
owner=user, attributes=attribute_names)
curation_count = 0
curated_name = ''
status = 0 # 0: no curations, -1: conflicts, 1,2: curations
rel_type = None
for result in results:
if result["type"] == RelationshipType.SAME_AS.name:
if curation_count == 0:
curated_name = result["curation_name"]
rel_type = result["type"]
curation_count += 1
if result["attribute_name"] in attribute_names and result["curation_name"] in attribute_names:
status = 1
# return result["type"], curated_name
else:
status = -1
elif curation_count == 1:
curation_count += 1
rel_type = result["type"]
if curated_name == result["curation_name"]:
status = 2
# return result["type"], curated_name
else:
status = -1
else:
logging.error("Invalid status in db: more than one outgoing SAME_AS relationship")
elif result["type"] == RelationshipType.DIFFERENT_FROM.name:
if result["attribute_name"] in attribute_names and result["curation_name"] in attribute_names:
status = 1
rel_type = result["type"]
# return result["type"], curated_name
return status, rel_type, curated_name
def add_curation(self, attribute_1, attribute_2, attribute_curated, user):
with self.driver.session() as session:
# delete SAME_AS manual outgoing relationship for user for two attributes
session.run(
"MATCH (a:Attribute)-[r:SAME_AS]->(b:Attribute) WHERE a.name IN $attributes AND r.owner = $user " +
"DELETE r", attributes=[attribute_1, attribute_2], user=user)
session.run(
"MATCH (a:Attribute)-[r:DIFFERENT_FROM]->(b:Attribute) WHERE a.name = $attribute_1 "
"AND b.name = $attribute_2 AND r.owner = $user " +
"DELETE r", attribute_1=attribute_1, attribute_2=attribute_2, user=user)
# create new manual relationship
if attribute_curated == attribute_1:
session.run(
"MATCH (a:Attribute),(b:Attribute) WHERE a.name = $attribute AND b.name = $curation " +
"CREATE (a)-[r:SAME_AS {class: 'HUMAN', owner: $user, confidence: $score}]->(b)",
attribute=attribute_2, curation=attribute_curated, user=user, score=0.8)
session.run("MATCH (a:Attribute {name: $attribute}) SET a.quality = a.quality + $score RETURN a",
attribute=attribute_1, score=0.25)
elif attribute_curated == attribute_2:
session.run(
"MATCH (a:Attribute),(b:Attribute) WHERE a.name = $attribute AND b.name = $curation " +
"CREATE (a)-[r:SAME_AS {class: 'HUMAN', owner: $user, confidence: $score}]->(b)",
attribute=attribute_1, curation=attribute_curated, user=user, score=0.8)
session.run("MATCH (a:Attribute {name: $attribute}) SET a.quality = a.quality + $score RETURN a",
attribute=attribute_2, score=0.25)
else:
session.run("MERGE (a:Attribute {name: $curation}) " +
"ON CREATE SET a:Manual, a.quality = 1, a.count = 0 " +
"ON MATCH SET a.quality = a.quality + $score",
curation=attribute_curated, score=0.25)
session.run(
"MATCH (a:Attribute),(b:Attribute) WHERE a.name = $attribute AND b.name = $curation " +
"CREATE (a)-[r:SAME_AS {class: 'HUMAN', owner: $user, confidence: $score}]->(b)",
attribute=attribute_1, curation=attribute_curated, user=user, score=0.8)
session.run(
"MATCH (a:Attribute),(b:Attribute) WHERE a.name = $attribute AND b.name = $curation " +
"CREATE (a)-[r:SAME_AS {class: 'HUMAN', owner: $user, confidence: $score}]->(b)",
attribute=attribute_2, curation=attribute_curated, user=user, score=0.8)
def reject_curation(self, attribute_1, attribute_2, user):
with self.driver.session() as session:
# delete any manual relationship between two attributes for the user
session.run(
"MATCH (a:Attribute)-[r]-(b:Attribute) WHERE a.name = $attribute_1 AND a.name = $attribute_2 "
"AND r.owner = $user " +
"DELETE r", attribute_1=attribute_1, attribute_2=attribute_2, user=user)
# create relationship
session.run(
"MATCH (a:Attribute),(b:Attribute) " +
"WHERE a.name = $attribute_1 AND b.name = $attribute_2 "
"CREATE (a)-[r:DIFFERENT_FROM {class: 'HUMAN', owner: $owner, confidence: 1}]->(b)",
attribute_1=attribute_1, attribute_2=attribute_2, owner=user)
def get_manual_curations_all(self):
with self.driver.session() as session:
results = session.run("MATCH(a:Attribute)-[r:LOOKS_SIMILAR]->(b:Attribute) "
"WHERE (a)-[:SAME_AS|:IGNORES|:DIFFERENT_FROM]-() "
"OR (b)-[:SAME_AS|:IGNORES|:DIFFERENT_FROM]-() "
"RETURN a.name AS attribute_name, b.name AS curation_name")
curation_map = {}
for result in results:
attribute_1 = result["attribute_name"]
attribute_2 = result["curation_name"]
if attribute_2 not in curation_map:
curation_map[attribute_2] = {"attribute": attribute_2, "suggested": attribute_1}
node_relationships = session.run("MATCH (a:Attribute {name: $attribute})-[r]->(b) "
"RETURN TYPE(r) AS rel_type, r.owner AS owner, b.name as curation",
attribute=attribute_2)
for r in node_relationships:
r_type = r["rel_type"]
if r_type == RelationshipType.SAME_AS.name:
curation_map[attribute_2][r['owner']] = r['curation']
# elif r_type == RelationshipType.LOOKS_SIMILAR.name:
# curation_map[attribute_2][r['owner']] = r['curation']
# curation_map_filtered = {k: v for k, v in curation_map.items() if v}
pd_curations = pd.DataFrame.from_dict(curation_map, orient="index")
# pd_curations = pd_curations[(pd_curations['isuru'] == pd_curations['isuru'])][(pd_curations['fuqi'] == pd_curations['fuqi'])]
pd_curations = pd_curations.dropna()
return pd_curations
def build_curation_graph(self, filename, delete):
attribute_misspelling_df = pd.read_csv(filename, encoding=file_utils.encoding)
print("Loading " + str(len(attribute_misspelling_df)) + " attribute relationships...")
attribute_map = common_utils.build_attribute_map()
if delete:
with self.driver.session() as session:
session.run("MATCH (n) DETACH DELETE n")
with self.driver.session() as session:
progress_bar = tqdm(total=len(attribute_misspelling_df), position=0, leave=True)
for index, row in tqdm(attribute_misspelling_df.iterrows()):
curation_name = row[0]
attribute_name = row[1]
session.run("MERGE (a:Attribute { name: $attribute, count: $count, quality: $quality })",
attribute=curation_name, count=attribute_map[curation_name], quality=0)
session.run("MERGE (a:Attribute { name: $attribute, count: $count, quality: $quality })",
attribute=attribute_name, count=attribute_map[attribute_name], quality=0)
session.run(
"MATCH (a:Attribute),(b:Attribute) WHERE a.name = $attribute AND b.name = $curation " +
"CREATE (a)-[r:LOOKS_SIMILAR {class: 'MACHINE', owner: 'dictionary', score: $score}]->(b)",
attribute=attribute_name, curation=curation_name, score=0.2)
progress_bar.update(1)
print("Finished loading data into neo4j")
def build_cooccurance_graph(self, filename, delete):
        coexistence_df = pd.read_csv(filename, encoding=file_utils.encoding)
import pandas as pd
import numpy as np
import time
from components.binary_conway_forward_prop_fn import BinaryConwayForwardPropFn
# Randomly take a row in the data and verify the numbers are correct.
def sample_verify(data):
conway = BinaryConwayForwardPropFn(numpy_mode=True)
nrows = len(data)
# Randomly take a row to verify. Avoid random() since seed is fixed.
# Use current time digits after the decimal point.
j = int(str(time.time()%1)[2:])%nrows
row = data.iloc[j, :]
if len(row) < 10:
print('No state data to verify')
return
(game_index, delta, target_lives, cnn_lives, cnn_errors,
ga_lives, ga_errors) = map(int, row[:7])
(target, cnn, ga) = row[7:]
end_state = np.array(list(target)).astype(int).reshape((1,25,25,1))
    expect = end_state.sum()
if not expect == target_lives:
raise Exception('Game {} failed target_live {} vs expected {}'.format(
game_index, target_lives, expect))
ga_state = np.array(list(ga)).astype(int).reshape((1,25,25,1))
expect = ga_state.sum()
if not ga_lives == expect:
raise Exception('Game {} failed ga_lives {} vs expected {}'.format(
game_index, ga_lives, expect))
expect = abs(conway(ga_state, delta) - end_state).sum()
if not ga_errors == expect:
raise Exception('Game {} failed ga_errors {} vs expected {}'.format(
game_index, ga_errors, expect))
if not cnn == 0:
cnn_state = np.array(list(cnn)).astype(int).reshape((1,25,25,1))
expect = cnn_state.sum()
if not cnn_lives == expect:
raise Exception('Game {} failed cnn_lives {} vs expected {}'.format(
game_index, cnn_lives, expect))
expect = abs(conway(cnn_state, delta) - end_state).sum()
if not cnn_errors == expect:
raise Exception('Game {} failed cnn_errors {} vs expected {}'.format(
game_index, cnn_errors, expect))
print('Verified row {} delta {} on game {}.'.format(j, delta, game_index))
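# Minimal driver sketch (assumes a results.csv with the column order unpacked above, the
# same file that post_run_report() below reads):
def example_sample_verify(output_dir):
    data = pd.read_csv(output_dir + 'results.csv', index_col=0)
    sample_verify(data)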
def post_run_report(output_dir):
    data = pd.read_csv(output_dir + 'results.csv', index_col=0)
import datetime
from collections import OrderedDict
import pandas as pd
from google.cloud import bigquery
CLIENT = None
PROJECT_ID = None
def insert_date_range(sql, date_range):
start, end = date_range
if start is None and end is None: return sql
if start is None:
return sql + ' WHERE `date` <= DATE("%s")' % end
if end is None:
return sql + ' WHERE `date` >= DATE("%s")' % start
return sql + ' WHERE DATE("%s") <= `date` AND `date` <= DATE("%s")' % (start, end)
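# Quick illustration of the WHERE clauses produced (table name and dates are arbitrary
# examples; real table names are built in query_covariate_df_from_gbq below):
def _example_date_filters():
    base = 'SELECT date, time, device, value FROM `project.pid.heartrate`'
    return (insert_date_range(base, (None, None)),                  # unchanged
            insert_date_range(base, ('2020-01-01', None)),          # lower bound only
            insert_date_range(base, ('2020-01-01', '2020-02-01')))  # both bounds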
# define helper fns:
def query_covariate_df_from_gbq(pid, date_range, covariate):
"""
Query a table from Google BigQuery, via SQL.
:param pid: patient id (str)
    :param covariate: `heartrate`, `steps`, `sleep`
"""
assert covariate in ['heartrate', 'steps', 'sleep']
columns = ['Date', 'Time', 'Source', 'Value']
if covariate != 'sleep':
sql = """
SELECT date, time, device, value
FROM `%s.%s.%s`
""" % (PROJECT_ID, pid, covariate)
else:
sql = """
SELECT date, time, device, type, value
FROM `%s.%s.%s`
""" % (PROJECT_ID, pid, covariate)
columns = ['Date', 'Time', 'Source', 'Value', 'n_sleep_seconds']
sql = insert_date_range(sql, date_range)
df = CLIENT.query(sql).to_dataframe()
df.columns = columns
try:
df['date_time'] = pd.to_datetime(df['date_time'])
    except KeyError:  # no combined date_time column in the source table; build it from Date + Time
        df['date_time'] = ['%s %s' % (d, t) for d, t in zip(df['Date'].values, df['Time'].values)]
df['date_time'] = pd.to_datetime(df['date_time'])
df.drop(['Date', 'Time'], inplace=True, axis=1)
# df = df.set_index('date_time').drop('Test', axis=0).reset_index()
# df['date_time'] = pd.to_datetime(df['date_time'])
df['UserID'] = pid
if covariate == 'sleep':
df = df[['UserID', 'Source', 'Value', 'n_sleep_seconds', 'date_time']]
df['n_sleep_seconds'] = pd.to_numeric(df['n_sleep_seconds'])
else:
df = df[['UserID', 'Source', 'Value', 'date_time']]
df['Value'] = pd.to_numeric(df['Value'])
return df
def preprocess_covariate_df(pid, pid_df, covariate):
"""
Preprocess a covariate dataframe:
- expand data to 1 min resolution
- expand sleep data
:param covariate: `heartrate`, `steps` or `sleep`
:return:
"""
pid_df_expanded = []
# do the following per device and concatenate afterwards.
for device, ddf in pid_df.groupby('Source'):
if covariate == 'sleep':
# apple hk data
if any(['InBed' in ddf['Value'].unique(), 'Asleep' in ddf['Value'].unique()]):
ddf.columns = ['uid', 'device', 'sleep', 'date_time']
elif ddf.empty:
ddf.columns = ['uid', 'device', 'sleep', 'date_time']
ddf = ddf.set_index('date_time').resample('T').median().reset_index()
ddf['sleep'] = 0.
# fitbit data
elif any(['rem' in ddf['Value'].unique(),
'awake' in ddf['Value'].unique(),
'wake' in ddf['Value'].unique(),
'deep' in ddf['Value'].unique(),
'restless' in ddf['Value'].unique(),
'alseep' in ddf['Value'].unique(),
'unknown' in ddf['Value'].unique(),
]):
# we need to expand:
expanded_dfs = []
for i, r in ddf.iterrows():
n_mins = r['n_sleep_seconds'] // 60
df = pd.DataFrame([r['Value']] * n_mins,
index=pd.date_range(r['date_time'].round(freq='T'), periods=n_mins, freq='T'))
df['uid'] = r['UserID']
expanded_dfs.append(df)
ddf = pd.concat(expanded_dfs, sort=True, axis=0)
# delete dublicate indices:
ddf = ddf.loc[~ddf.index.duplicated(keep='first')]
ddf.reset_index(inplace=True)
ddf.columns = ['date_time', 'sleep', 'uid'] # sort out the user ID
else: # corrupted fitbit data
ddf.columns = ['uid', 'device', 'sleep', 'date_time']
uid = ddf['uid'].unique()[0]
ddf['sleep'] = 0.
ddf = ddf.set_index('date_time').resample('T').median().reset_index()
ddf['uid'] = uid
ddf['device'] = device
ddf = ddf[['uid', 'device', 'sleep', 'date_time']]
ddf['sleep'] = ddf['sleep'].astype(float)
elif covariate == 'steps':
ddf.columns = ['uid', 'device', 'steps', 'date_time']
ddf['steps'] = ddf['steps'].astype(float)
ddf = ddf.set_index('date_time').resample('T').mean().reset_index()
elif covariate == 'heartrate':
ddf.columns = ['uid', 'device', 'heart_rate', 'date_time']
ddf['heart_rate'] = ddf['heart_rate'].astype(float)
ddf = ddf.set_index('date_time').resample('T').median().reset_index()
ddf['uid'] = pid
ddf['device'] = device
ddf = ddf.loc[~ddf.index.duplicated(keep='first')]
pid_df_expanded.append(ddf)
try:
pid_df = pd.concat(pid_df_expanded, axis=0)
except ValueError:
raise OSError('Empty input files!')
pid_df = pid_df.set_index(['device', 'date_time']).sort_index()
return pid_df
def get_PID_df_per_device(pid, dfs, devices=['fitbit'], ndays=1000):
"""
This returns a pid_df per device in the input .csvs or .jsons
Possible Devices:
['FB-Fitbit', # Fitbit
'HK-Connect', # Garmin
'HK-Health', # ??
'HK-iPhone', # Phone -> Steps only
'HK-Motiv', # motiv ring
'HK-Apple', # apple watch
'HK-Biostrap' # Biostrap
]
:param pid:
:return:
"""
data_per_device = OrderedDict()
for d in devices:
p_dfs = []
for covariate in dfs.keys():
try:
p_dfs.append(dfs[covariate].xs(d, level='device', drop_level=True).drop('uid', axis=1))
except KeyError:
print('No %s data found for %s' % (covariate, d))
pdf = pd.DataFrame(columns=[covariate])
pdf.index.name = 'date_time'
p_dfs.append(pdf)
device_df = p_dfs[0].join(p_dfs[1], how='outer')
device_df = device_df.join(p_dfs[2], how='outer')
try:
last_timestamp = device_df.index.values[-1]
limit = last_timestamp - pd.Timedelta(days=ndays)
device_df = device_df.loc[limit:last_timestamp]
except IndexError:
pass
device_df['uid'] = pid
if device_df.index.name != 'date_time':
device_df.reset_index(inplace=True)
device_df.set_index('date_time', inplace=True)
device_df.dropna(subset=['heart_rate', 'steps',
# 'sleep'
], axis=0, thresh=1, inplace=True)
device_df[['heart_rate', 'steps']] = device_df[['heart_rate', 'steps']].astype(float)
data_per_device[d] = device_df
return data_per_device
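# Hedged sketch of chaining the helpers above for one participant. CLIENT / PROJECT_ID
# must be configured before the BigQuery call; the device list simply reuses the default.
def example_participant_pipeline(pid, date_range=(None, None)):
    dfs = {}
    for covariate in ['heartrate', 'steps', 'sleep']:
        raw = query_covariate_df_from_gbq(pid, date_range, covariate)
        dfs[covariate] = preprocess_covariate_df(pid, raw, covariate)
    return get_PID_df_per_device(pid, dfs, devices=['fitbit'])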
def impute_PID_df(in_df, slen, granularity, **kwargs):
"""
The main preprocessing function.
    IMPORTANT: As we resample, we need to binarize the sleep before doing this.
:param in_df:
:return:
"""
uid = in_df['uid'].unique()
assert len(uid) == 1, 'There must be exactly 1 ID per user.'
in_df.drop('uid', axis=1)
in_df = in_df[in_df['heart_rate'] >= 20] # hard cut-off for HR as HR of 20 is non-realistic
# binarize the sleep:
in_df['sleep'] = in_df['sleep'].map(dict([('awake', 0),
('wake', 0),
('unknown', 1),
('light', 1),
('deep', 1),
('restless', 1),
('rem', 1),
('asleep', 1),
('Asleep', 1),
('InBed', 0),
('NaN', 0)]))
sleep_df = in_df.copy()
sleep_df.loc[~sleep_df[['heart_rate', 'steps']].isnull().all(axis=1), 'sleep'] = sleep_df.loc[
~sleep_df[['heart_rate', 'steps']].isnull().all(axis=1), 'sleep'].fillna(0.)
# resample
in_df = in_df.resample(granularity).median()
in_df['sleep'] = sleep_df.resample(granularity).max()
# set the steps to 0, where we have sleep == 1
in_df.loc[in_df['sleep'] == 1, 'steps'] = 0
# now extend the index of days that have x% of slen, and fill the nans w/ the average in sleep stratification
in_df.dropna(thresh=1, axis=0, inplace=True)
    days = []
    exclusioncounter = 0  # counts days dropped for having fewer than 50% of slen samples
    for n, d in in_df.groupby(pd.Grouper(freq='D')):
if len(d.index.values) >= .5 * slen:
# get the date and reindex:
date = d.index[0].date()
# create full range:
full_day_index = pd.date_range(date, periods=slen, freq=granularity)
d = d.reindex(full_day_index)
days.append(d)
else:
exclusioncounter += 1
try:
in_df = pd.concat(days)
except ValueError:
return pd.DataFrame({'Empty': []})
in_df, _, _ = fill_nans_w_stratified_average(in_df, slen, granularity)
# This dropna is very important: Drop the hours for which we did not have data!!
in_df.dropna(axis=0, inplace=True)
    in_df = in_df.groupby(pd.Grouper(freq='D'))
from unittest import skip, TestCase
import pandas as pd
import dankypipe.pipe as pipe
class TestPipe(TestCase):
"""test class for loading features into S3
"""
@classmethod
def setUpClass(cls):
"""perform at test class initialization
"""
cls.config = dict(
job_name='test_job_1',
features=['feature_3', 'feature_4'],
model=dict(
name='lightgbm',
parameters=dict(
verbosity=1
)
),
task='tune',
tuning=dict(
search_type='grid',
parameters=[
dict(
name='max_depth',
values=[2]
),
dict(
name='learning_rate',
values=[.1, .2, .3]
)
],
metric='auc'
)
)
cls.kwargs = dict(
test=True
)
@classmethod
def tearDownClass(cls):
"""perform when all tests are complete
"""
pass
def setUp(self):
"""perform before each unittest"""
pass
def tearDown(self):
"""perform after each unittest
"""
pass
def test_get_feature_names(self):
features = pipe.get_feature_names(**self.kwargs)
self.assertTrue(all([
c in ['test_1', 'test_2'] for c in features
]))
def test_validate_name(self):
self.assertFalse(pipe.validate_name('test_1', **self.kwargs))
self.assertTrue(pipe.validate_name('test-1', **self.kwargs))
def test_upload_feature(self):
self.assertRaises(
ValueError,
pipe.upload_feature,
feature_name='test_1',
datasets=('.', '.', '.'),
**self.kwargs
)
self.assertRaises(
FileNotFoundError,
pipe.upload_feature,
feature_name='test_3',
paths=('test_3.csv', 'test_3.csv', 'test_3.csv'),
**self.kwargs
)
paths = (
'feature_3_train.csv',
'feature_3_test.csv',
'feature_3_validate.csv'
)
self.assertTrue(pipe.upload_feature(
feature_name='feature_3',
datasets=paths,
overwrite=True,
**self.kwargs
) is not None)
self.assertRaises(
ValueError,
pipe.upload_feature,
feature_name='feature_3',
datasets=paths,
**self.kwargs
)
def test_upload_feature_from_df(self):
dfs = [
pd.read_csv('feature_3_test.csv'),
            pd.read_csv('feature_3_train.csv'),
        ]
from wf_core_data_dashboard import core
import wf_core_data
import mefs_utils
import pandas as pd
import inflection
import urllib.parse
import os
def generate_mefs_table_data(
test_events_path,
student_info_path,
student_assignments_path
):
test_events = pd.read_pickle(test_events_path)
student_info = pd.read_pickle(student_info_path)
student_assignments = pd.read_pickle(student_assignments_path)
students = mefs_utils.summarize_by_student(
test_events=test_events,
student_info=student_info,
student_assignments=student_assignments
)
groups = mefs_utils.summarize_by_group(
students=students,
grouping_variables=[
'school_year',
'group_name_mefs'
]
)
return students, groups
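# Minimal usage sketch (the pickle filenames are placeholders for wherever the upstream
# ETL stores its outputs; the school_year value is likewise illustrative):
def _example_mefs_report(base_dir):
    students, groups = generate_mefs_table_data(
        test_events_path=os.path.join(base_dir, 'test_events.pkl'),
        student_info_path=os.path.join(base_dir, 'student_info.pkl'),
        student_assignments_path=os.path.join(base_dir, 'student_assignments.pkl')
    )
    return groups_page_html(groups, school_year='2021-2022')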
def groups_page_html(
groups,
school_year=None,
group_name_mefs=None,
title=None,
subtitle=None,
include_details_link=True
):
if title is None:
title = 'MEFS results'
if subtitle is None:
subtitle = ':'.join(filter(
lambda x: x is not None,
[
school_year,
group_name_mefs
]
)).replace('/', ':')
table_html = groups_table_html(
groups,
school_year=school_year,
group_name_mefs=group_name_mefs,
include_details_link=include_details_link
)
template = core.get_template("groups_table.html")
return template.render(
title=title,
subtitle=subtitle,
table_html=table_html
)
def students_page_html(
students,
school_year=None,
group_name_mefs=None,
title=None,
subtitle=None
):
if title is None:
title = 'MEFS results'
if subtitle is None:
subtitle = ':'.join(filter(
lambda x: x is not None,
[
school_year,
group_name_mefs
]
)).replace('/', ':')
table_html = students_table_html(
students=students,
school_year=school_year,
group_name_mefs=group_name_mefs
)
template = core.get_template("students_table.html")
return template.render(
title=title,
subtitle=subtitle,
table_html=table_html
)
def groups_table_html(
groups,
school_year=None,
group_name_mefs=None,
include_details_link=True
):
groups = groups.copy()
groups['mean_ending_total_score_sem_range'] = groups.apply(
lambda row: '{:.1f} – {:.1f}'.format(
row['mean_ending_total_score'] - row['mean_ending_total_score_sem'],
row['mean_ending_total_score'] + row['mean_ending_total_score_sem'],
) if not pd.isna(row['mean_ending_total_score']) and not pd.isna(row['mean_ending_total_score_sem']) else '',
axis=1
)
groups['mean_total_score_growth_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_total_score_growth'] - row['mean_total_score_growth_sem'],
row['mean_total_score_growth'] + row['mean_total_score_growth_sem'],
) if not pd.isna(row['mean_total_score_growth']) and not pd.isna(row['mean_total_score_growth_sem']) else '',
axis=1
)
groups['mean_total_score_growth_per_school_year_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_total_score_growth_per_school_year'] - row['mean_total_score_growth_per_school_year_sem'],
row['mean_total_score_growth_per_school_year'] + row['mean_total_score_growth_per_school_year_sem'],
) if not pd.isna(row['mean_total_score_growth_per_school_year']) and not pd.isna(row['mean_total_score_growth_per_school_year_sem']) else '',
axis=1
)
groups['mean_ending_percentile_sem_range'] = groups.apply(
lambda row: '{:.1f} – {:.1f}'.format(
row['mean_ending_percentile'] - row['mean_ending_percentile_sem'],
row['mean_ending_percentile'] + row['mean_ending_percentile_sem'],
) if not pd.isna(row['mean_ending_percentile']) and not pd.isna(row['mean_ending_percentile_sem']) else '',
axis=1
)
groups['mean_percentile_growth_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_percentile_growth'] - row['mean_percentile_growth_sem'],
row['mean_percentile_growth'] + row['mean_percentile_growth_sem'],
) if not pd.isna(row['mean_percentile_growth']) and not pd.isna(row['mean_percentile_growth_sem']) else '',
axis=1
)
groups['mean_percentile_growth_per_school_year_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_percentile_growth_per_school_year'] - row['mean_percentile_growth_per_school_year_sem'],
row['mean_percentile_growth_per_school_year'] + row['mean_percentile_growth_per_school_year_sem'],
) if not pd.isna(row['mean_percentile_growth_per_school_year']) and not pd.isna(row['mean_percentile_growth_per_school_year_sem']) else '',
axis=1
)
groups['mean_ending_total_score'] = groups['mean_ending_total_score'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['ending_total_score_sd'] = groups['ending_total_score_sd'].apply(
        lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
    )
import pandas as pd
import xlwings as xw
import math
def Get_Inven():
df = pd.read_csv(r'E:\OneDrive\露露\22-03-15-Inventorys (2)(1).csv')
df1 = pd.DataFrame(df,columns=['Goods Name','Packing Size','Gross Weight'])
df_2 = df1['Packing Size'].str.split('*',expand=True)
df1 = pd.concat([df1,df_2],axis=1)
df1 = df1.rename(columns={0:'Long',1:'Width',2:'Height'})
df1 = pd.DataFrame(df1,columns=['Goods Name','Packing Size','Gross Weight','Long','Width','Height'])
return df1
def Get_Inven1():
    df = pd.read_csv(r'E:\OneDrive\露露\邮费.csv')
import pandas as pd
#import seaborn as sns
from matplotlib import pyplot as plt
import pdb
import glob
def get_all_dataframe(
data_source='election_reporting_dot_com',
state='mi',
year=2020,
):
county_folder_list = [
x.split('/')[-2] for x in glob.glob(
'./election_reporting_com/2020/mi/*/'
)
]
state = 'mi'
df_dict_all = {
'party': pd.DataFrame(),
'president': pd.DataFrame(),
        'senator': pd.DataFrame(),
    }
import os
import random
from io import BytesIO
from tempfile import TemporaryDirectory
import tensorflow as tf
from PIL import Image
from google.cloud import storage
import numpy as np
import glob
from tqdm import tqdm
import h5py
import json
from data.thor_constants import THOR_AFFORDANCES, THOR_OBJECT_TYPES, THOR_ACTIONS, _action_to_type_ind, \
_object_to_type_ind, _object_to_statechange_df, _fixup_df, THOR_ACTION_TYPE_TO_IND
from typing import List
import pandas as pd
class S3TFRecordWriter(object):
def __init__(self, fn, buffer_size=10000):
"""
Upload to gcloud
:param fn:
:param buffer_size: Trying to space out idential things here by shuffling a buffer
p(first lasts until the end,N) = (1-pflush) ^ (N/(p*buffer_size))
each flush event removes buffer_size*p
If the buffer size is big enough then we have good randomness I think
"""
self.fn = fn
if fn.startswith('gs://'):
self.gclient = storage.Client()
self.storage_dir = TemporaryDirectory()
self.writer = tf.io.TFRecordWriter(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
self.bucket_name, self.file_name = self.fn.split('gs://', 1)[1].split('/', 1)
else:
self.gclient = None
self.bucket_name = None
self.file_name = None
self.storage_dir = None
self.writer = tf.io.TFRecordWriter(fn)
self.buffer_size = buffer_size
self.buffer = []
self.num_written = 0
def write(self, x):
self.num_written += 1
if self.buffer_size < 10:
self.writer.write(x)
return
if len(self.buffer) < self.buffer_size:
self.buffer.append(x)
else:
random.shuffle(self.buffer)
for i in range(self.buffer_size // 5): # Pop 20%
self.writer.write(self.buffer.pop())
def close(self):
# Flush buffer
for x in self.buffer:
self.writer.write(x)
self.writer.close()
if self.gclient is not None:
print(f"UPLOADING {self.num_written}ex!!!!!", flush=True)
bucket = self.gclient.get_bucket(self.bucket_name)
blob = bucket.blob(self.file_name)
blob.upload_from_filename(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
self.storage_dir.cleanup()
def __enter__(self):
# Called when entering "with" context.
return self
def __exit__(self, *_):
# Called when exiting "with" context.
# Upload shit
print("CALLING CLOSE")
self.close()
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _convert_image_to_jpgstring(image):
"""
:param image: Numpy array of an image [H, W, 3]
:return: it, as a jpg string
"""
with BytesIO() as output:
image_pil = Image.fromarray(image, mode='RGB')
image_pil.save(output, format='JPEG', quality=95)
return output.getvalue()
def _convert_image_seq_to_jpgstring(image):
"""
:param image: Numpy array of an image [N, H, W, 3]
:return: it, as a jpg string
"""
with BytesIO() as output:
image_pil = Image.fromarray(image.reshape((image.shape[0] * image.shape[1], image.shape[2], 3)), mode='RGB')
image_pil.save(output, format='JPEG', quality=95)
return output.getvalue()
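# Hedged sketch of how the helpers above combine into a serialized tf.train.Example; the
# feature keys here are illustrative only, not the schema used elsewhere in this file.
def _example_serialized_record(image, action_id):
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': bytes_feature(_convert_image_to_jpgstring(image)),
        'action_id': int64_feature(action_id),
    }))
    return example.SerializeToString()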
def _print_padding_tradeoff(lens, ps=(80, 85, 90, 95, 99, 100,)):
"""
Given the lengths of everything, print out how mcuh we lose by cutting it off to a shorter percentile
:param lens: Lengths
:param ps: Percentiles
:return:
"""
lens_array = np.array(lens)
for p in ps:
lensp = np.percentile(lens_array, p)
lensused = np.minimum(lens_array, lensp).sum()
lenstotal = np.sum(lens_array)
wasted_space = np.sum(lensp - np.minimum(lens_array, lensp)) / (lensp * len(lens_array))
print(
"Lens {}%: {:.3f}. Using that as seqlength, we use {} frames of {} ({:.3f}), wasted space {:.3f}".format(
p, np.percentile(lens_array, p), lensused, lenstotal, lensused / lenstotal, wasted_space),
flush=True)
#############################
def traj_dataloader(all_fns, include_frames=False):
"""
:param all_fns: list of all filenames to use
:param include_frames: Whether to include the img
:return:
"""
for fn in tqdm(all_fns):
try:
h5reader = h5py.File(fn, 'r')
# Process it
item = {}
for k in ['meta_info', 'alias_object_id_to_old_object_id', 'object_id_to_states', 'output_action_results',
'output_actions']:
item[k] = json.loads(h5reader[k][()].decode('utf-8'))
item['object_ids'] = [x.decode('utf-8') for x in h5reader['object_ids'][()].tolist()]
for k, k_v in h5reader['pos3d'].items():
for t, v in k_v.items():
item['object_id_to_states'][k][t]['pos3d'] = v[()]
# bboxes
bbox_keys = sorted([int(k) for k in h5reader['bboxes'].keys()])
item['bboxes'] = [h5reader['bboxes'][(str(k))][()] for k in bbox_keys]
if not all([x.dtype == np.uint16 for x in item['bboxes']]): # Previously I had a clipping bug
raise ValueError("dtype")
if include_frames:
item['frames'] = h5reader['frames'][()]
item['agent_states'] = h5reader['agent_states'][()]
item['meta_info']['fn'] = fn
yield item
except Exception as e:
print("Error with {}: {}".format(fn, str(e)), flush=True)
def traj_dataloader_v2(all_fns, IMAGE_WIDTH=640, IMAGE_HEIGHT=384):
for item in traj_dataloader(all_fns, include_frames=True):
num_frames = item['frames'].shape[0]
main_object_ids, object_id_to_main_ind = _get_main_object_id_mappings(item['meta_info']['main_object_ids'],
all_object_ids=item['object_ids'],
output_actions=item['output_actions'],
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
# boxes - use an extra ind that tells us what frame we're on
# [img_id, obj_id, x1, y1, x2, y2].
bboxes_list = [_convert_bboxes(v, t, object_ids=item['object_ids'],
image_width=IMAGE_WIDTH,
image_height=IMAGE_HEIGHT,
) for t, v in enumerate(item['bboxes']) if v.size > 0]
bboxes_df = pd.concat([x for x in bboxes_list if x.size > 0], 0)
bboxes_df['main_inds'] = bboxes_df['object_ids'].apply(lambda x: object_id_to_main_ind[x])
# SORT bboxes_df by first, frame number, then, whether it's a main ind or not, and third the size
item['bboxes_df'] = bboxes_df.sort_values(by=['frame', 'main_inds', 'size'], ascending=[True, False, False],
ignore_index=True)
item['output_actions'].append({'action': 'Done'})
item['output_action_results'].append({'action_success': True, 'action_err_msg': ''})
# Predict next action maybe
item['actions'] = pd.DataFrame([_convert_action(x, main_object_ids=main_object_ids,
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
for x in item['output_actions']])
del item['output_actions']
df_mapping = {}
# Compute object -> size and also get a dynamic mapping of the states over time
object_to_size = {}
for k, sz in item['object_id_to_states'].items():
for s in sz.values():
size = np.prod(s['pos3d'][-1] + 1e-8)
object_to_size[k] = max(size, object_to_size.get(k, 0.0))
for oid in main_object_ids:
oid_list = [oid] + [aid for aid, oid2 in item['alias_object_id_to_old_object_id'].items() if oid2 == oid]
df_mapping[oid] = _object_to_statechange_df([item['object_id_to_states'][k] for k in oid_list],
num_frames=item['frames'].shape[0],
object_to_size=object_to_size)
item['df_mapping'] = df_mapping
item['main_object_ids'] = main_object_ids
item['object_id_to_main_ind'] = object_id_to_main_ind
yield item
def _convert_bboxes(bboxes_t, t, object_ids, image_width, image_height):
"""
Converts bboxes into tensorflow format
:param bboxes_t: [N boxes, [obj_id, x1, y1, x2, y2]]
:param t: Int
:param object_ids: Mapping obj_id -> string
:param image_width:
:param image_height:
:return:
"""
# Convert to tf format
bbox_info_float = bboxes_t.astype(np.float32)[:, 1:5] / \
np.array([image_width, image_height, image_width, image_height], dtype=np.float32)[None]
sizes = np.sqrt((bbox_info_float[:, 2] - bbox_info_float[:, 0]) * (bbox_info_float[:, 3] - bbox_info_float[:, 1]))
# Get rid of really small objects
big_enough = sizes > np.sqrt(4.0 / (image_height * image_width))
bbox_info_float = bbox_info_float[big_enough]
bboxes_t = bboxes_t[big_enough]
sizes = sizes[big_enough]
df = pd.DataFrame(np.column_stack([bbox_info_float, sizes]), columns=['xmin', 'ymin', 'xmax', 'ymax', 'size'])
df['frame'] = t
df['object_ids'] = [object_ids[i] for i in bboxes_t[:, 0]]
df['category_ids'] = df['object_ids'].apply(_object_to_type_ind)
return df
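# Synthetic example of the expected bbox layout: one row per box as
# [object_index, x1, y1, x2, y2] in pixels. The object id string is an assumed THOR-style
# identifier, used only to exercise the type lookup.
def _example_convert_bboxes():
    bboxes_t = np.array([[0, 100, 50, 300, 200]], dtype=np.uint16)
    return _convert_bboxes(bboxes_t, t=0, object_ids=['Mug|+00.10|+00.90|-00.20'],
                           image_width=640, image_height=384)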
def _convert_action(action, main_object_ids: List[str], alias_object_id_to_old_object_id):
"""
Convert an action into something referring to the main obj ids (dealing with aliases and stuff)
:param action:
:param main_object_ids:
:param alias_object_id_to_old_object_id:
:return:
"""
results = {'action_id': _action_to_type_ind(action)}
oid_to_ind = {oid: i for i, oid in enumerate(main_object_ids)}
for alias_object_id, old_object_id in alias_object_id_to_old_object_id.items():
oid_to_ind[alias_object_id] = oid_to_ind[old_object_id]
if 'objectId' in action:
results['object_id'] = oid_to_ind[action['objectId']]
else:
results['object_id'] = -1
if 'receptacleObjectId' in action:
results['receptacle_object_id'] = oid_to_ind[action['receptacleObjectId']]
else:
results['receptacle_object_id'] = -1
return results
def _get_main_object_id_mappings(main_object_ids, all_object_ids,
output_actions, alias_object_id_to_old_object_id):
"""
Return a list of main object IDs, and a mapping from all object Ids to the main ones
:param main_object_ids: Main ids identified by the sampler
:param all_object_ids: All object IDs ever seen
:param output_actions: All output actions -- we might need to add more main object IDs if needed
:param alias_object_id_to_old_object_id: Aliases - e.g. if we chop somethign it changes ID. ugh
:return: new list of main object IDs, and a mapping of objectId to main ind (or 0 otherwise). Starts at 1.
"""
# Create a mapping of objectId -> mainObjectId ind (or nothing!)
# Tack on enough things to main object ids if they're referenced
if isinstance(main_object_ids, str): # Not sure what's going on here
main_object_ids = [main_object_ids]
ref_oids = set([v for a in output_actions for k, v in a.items() if k.endswith('bjectId')])
for roid in sorted(ref_oids):
if roid not in sorted(alias_object_id_to_old_object_id.keys()) + main_object_ids:
main_object_ids.append(roid)
# print("{} objects: {}".format(len(main_object_ids), main_object_ids), flush=True)
object_id_to_main_ind = {oid: -1 for oid in all_object_ids}
for i, mi in enumerate(main_object_ids):
object_id_to_main_ind[mi] = i
for k, v in alias_object_id_to_old_object_id.items():
if v == mi:
object_id_to_main_ind[k] = i
return main_object_ids, object_id_to_main_ind
def traj_dataloader_v3(all_fns, IMAGE_WIDTH=640, IMAGE_HEIGHT=384, include_frames=False):
for item in traj_dataloader(all_fns, include_frames=include_frames):
main_object_ids, object_id_to_main_ind = _get_main_object_id_mappings(item['meta_info']['main_object_ids'],
all_object_ids=item['object_ids'],
output_actions=item['output_actions'],
alias_object_id_to_old_object_id=item[
'alias_object_id_to_old_object_id'])
# boxes - use an extra ind that tells us what frame we're on
# [img_id, obj_id, x1, y1, x2, y2].
bboxes_list = [_convert_bboxes(v, t, object_ids=item['object_ids'],
image_width=IMAGE_WIDTH,
image_height=IMAGE_HEIGHT,
) for t, v in enumerate(item['bboxes']) if v.size > 0]
        bboxes_df = pd.concat([x for x in bboxes_list if x.size > 0], 0)
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
        tdi = tm.box_expected(tdi, box)
import pandas as pd
from datetime import datetime
from multiprocessing import Pool
import seaborn as sns
from matplotlib import pyplot as plt
from pathlib import Path
# ================================
# MARKING SCHEME NOTES
# ===============================
# 1. In the accompanying assignment Python file, students are supposed to fill in required code
# in all places where it says "YOUR CODE HERE"
# 2. In order to find points for that particular line, please refer to the
# corresponding line here where it has comment POINTS at the end of the line
def get_date_range_by_chunking(large_csv):
"""
In this function, the idea is to use pandas chunk feature.
:param large_csv: Full path to activity_log_raw.csv
:return:
"""
# ======================================
# EXPLORE THE DATA
# ======================================
# Read the first 100,000 rows in the dataset
df_first_100k = pd.read_csv(large_csv, nrows=100000) # POINTS: 1
print(df_first_100k)
# Identify the time column in the dataset
str_time_col = 'ACTIVITY_TIME' # POINTS: 1
# ============================================================
# FIND THE FIRST [EARLIEST] AND LAST DATE IN THE WHOLE DATASET
# BY USING CHUNKING
# =============================================================
# set chunk size
chunksize = 1000000 # POINTS: 1
# declare a list to hold the dates
dates = [] # POINTS: 1
with pd.read_csv(large_csv, chunksize=chunksize) as reader: # POINTS: 1
for chunk in reader:
# convert the string to Python datetime object
# add a new column to hold this datetime object
time_col = 'activ_time' # POINTS: 1
            chunk[time_col] = chunk[str_time_col].apply(lambda x: pd.to_datetime(x[:9]))
import unittest
import numpy as np
import pandas as pd
from numpy.testing import assert_almost_equal
from pandas.testing import assert_frame_equal, assert_series_equal
import cvxpy as cvx
from zipline.optimize import MaximizeAlpha, TargetWeights
from zipline.optimize.constraints import (
Basket, CannotHold, DollarNeutral, FactorExposure, FixedWeight, Frozen,
LongOnly, MaxGrossExposure, NetExposure, NetGroupExposure, NotConstrained,
NotExceed, NotLessThan, Pair, PositionConcentration, ReduceOnly, ShortOnly)
class TestObjectives(unittest.TestCase):
def setUp(self):
stocks = [str(i).zfill(6) for i in range(1, 8)]
alphas = pd.Series([-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], index=stocks)
        # alternative alphas used to adjust a specific objective:
# alphas = pd.Series([-0.2, -0.1, -0.1, 0, 0.1, 0.2, 0.3], index=stocks)
weights = pd.Series(alphas.values, index=stocks)
labels = {
'000001': 'A',
'000002': 'A',
'000003': 'B',
'000004': 'B',
'000005': 'C',
'000006': 'C',
'000007': 'D'
}
min_weights = {'A': -0.1, 'B': -0.2, 'C': -0.3, 'D': -0.4}
max_weights = {'A': 0.1, 'B': 0.2, 'C': 0.3, 'D': 0.4}
factors = ['f1', 'f2', 'f3']
data = [[3, 2, 3], [5, 1, 5], [2, 3, 1], [3, 5, 2], [4, 2, 3],
[3, 4, 4]] #, [2, 5, 3]],
loadings = pd.DataFrame(data=data, index=stocks[:6], columns=factors)
min_exposures = pd.Series([2, 1, 2], index=factors)
max_exposures = pd.Series([4, 5, 4], index=factors)
self.stocks = stocks
self.alphas = alphas
self.weights = weights
self.labels = labels
self.min_weights = min_weights
self.max_weights = max_weights
self.loadings = loadings
self.min_exposures = min_exposures
self.max_exposures = max_exposures
        # assume the initial weights cover fewer stocks (4) than the target weights
        init_weights = pd.Series([-0.6, -0.1, 0.1, 0.55], index=stocks[:4])
""""""
from __future__ import print_function, division
import os
import torch
import torch.nn as nn
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import utils
import sys
from pathlib import Path
from typing import List, Dict, Tuple
from PIL import Image
import math
import re
from efficientnet_pytorch import EfficientNet
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
import cv2
import gc
from torchvision import transforms as T
from tensorflow.keras.applications import EfficientNetB0
print('TF', tf.__version__)
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
class ShopeeDataset:
""" ShopeeDataset class
Documentation to be implemented.
"""
def __init__(self, root) -> None:
self.root = root
self.train_path_csv = os.path.join(root, "train.csv")
self.test_path_csv = os.path.join(root, "test.csv")
# self.df = pd.read_csv(self.train_path_csv)
self.df = self.read_dataset(self.train_path_csv)
def read_dataset(self, path: str, GET_CV: bool = True, CHECK_SUB: bool = True):
if GET_CV:
df = pd.read_csv(path)
tmp = df.groupby(['label_group'])['posting_id'].unique().to_dict()
df['matches'] = df['label_group'].map(tmp)
df['matches'] = df['matches'].apply(lambda x: ' '.join(x))
if CHECK_SUB:
                df = pd.concat([df, df], axis=0)
import json
import pandas as pd
from objects.folder import Folder
from objects.mapping import Mapping
from objects.source import Source
from objects.target import Target
from objects.target_field import TargetField
from objects.source_field import SourceField
from objects.transformation import Transformation
from objects.transformation_field import TransformationField
from objects.connector import Connector
class FileProcess(object):
def __init__(self, input_file: str = None, output_folder: str = None):
self.input_file = input_file
self.output_folder = output_folder
def _get_folders(self, data: dict):
# Data folder informations
powermart_object = data['POWERMART']
repository_object = powermart_object['REPOSITORY']
repository_version = powermart_object['REPOSITORY_VERSION']
repository_name = repository_object['NAME']
folder_object = repository_object['FOLDER']
database_type = repository_object['DATABASETYPE']
folders = []
for _folder in folder_object:
folder_name = _folder['NAME']
# sources
sources = self._get_sources(_folder)
# targets
targets = self._get_targets(_folder)
# mappings
mappings = self._get_mappings(_folder)
folder = Folder(repository_version, repository_name, folder_name, database_type, sources=sources,
mappings=mappings,
targets=targets)
folders.append(folder)
return folders
def _get_sources(self, folder: dict):
sources = []
if not folder.get('SOURCE'):
return sources
if isinstance(folder.get('SOURCE'), list):
for _source in folder['SOURCE']:
source_fields = self._get_source_fields(_source)
source = Source(_source['DATABASETYPE'], _source['DBDNAME'], _source['NAME'], _source['OWNERNAME'],
source_fields, folder['NAME'])
sources.append(source)
elif isinstance(folder.get('SOURCE'), dict):
source_fields = self._get_source_fields(folder['SOURCE'])
source = Source(folder['SOURCE']['DATABASETYPE'], folder['SOURCE']['DBDNAME'], folder['SOURCE']['NAME'],
folder['SOURCE']['OWNERNAME'], source_fields, folder['NAME'])
sources.append(source)
return sources
def _get_source_fields(self, source: dict):
source_fields = []
if not source.get('SOURCEFIELD'):
return source_fields
if isinstance(source.get('SOURCEFIELD'), list):
for _source_field in source['SOURCEFIELD']:
source_field = SourceField(_source_field['DATATYPE'], _source_field['NAME'],
_source_field['NULLABLE'], _source_field['KEYTYPE'], source['NAME'],
_source_field['PRECISION'])
source_fields.append(source_field)
elif isinstance(source.get('SOURCEFIELD'), dict):
source_field = SourceField(source['SOURCEFIELD']['DATATYPE'], source['SOURCEFIELD']['NAME'],
source['SOURCEFIELD']['NULLABLE'], source['SOURCEFIELD']['KEYTYPE'],
source['NAME'], source['SOURCEFIELD']['PRECISION'])
source_fields.append(source_field)
return source_fields
def _get_transformation_fields(self, transformation: dict):
transform_fields = []
if not transformation.get('TRANSFORMFIELD'):
return transform_fields
if isinstance(transformation.get('TRANSFORMFIELD'), list):
for _transform_field in transformation['TRANSFORMFIELD']:
transform_field = TransformationField(_transform_field['DATATYPE'],
_transform_field['NAME'],
_transform_field['PORTTYPE'],
_transform_field['DEFAULTVALUE'],
_transform_field['PRECISION'],
transformation['NAME'],
_transform_field.get('EXPRESSION'),
transformation['MAPPING_NAME'])
transform_fields.append(transform_field)
elif isinstance(transformation.get('TRANSFORMFIELD'), dict):
transform_field = TransformationField(transformation['TRANSFORMFIELD']['DATATYPE'],
transformation['TRANSFORMFIELD']['NAME'],
transformation['TRANSFORMFIELD']['PORTTYPE'],
transformation['TRANSFORMFIELD']['DEFAULTVALUE'],
transformation['TRANSFORMFIELD']['PRECISION'],
transformation['NAME'],
transformation['TRANSFORMFIELD'].get('EXPRESSION'),
transformation['MAPPING_NAME'])
transform_fields.append(transform_field)
return transform_fields
def _get_transformations(self, mapping: dict):
transformations = []
if not mapping.get('TRANSFORMATION'):
return transformations
if isinstance(mapping.get('TRANSFORMATION'), list):
for _transformation in mapping['TRANSFORMATION']:
_transformation['MAPPING_NAME'] = mapping['NAME']
transformation_fields = self._get_transformation_fields(_transformation)
transformation_sql = self._get_query(_transformation)
transformation = Transformation(_transformation['NAME'],
transformation_fields, transformation_sql,
mapping['NAME'])
transformations.append(transformation)
elif isinstance(mapping.get('TRANSFORMATION'), dict):
mapping['TRANSFORMATION']['MAPPING_NAME'] = mapping['NAME']
transformation_fields = self._get_transformation_fields(mapping['TRANSFORMATION'])
transformation_sql = self._get_query(mapping['TRANSFORMATION'])
transformation = Transformation(mapping['TRANSFORMATION']['NAME'],
transformation_fields, transformation_sql,
mapping['NAME'])
transformations.append(transformation)
return transformations
def _get_query(self, transformation):
if isinstance(transformation.get('TABLEATTRIBUTE'), list):
for _table_attribute in transformation['TABLEATTRIBUTE']:
if _table_attribute['NAME'] == "Sql Query":
return _table_attribute['VALUE']
return ""
def _get_targets(self, folder: dict):
targets = []
if not folder.get('TARGET'):
return targets
if isinstance(folder.get('TARGET'), list):
for _target in folder['TARGET']:
target_fields = self._get_target_fields(_target)
target = Target(_target['NAME'], _target['DATABASETYPE'], target_fields, folder['NAME'])
targets.append(target)
elif isinstance(folder.get('TARGET'), dict):
target_fields = self._get_target_fields(folder['TARGET'])
target = Target(folder['TARGET']['NAME'], folder['TARGET']['DATABASETYPE'], target_fields, folder['NAME'])
targets.append(target)
return targets
def _get_target_fields(self, target: dict):
target_fields = []
if not target.get('TARGETFIELD'):
return target_fields
if isinstance(target.get('TARGETFIELD'), list):
for _target_field in target['TARGETFIELD']:
target_field = TargetField(_target_field['DATATYPE'], _target_field['NAME'], _target_field['NULLABLE'],
_target_field['KEYTYPE'], target['NAME'], _target_field['PRECISION'])
target_fields.append(target_field)
elif isinstance(target.get('TARGETFIELD'), dict):
# data_type, name, nullable, key_type, precision
target_field = TargetField(target['TARGETFIELD']['DATATYPE'], target['TARGETFIELD']['NAME'],
target['TARGETFIELD']['NULLABLE'], target['TARGETFIELD']['KEYTYPE'],
target['NAME'], target['TARGETFIELD']['PRECISION'])
target_fields.append(target_field)
return target_fields
def _get_session_name(self, folder: dict, mapping_name: str):
if isinstance(folder.get('SESSION'), list):
for session in folder['SESSION']:
if session['MAPPINGNAME'] == mapping_name:
return session['NAME']
elif isinstance(folder.get('SESSION'), dict):
if folder['SESSION']['MAPPINGNAME'] == mapping_name:
return folder['SESSION']['NAME']
return None
def _get_task_name(self, task_instance: dict):
if not task_instance['NAME']:
return None
if 's_' in task_instance['TASKNAME'] and task_instance['TASKTYPE'] == 'Session':
return task_instance['TASKNAME']
return None
def _get_workflow_name(self, folder: dict, session_name: str):
if isinstance(folder.get('WORKFLOW'), list):
for workflow in folder['WORKFLOW']:
if isinstance(workflow['TASKINSTANCE'], list):
for task_instance in workflow['TASKINSTANCE']:
task_name = self._get_task_name(task_instance)
if task_name == session_name:
return workflow['NAME']
elif isinstance(workflow['TASKINSTANCE'], dict):
task_name = self._get_task_name(workflow['TASKINSTANCE'])
if task_name == session_name:
return workflow['NAME']
elif isinstance(folder.get('WORKFLOW'), dict):
    if isinstance(folder['WORKFLOW']['TASKINSTANCE'], list):
        for task_instance in folder['WORKFLOW']['TASKINSTANCE']:
            task_name = self._get_task_name(task_instance)
            if task_name == session_name:
                return folder['WORKFLOW']['NAME']
    elif isinstance(folder['WORKFLOW']['TASKINSTANCE'], dict):
        task_name = self._get_task_name(folder['WORKFLOW']['TASKINSTANCE'])
        if task_name == session_name:
            return folder['WORKFLOW']['NAME']
return None
def _get_mappings(self, folder: dict):
mappings = []
if not folder.get('MAPPING'):
return mappings
if isinstance(folder.get('MAPPING'), list):
for _mapping in folder['MAPPING']:
session_name = self._get_session_name(folder, _mapping['NAME'])
workflow_name = self._get_workflow_name(folder, session_name)
connectors = self._get_connectors(_mapping)
transformations = self._get_transformations(_mapping)
mapping = Mapping(_mapping['NAME'], connectors, transformations, folder['NAME'], session_name,
workflow_name)
mappings.append(mapping)
elif isinstance(folder.get('MAPPING'), dict):
session_name = self._get_session_name(folder, folder['MAPPING']['NAME'])
workflow_name = self._get_workflow_name(folder, session_name)
connectors = self._get_connectors(folder['MAPPING'])
transformations = self._get_transformations(folder['MAPPING'])
mapping = Mapping(folder['MAPPING']['NAME'], connectors, transformations, folder['NAME'], session_name,
workflow_name)
mappings.append(mapping)
return mappings
def _get_connectors(self, mapping: dict):
connectors = []
if not mapping.get('CONNECTOR'):
return connectors
if isinstance(mapping.get('CONNECTOR'), list):
for _connector in mapping['CONNECTOR']:
connector = Connector(_connector['FROMFIELD'], _connector['FROMINSTANCE'],
_connector['FROMINSTANCETYPE'], _connector['TOFIELD'], _connector['TOINSTANCE'],
_connector['TOINSTANCETYPE'], mapping['NAME'])
connectors.append(connector)
elif isinstance(mapping.get('CONNECTOR'), dict):
    connector = Connector(mapping['CONNECTOR']['FROMFIELD'], mapping['CONNECTOR']['FROMINSTANCE'],
                          mapping['CONNECTOR']['FROMINSTANCETYPE'], mapping['CONNECTOR']['TOFIELD'],
                          mapping['CONNECTOR']['TOINSTANCE'],
                          mapping['CONNECTOR']['TOINSTANCETYPE'],
                          mapping['NAME'])
    connectors.append(connector)
return connectors
def _parser(self, input_file):
with open(input_file, 'r') as data_file:
data = json.loads(data_file.read())
return data
def _write_files(self, data_frames: list, names_sheets: list, writer):
if len(data_frames) != len(names_sheets):
    raise ValueError("Dataframes and names sheets must be equal lengths")
for df, sheet_name in zip(data_frames, names_sheets):
# Write each dataframe to a different worksheet.
df.to_excel(writer, sheet_name=sheet_name)
def generate_documentation(self, folders: list):
for folder in folders:
dfs, sheets = [], []
# Sources
sources = [s.to_dict() for s in folder.sources]
source_fields = []
for s in sources:
source_fields.extend(s.pop('source_fields', None))
dfs.append(pd.DataFrame(sources))
sheets.append("Sources")
# source fields
source_fields = [sf.to_dict() for sf in source_fields]
dfs.append(pd.DataFrame(source_fields))
sheets.append("Source Fields")
# targets
targets = [t.to_dict() for t in folder.targets]
target_fields = []
for t in targets:
target_fields.extend(t.pop('target_fields', None))
dfs.append(pd.DataFrame(targets))
sheets.append("Targets")
# target fields
target_fields = [tf.to_dict() for tf in target_fields]
dfs.append(pd.DataFrame(target_fields))
sheets.append("Target Fields")
# mappings
mappings = [m.to_dict() for m in folder.mappings]
connectors = []
transformations = []
for m in mappings:
connectors.extend(m.pop('connectors', None))
transformations.extend(m.pop('transformations', None))
dfs.append(pd.DataFrame(mappings))
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# coding: utf-8
# # Imported Modules
# In[1]:
import os
import sys
import pandas as pd
import numpy as np
import seaborn as sns
from pandas import DataFrame as df
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import DBSCAN
from sklearn.cluster import AffinityPropagation
from sklearn.model_selection import RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from matplotlib import pyplot as plt
# # All functions
# In[2]:
def run_tSNE(try_id, data):
X_embedded = TSNE(
n_components=2,
init='pca').fit_transform(data)
with open('corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv', 'w') as f:
f.write(',tSNE-1,tSNE-2')
f.write('\n')
for i in range(len(X_embedded)):
f.write(data.index[i] + ',' + str(X_embedded[i][0]) + ',' + str(X_embedded[i][1]) + '\n')
return
def visual_tSNE(try_id, data, label):
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
coordinates['tSNE-1'] = (coordinates['tSNE-1'] - coordinates['tSNE-1'].min()) / (coordinates['tSNE-1'].max() - coordinates['tSNE-1'].min())
coordinates['tSNE-2'] = (coordinates['tSNE-2'] - coordinates['tSNE-2'].min()) / (coordinates['tSNE-2'].max() - coordinates['tSNE-2'].min())
plt.subplots(figsize=(8, 8))
if label is None:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c='grey', linewidths=0)
else:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c=data[label], linewidths=0,
vmin=-1, vmax=1, cmap=plt.cm.bwr)
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
return
def visual_sub_tSNE(try_id, subset, label):
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
coordinates['tSNE-1'] = (coordinates['tSNE-1'] - coordinates['tSNE-1'].min()) / (coordinates['tSNE-1'].max() - coordinates['tSNE-1'].min())
coordinates['tSNE-2'] = (coordinates['tSNE-2'] - coordinates['tSNE-2'].min()) / (coordinates['tSNE-2'].max() - coordinates['tSNE-2'].min())
coordinates = df(coordinates.loc[subset.index, :])
plt.subplots(figsize=(8, 8))
if label is None:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c='grey', linewidths=0)
else:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c=subset[label], linewidths=0,
vmin=-1, vmax=1, cmap=plt.cm.bwr)
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
return
def run_AP(try_id, data):
clustering = AffinityPropagation().fit(data)
label_lncRNAs = df(index=data.index, columns=['label_assigned'])
label_lncRNAs['label_assigned'] = clustering.labels_
label_lncRNAs.to_csv('corr_68sig_linc/tSNE_CR_score/clustering/AP_' + try_id + '.csv', sep=',')
return label_lncRNAs
def run_DBSCAN(try_id, subset, eps, min_samples):
# read in tSNE embedding coordinates
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
if subset is not None:
coordinates = df(coordinates.loc[subset.index, :])
# scaled to [0, 1]
coordinates['tSNE-1'] = (coordinates['tSNE-1'] - coordinates['tSNE-1'].min()) / (coordinates['tSNE-1'].max() - coordinates['tSNE-1'].min())
coordinates['tSNE-2'] = (coordinates['tSNE-2'] - coordinates['tSNE-2'].min()) / (coordinates['tSNE-2'].max() - coordinates['tSNE-2'].min())
# input hyperparameter
db = DBSCAN(eps=eps, min_samples=min_samples).fit(coordinates)
# initial assign
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
label_cell = df(index=coordinates.index, columns=['cluster'])
label_cell['cluster'] = labels
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# visualize
plt.subplots(figsize=(10, 10))
plt.scatter(coordinates['tSNE-1'], coordinates['tSNE-2'], c=label_cell['cluster'], s=20, linewidths=0, cmap='Dark2')
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
print('EPIC1 is in ' + str(label_cell.loc['EPIC1', :]))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(coordinates, labels))
return label_cell
def number_cluster(label_cell):
# show number of genes in each cluster
for c in label_cell['cluster'].unique():
print('cluster ' + str(c))
print(len(label_cell[label_cell['cluster'] == c].index))
return
def report_KNN(results, n_top, try_id):
f = open('corr_68sig_linc/classifier/' + try_id + '_KNN_hyper_parameter_selection.txt', 'w')
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
f.write("Model with rank: {0}".format(i))
f.write('\n')
f.write("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
f.write('\n')
f.write("Parameters: {0}".format(results['params'][candidate]))
f.write('\n')
f.write("")
f.close()
return
def hyperpara_KNN(target, training, try_id):
Y = target[target['cluster'] != -1]
X = df(training.loc[Y.index, :])
# select KNN for the following training
clf = KNeighborsClassifier(p=2)
# specify parameters and distributions to sample from
param_dist = {"n_neighbors": np.arange(5, 50, 5),
"leaf_size": np.arange(30, 80, 5),
"weights": ['uniform', 'distance']}
# run randomized search
n_iter_search = 50
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search, cv=5,
random_state=0,
refit=True)
random_search.fit(np.array(X.values), np.ravel(Y.values))
report_KNN(results=random_search.cv_results_, n_top=10, try_id=try_id)
return
def final_KNN(target, training, n_neighbours, leaf_size, weights):
Y = target[target['cluster'] != -1]
X = df(training.loc[Y.index, :])
# construction of final model
clf = KNeighborsClassifier(
n_neighbors=n_neighbours,
leaf_size=leaf_size,
weights=weights,
p=2)
class_label = Y['cluster'].unique()
class_label.sort()
# evaluate by 5-fold cross-validation
score = cross_val_score(clf, np.array(X.values), np.ravel(Y.values), cv=5)
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(np.array(X.values)):
X_train, X_test = np.array(X.values)[train_index], np.array(X.values)[test_index]
y_train, y_test = np.ravel(Y.values)[train_index], np.ravel(Y.values)[test_index]
clf_fit = clf.fit(X_train, y_train)
y_true = np.ravel(y_test)
y_pred = clf_fit.predict(X_test)
cmatrix = confusion_matrix(y_true, y_pred, labels=class_label)
cmatrix = cmatrix.astype(float) / cmatrix.sum(axis=1)[:, np.newaxis]
cmatrix_frame = df(cmatrix, index=class_label, columns=class_label)
# visualize the confusion matrix
sns.heatmap(cmatrix)
plt.show()
# prediction
X_pred = df(training.loc[target[target['cluster'] == -1].index, :])
sample_label = X_pred.index
ambi_predict = clf.predict_proba(X_pred.values)
proba_frame = df(ambi_predict, index=sample_label, columns=class_label)
# only get assignment with posterior probability >= .99
proba_frame = proba_frame[proba_frame >= .99]
# assign the predicted cell type
posterior_assign = target.copy()
for c in proba_frame.columns:
current_assign = proba_frame[c].dropna(axis=0, how='any').index
posterior_assign.at[current_assign, 'cluster'] = c
return posterior_assign
def visual_posterior_assign(try_id, subset, posterior_assign):
# read in tSNE embedding coordinates
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
if subset is not None:
coordinates = df(coordinates.loc[subset.index, :])
# visualize
plt.subplots(figsize=(10, 10))
plt.scatter(coordinates['tSNE-1'], coordinates['tSNE-2'], c=posterior_assign['cluster'], s=20, linewidths=0, cmap='Dark2')
plt.title('Posterior assign by KNN')
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
return
def visual_cluster(posterior_assign, data):
for c in posterior_assign['cluster'].unique():
if c == -1:
continue
print('cluster ' + str(c))
tmp_linc = posterior_assign[posterior_assign['cluster'] == c].index
sns.clustermap(
data.loc[tmp_linc, :].astype(float),
figsize=(16, 10),
cmap='bwr', vmin=-8, vmax=8)
plt.show()
return
# # Analyses
# ## 1. Read consensus regulation score
# Consensus regulation score (CR score) is defined as positive hits minus negative hits. The CR score ranges from -30 to 30, indicating whether the regulation direction is consensus negative or consensus positive.
# In[3]:
CR_score = pd.read_csv(
'corr_68sig_linc/dich/pos_neg_immune_count.csv',
header=0, index_col=0, sep=',')
positive = pd.read_csv(
'corr_68sig_linc/dich/promote_immune_count.csv',
header=0, index_col=0, sep=',')
negative = pd.read_csv(
'corr_68sig_linc/dich/inhibit_immune_count.csv',
header=0, index_col=0, sep=',')
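# Toy illustration (an assumed relationship, not read from the csv files above) of the CR-score
# definition: for a single immune signature, the CR score of a lncRNA is its positive-hit count
# minus its negative-hit count, so the sign gives the consensus regulation direction.
pos_toy = pd.Series({'lncA': 20, 'lncB': 3})
neg_toy = pd.Series({'lncA': 2, 'lncB': 25})
cr_toy = pos_toy - neg_toy  # lncA -> +18 (consensus positive), lncB -> -22 (consensus negative)
print(cr_toy)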
# In[27]:
# mean distribution of CR_score
sns.kdeplot(CR_score.mean(axis=1))
# In[11]:
# variance distribution of CR_score
sns.kdeplot(CR_score.std(axis=0))
# ## 2. The sign distribution of CR score
# In[14]:
# get the sign of CR score
sign_CR_score = df(np.sign(CR_score))
# In[15]:
# visualize by heatmap
sns.clustermap(sign_CR_score, cmap='bwr', vmin=-1, vmax=1)
# ## 3. tSNE on sign of CR score
# To determine whether the sign alone is informative.
# In[17]:
# run tSNE
run_tSNE(data=sign_CR_score, try_id='sign_of_CR')
# In[18]:
# visualize tSNE in MHC
visual_tSNE(try_id='sign_of_CR', data=sign_CR_score, label='MHC.I_19272155')
# In[24]:
# visualize tSNE in Module 11 proliferation
visual_tSNE(try_id='sign_of_CR', data=sign_CR_score, label='Module11_Prolif_score')
# In[22]:
# visualize tSNE (rw embedding)
visual_tSNE(try_id='sign_of_CR', data=sign_CR_score, label=None)
# ## 3. tSNE on CR score
# ### 3.1 tSNE on full set
# In[25]:
# run tSNE
run_tSNE(data=CR_score, try_id='raw_CR')
# In[28]:
# visualize tSNE in MHC
visual_tSNE(try_id='raw_CR', data=CR_score, label=None)
# In[29]:
# run tSNE
run_tSNE(data=CR_score / 32., try_id='scaled_CR')
# In[143]:
visual_tSNE(try_id='scaled_CR', data=CR_score, label=None)
# In[144]:
visual_tSNE(try_id='scaled_CR', data=CR_score, label='MHC.I_19272155')
# ### 3.2 tSNE on reduced set
# Only perform tSNE on lncRNAs with CR score > 5 in at least 10 signatures
# In[22]:
# number of lncRNAs as CR cutoff goes high
lncRNA_CR_cutoff = df(columns=['CR_cutoff', '#lncRNAs'])
lncRNA_CR_sig = {}
for i in range(0, int(abs(CR_score.max().max()))):
lncRNA_CR_cutoff.at[i, 'CR_cutoff'] = i
tmp_CR = abs(CR_score.copy())
tmp_CR = df(tmp_CR[tmp_CR >= i]).dropna(axis=0, how='all')
lncRNA_CR_cutoff.at[i, '#lncRNAs'] = len(tmp_CR.index)
tmp_count = df(columns=['sig_cutoff', '#lncRNAs'])
for j in range(0, 68):
lead_lncRNA = 0
for lnc in tmp_CR.index:
if len(df(tmp_CR.loc[lnc, :]).dropna(axis=0, how='any').index) >= j:
lead_lncRNA += 1
else:
continue
tmp_count.at[j, 'sig_cutoff'] = j
tmp_count.at[j, '#lncRNAs'] = lead_lncRNA
lncRNA_CR_sig[i] = tmp_count
# In[44]:
plt.figure(figsize=(8, 8))
sns.set_palette(palette='Spectral', n_colors=32)
for k in lncRNA_CR_sig.keys():
sns.scatterplot(x='sig_cutoff', y='#lncRNAs', data=lncRNA_CR_sig[k])
plt.yticks(rotation=90)
plt.tight_layout()
plt.axvline(x=10., ls=':', color='k')
plt.show()
# In[81]:
# number of lncRNAs as CR cutoff goes high
lncRNA_CR_fraction = df(columns=['CR_cutoff', '#lncRNAs/sig'])
for i in range(0, int(abs(CR_score.max().max()))):
lncRNA_CR_fraction.at[i, 'CR_cutoff'] = i
lncRNA_CR_fraction.at[i, '#lncRNAs/sig'] = df(CR_score[abs(CR_score) < i]).isna().sum().mean() / len(CR_score.index)
# In[100]:
plt.figure(figsize=(6, 6))
sns.scatterplot(x='CR_cutoff', y='#lncRNAs/sig', data=lncRNA_CR_fraction)
plt.axvline(x=5., ls=':', color='k')
# In[112]:
sns.kdeplot(CR_score.mean(axis=0))
plt.axvline(x=5., ls=':', color='k')
plt.axvline(x=-5., ls=':', color='k')
print(CR_score.mean(axis=0).mean() + CR_score.mean(axis=0).std()*2)
# In[110]:
sns.kdeplot(CR_score.mean(axis=1))
plt.axvline(x=10., ls=':', color='k')
plt.axvline(x=-10., ls=':', color='k')
print(CR_score.mean(axis=1).std()*2)
# In[89]:
# number of lncRNAs as CR cutoff goes high
lncRNA_CR_sig_fraction = df(columns=['CR_cutoff', '#sig/lncRNA'])
for i in range(0, int(abs(CR_score.max().max()))):
lncRNA_CR_sig_fraction.at[i, 'CR_cutoff'] = i
tmp_cr = df(CR_score[abs(CR_score) < i]).isna().sum(axis=1).mean()
lncRNA_CR_sig_fraction.at[i, '#sig/lncRNA'] = tmp_cr
# In[90]:
plt.figure(figsize=(6, 6))
sns.scatterplot(x='CR_cutoff', y='#sig/lncRNA', data=lncRNA_CR_sig_fraction)
# In[16]:
print(lncRNA_CR_cutoff)
# In[17]:
sns.scatterplot(x='CR_cutoff', y='#lncRNAs', data=lncRNA_CR_cutoff)
# In[48]:
# lncRNAs with CR > 5 at least in one signature
rr_CRS = abs(CR_score.copy())
rr_CRS = df(rr_CRS[rr_CRS >= 5.]).dropna(axis=0, how='all')
print(rr_CRS.shape)
# In[16]:
sns.kdeplot(rr_CRS.isna().sum()/len(rr_CRS['MHC.I_19272155']))
# In[30]:
(CR_score[CR_score == 0].isna().sum()).mean() / len(CR_score.index)
# In[32]:
(rr_CRS.isna().sum()).mean() / len(CR_score.index)
# In[28]:
sns.kdeplot(rr_CRS.isna().sum()/len(CR_score['MHC.I_19272155']))
# In[15]:
df(rr_CRS.isna().sum()/len(rr_CRS['MHC.I_19272155'])).mean()
# In[62]:
# lncRNAs with CR > 5 at least in ten signature
lead_lncRNA = []
for lnc in rr_CRS.index:
if len(df(rr_CRS.loc[lnc, :]).dropna(axis=0, how='any').index) >= 10:
lead_lncRNA.append(lnc)
else:
continue
print(len(lead_lncRNA))
rr_CRS = df(rr_CRS.loc[lead_lncRNA, :])
# In[63]:
df(rr_CRS.isna().sum()/len(CR_score.index)).mean()
# In[96]:
print(((len(rr_CRS.index) - rr_CRS.isna().sum()) / len(CR_score.index)).mean())
# In[72]:
print(df(CR_score[abs(CR_score) < 5]).isna().sum().mean() / len(CR_score.index))
# In[17]:
4292 / 6569
# In[18]:
print(CR_score.shape)
# In[120]:
# attach the sign
rr_CRS = np.sign(CR_score) * rr_CRS
# In[123]:
rr_CRS = rr_CRS.dropna(axis=0, how='all').fillna(0)
# In[125]:
print(rr_CRS.loc['EPIC1', :].head())
# ### 3.2.1 Reduced set with lower value masked (CR score < 5 will be masked)
# In[129]:
# run tSNE
run_tSNE(data=rr_CRS, try_id='reduced_set_CR')
# visualize
visual_tSNE(try_id='reduced_set_CR', data=rr_CRS, label=None)
# In[131]:
# visualize
visual_tSNE(try_id='reduced_set_CR', data=rr_CRS, label='MHC.I_19272155')
# In[132]:
# visualize
visual_tSNE(try_id='reduced_set_CR', data=rr_CRS, label='Module11_Prolif_score')
# In[133]:
sns.clustermap(rr_CRS, cmap='bwr', vmin=-10, vmax=10)
# In[137]:
sns.scatterplot(x='Module11_Prolif_score', y='MHC.I_19272155', data=CR_score)
# In[138]:
sns.scatterplot(x='Module11_Prolif_score', y='MHC.I_19272155', data=rr_CRS)
# ### 3.2.2 Reduced set with lower value unmasked (CR score < 5 will not be masked)
# In[317]:
# run tSNE
run_tSNE(data=CR_score.loc[rr_CRS.index, :], try_id='reduced_CR_low_unmask')
# visualize
visual_tSNE(try_id='reduced_CR_low_unmask', data=rr_CRS, label=None)
# In[320]:
# visualize the unmasked one
visual_tSNE(try_id='reduced_CR_low_unmask', data=CR_score.loc[rr_CRS.index, :], label='MHC.I_19272155')
# In[321]:
visual_tSNE(try_id='reduced_CR_low_unmask', data=CR_score.loc[rr_CRS.index, :], label='Module11_Prolif_score')
# ### 3.3 Plot the reduced set in the full embedding
# In[153]:
# only plot the reduced set on the full set tSNE
visual_sub_tSNE(try_id='scaled_CR', subset=rr_CRS, label=None)
# In[154]:
visual_sub_tSNE(try_id='scaled_CR', subset=rr_CRS, label='MHC.I_19272155')
# In[149]:
visual_sub_tSNE(try_id='scaled_CR', subset=rr_CRS, label='Module11_Prolif_score')
# ## 4. DBSCAN on tSNE embedding
# Density-based spatial clustering of applications with noise (DBSCAN) is a data clustering algorithm proposed by <NAME>, <NAME>, <NAME> and <NAME> in 1996. It is a density-based clustering algorithm: given a set of points in some space, it groups together points that are closely packed (points with many nearby neighbors), marking as outliers points that lie alone in low-density regions (whose nearest neighbors are too far away). DBSCAN is one of the most common clustering algorithms and also one of the most cited in the scientific literature.
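# Minimal standalone DBSCAN sketch (illustrative only; the analysis below uses run_DBSCAN on the
# tSNE embedding with tuned eps/min_samples). The toy data and parameter values are arbitrary.
from sklearn.cluster import DBSCAN
toy_points = np.vstack([np.random.normal(0.0, 0.05, (50, 2)),
                        np.random.normal(1.0, 0.05, (50, 2))])
toy_labels = DBSCAN(eps=0.2, min_samples=5).fit_predict(toy_points)
print(np.unique(toy_labels))  # expect two dense clusters; -1 would mark noise points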
# ### 4.1 Clustering on embedding with reduced set (low value masked)
# In[427]:
# initial assign
label = run_DBSCAN(try_id='reduced_set_CR', subset=None, eps=.08, min_samples=180)
number_cluster(label_cell=label)
# In[258]:
# posterior assign
hyperpara_KNN(target=label, training=rr_CRS, try_id='reduced_set_CR')
# In[265]:
# final model building
posterior_assign = final_KNN(target=label, training=rr_CRS, n_neighbours=10, leaf_size=75, weights='distance')
# In[270]:
# visual final assign
visual_posterior_assign(try_id='reduced_set_CR', subset=None, posterior_assign=posterior_assign)
number_cluster(label_cell=posterior_assign)
# In[316]:
visual_cluster(posterior_assign=posterior_assign, data=CR_score.loc[rr_CRS.index, :])
# In[322]:
# compare clusters: average CR score
average_CR = df()
for c in [0, 3, 2, 1]:
tmp_linc = posterior_assign[posterior_assign['cluster'] == c].index
average_CR[c] = CR_score.loc[tmp_linc, :].mean()
sns.clustermap(average_CR, cmap='bwr', z_score=0,
figsize=(8, 16), col_cluster=False)
# In[408]:
# EPIC1 in cluster 0
cluster_0 = rr_CRS.loc[posterior_assign[posterior_assign['cluster'] == 0].index, :]
# sort by module 11 and MHCI
mhc1 = df(cluster_0[cluster_0['MHC1_21978456'] <= -8])
mhc1 = df(mhc1[mhc1['MHC.I_19272155'] <= -8])
prolif = df(cluster_0[cluster_0['Module11_Prolif_score'] >= 8])
# -*- coding: utf-8 -*-
from sparse_dot_topn_for_blocks import awesome_cossim_topn
from scipy.sparse.csr import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import rand
import numpy as np
import pandas as pd
import multiprocessing
import pytest
PRUNE_THRESHOLD = 0.1
NUM_CANDIDATES = 3
MAX_N_PROCESSES = min(8, multiprocessing.cpu_count()) - 1
LIST_N_JOBS = [n for n in range(MAX_N_PROCESSES + 1)]
def get_n_top_sparse(mat, n_top=10):
"""
Get list of (index, value) of the n largest elements in a 1-dimensional sparse matrix
:param mat: input sparse matrix
:param n_top: number of largest elements, default is 10.
:return: sorted list of largest elements
"""
length = mat.getnnz()
if length == 0:
return None
if length <= n_top:
result = list(zip(mat.indices, mat.data))
else:
arg_idx = np.argpartition(mat.data, -n_top)[-n_top:]
result = list(zip(mat.indices[arg_idx], mat.data[arg_idx]))
return sorted(result, key=lambda x: -x[1])
def awesome_cossim_topn_wrapper(A, B, ntop, lower_bound=0, use_threads=False, n_jobs=1, return_best_ntop=False, test_nnz_max=-1, expect_best_ntop=None, sort=True):
"""
This function is running awesome_cossim_topn()
with and without return_best_ntop and checking if we get the expected result and if both results are the same.
It has the same signature, but has an extra parameter: expect_best_ntop
"""
nnz_rows = np.full(A.shape[0], 0, dtype=np.int32)
result1, best_ntop = awesome_cossim_topn(A, B, ntop, nnz_rows, lower_bound, use_threads, n_jobs, True, test_nnz_max, sort=sort)
assert expect_best_ntop == best_ntop
nnz_rows[:] = 0
result2 = awesome_cossim_topn(A, B, ntop, nnz_rows, lower_bound, use_threads, n_jobs, False, test_nnz_max, sort=sort)
assert (result1 != result2).nnz == 0  # The 2 CSR matrices are the same
if ntop < B.shape[1]:
result3 = awesome_cossim_topn(A, B, ntop, nnz_rows, lower_bound, use_threads, n_jobs, False, test_nnz_max, sort=(not sort))
assert (result1 != result3).nnz == 0  # The 2 CSR matrices are the same
return result1
def awesome_cossim_topn_array_wrapper_test(
A, B, ntop, lower_bound=0, use_threads=False, n_jobs=1, return_best_ntop=False, test_nnz_max=-1, expect_best_ntop=None, sort=True):
"""
This function is running awesome_cossim_topn_wrapper()
with and without test_nnz_max=1 and checking if we get the expected result and if both results are the same.
It has the same signature as awesome_cossim_topn(), but has an extra parameter: expect_best_ntop
"""
result1 = awesome_cossim_topn_wrapper(A, B, ntop, lower_bound, use_threads, n_jobs, expect_best_ntop=expect_best_ntop, sort=sort)
result2 = awesome_cossim_topn_wrapper(A, B, ntop, lower_bound, use_threads, n_jobs, test_nnz_max=1, expect_best_ntop=expect_best_ntop, sort=sort)
assert (result1 != result2).nnz == 0  # The 2 CSR matrices are the same
assert result1.nnz == result2.nnz
return result1
def pick_helper_awesome_cossim_topn_dense(
a_dense,
b_dense,
dtype,
n_jobs=0
):
if n_jobs == 0:
helper_awesome_cossim_topn_dense(a_dense, b_dense, dtype=dtype)
elif n_jobs > 0:
helper_awesome_cossim_topn_dense(a_dense, b_dense, dtype=dtype, use_threads=True, n_jobs=n_jobs)
def helper_awesome_cossim_topn_dense(
a_dense,
b_dense,
dtype,
use_threads=False,
n_jobs=1,
):
dense_result = np.dot(a_dense, np.transpose(b_dense)) # dot product
max_ntop_dense = max(len(row[row > 0]) for row in dense_result)
sparse_result = csr_matrix(dense_result).astype(dtype)
max_ntop_sparse = max(row.nnz for row in sparse_result)
assert max_ntop_dense == max_ntop_sparse
sparse_result_top3 = [get_n_top_sparse(row, NUM_CANDIDATES)
for row in sparse_result] # get ntop using the old method
pruned_dense_result = dense_result.copy()
pruned_dense_result[pruned_dense_result < PRUNE_THRESHOLD] = 0 # prune low similarity
max_ntop_pruned_dense = max(len(row[row > 0]) for row in pruned_dense_result)
pruned_sparse_result = csr_matrix(pruned_dense_result)
max_ntop_pruned_sparse = max(row.nnz for row in pruned_sparse_result)
assert max_ntop_pruned_dense == max_ntop_pruned_sparse
pruned_sparse_result_top3 = [get_n_top_sparse(row, NUM_CANDIDATES) for row in pruned_sparse_result]
a_csr = csr_matrix(a_dense).astype(dtype)
b_csr_t = csr_matrix(b_dense).T.astype(dtype)
awesome_result = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
len(b_dense),
0.0,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_dense
)
awesome_result_unsorted = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
len(b_dense),
0.0,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_dense,
sort=False
)
awesome_result_top3 = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
NUM_CANDIDATES,
0.0,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_dense
)
awesome_result_top3 = [list(zip(row.indices, row.data)) if len(
row.data) > 0 else None for row in awesome_result_top3] # make comparable, normally not needed
pruned_awesome_result = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
len(b_dense),
PRUNE_THRESHOLD,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_dense
)
pruned_awesome_result_unsorted = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
len(b_dense),
PRUNE_THRESHOLD,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_dense,
sort=False
)
pruned_awesome_result_top3 = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
NUM_CANDIDATES,
PRUNE_THRESHOLD,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_dense
)
pruned_awesome_result_top3 = [list(zip(row.indices, row.data)) if len(
row.data) > 0 else None for row in pruned_awesome_result_top3]
# no candidate selection, no pruning
assert awesome_result.nnz == sparse_result.nnz
# no candidate selection, below PRUNE_THRESHOLD similarity pruned
assert pruned_awesome_result.nnz == pruned_sparse_result.nnz
# no candidate selection, no pruning
assert awesome_result_unsorted.nnz == sparse_result.nnz
# no candidate selection, below PRUNE_THRESHOLD similarity pruned
assert pruned_awesome_result_unsorted.nnz == pruned_sparse_result.nnz
all_none1 = np.all(pd.isnull(awesome_result_top3)) and np.all(pd.isnull(sparse_result_top3))
all_none2 = np.all(pd.isnull(pruned_awesome_result_top3)) and np.all(pd.isnull(pruned_sparse_result_top3))
# top NUM_CANDIDATES candidates selected, no pruning
if not all_none1:
# Sometime we can have this test failing for test_awesome_cossim_topn_manually()
# when we have rows giving the same dot product value and then we have them in random different order.
np.testing.assert_array_almost_equal(awesome_result_top3, sparse_result_top3)
else:
assert len(awesome_result_top3) == len(sparse_result_top3)
# top NUM_CANDIDATES candidates selected, below PRUNE_THRESHOLD similarity pruned
if not all_none2:
np.testing.assert_array_almost_equal(pruned_awesome_result_top3, pruned_sparse_result_top3)
else:
assert len(pruned_awesome_result_top3) == len(pruned_sparse_result_top3)
def pick_helper_awesome_cossim_topn_sparse(
a_sparse,
b_sparse,
flag=True,
n_jobs=0
):
if n_jobs == 0:
helper_awesome_cossim_topn_sparse(a_sparse, b_sparse, flag=flag)
elif n_jobs > 0:
helper_awesome_cossim_topn_sparse(a_sparse, b_sparse, flag=flag, use_threads=True, n_jobs=n_jobs)
def helper_awesome_cossim_topn_sparse(
a_sparse,
b_sparse,
flag=True,
use_threads=False,
n_jobs=1
):
# Note: helper function using awesome_cossim_topn
sparse_result = a_sparse.dot(b_sparse.T) # dot product
max_ntop_sparse = max(row.nnz for row in sparse_result)
sparse_result_top3 = [get_n_top_sparse(row, NUM_CANDIDATES)
for row in sparse_result] # get ntop using the old method
pruned_sparse_result = sparse_result.copy()
pruned_sparse_result[pruned_sparse_result < PRUNE_THRESHOLD] = 0 # prune low similarity
pruned_sparse_result.eliminate_zeros()
max_ntop_pruned_sparse = max(row.nnz for row in pruned_sparse_result)
pruned_sparse_result_top3 = [get_n_top_sparse(row, NUM_CANDIDATES) for row in pruned_sparse_result]
a_csr = csr_matrix(a_sparse)
b_csr_t = csr_matrix(b_sparse).T
awesome_result = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
b_sparse.shape[0],
0.0,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_sparse
)
awesome_result_unsorted = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
b_sparse.shape[0],
0.0,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_sparse,
sort=False
)
awesome_result_top3 = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
NUM_CANDIDATES,
0.0,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_sparse
)
awesome_result_top3 = [list(zip(row.indices, row.data)) if len(
row.data) > 0 else None for row in awesome_result_top3] # make comparable, normally not needed
pruned_awesome_result = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
b_sparse.shape[0],
PRUNE_THRESHOLD,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_pruned_sparse
)
pruned_awesome_result_unsorted = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
b_sparse.shape[0],
PRUNE_THRESHOLD,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_pruned_sparse,
sort=False
)
pruned_awesome_result_top3 = awesome_cossim_topn_array_wrapper_test(
a_csr,
b_csr_t,
NUM_CANDIDATES,
PRUNE_THRESHOLD,
use_threads=use_threads,
n_jobs=n_jobs,
expect_best_ntop=max_ntop_pruned_sparse
)
pruned_awesome_result_top3 = [list(zip(row.indices, row.data)) if len(
row.data) > 0 else None for row in pruned_awesome_result_top3]
# no candidate selection, no pruning
assert awesome_result.nnz == sparse_result.nnz
# no candidate selection, below PRUNE_THRESHOLD similarity pruned
assert pruned_awesome_result.nnz == pruned_sparse_result.nnz
# no candidate selection, no pruning
assert awesome_result_unsorted.nnz == sparse_result.nnz
# no candidate selection, below PRUNE_THRESHOLD similarity pruned
assert pruned_awesome_result_unsorted.nnz == pruned_sparse_result.nnz
if flag:
all_none1 = np.all(pd.isnull(awesome_result_top3)) and np.all(pd.isnull(sparse_result_top3))
all_none2 = np.all(pd.isnull(pruned_awesome_result_top3)) and np.all(pd.isnull(pruned_sparse_result_top3))
import pandas as pd
import numpy as np
def process_df(interim_dir):
df_train = pd.read_csv(interim_dir+'/census_income_learn.csv')
df_train['set']='train'
df_test = pd.read_csv(interim_dir+'/census_income_test.csv')
df_test['set']='test'
df = pd.concat([df_train, df_test], ignore_index=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 21:03:18 2018
@author: andrewcady
"""
import pandas as pd
import numpy as np
ss = pd.read_csv('Prcls_NbrhdAssFront.csv')
prices = pd.read_csv('LRA Price List.csv')
#df = pd.read_csv('VacantParcels_updated20180615.csv')
df = pd.read_csv('FinalVacant_LRA_20180707.csv')
#sqft_df = pd.read_csv('Parcel_SquareFeet.csv')
#sqft_df.drop_duplicates('HANDLE',inplace=True)
ss['Nbrhd'] = pd.to_numeric(ss['Nbrhd'], errors='coerce', downcast='integer')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.4.0
@file: GSP_main.py
@time: 2021/1/26 10:50
@functions: graph signal processing main script
@update: support Yeo-ICN definition
@update: support ICN-level brain activity and connecitivty strength saving
"""
import numpy as np
import glob
import os
import time
import matplotlib.pyplot as plt
from pygsp import graphs, filters, plotting
from GSP_utilities import surrogate_BOLD_create, save_variable, load_variable
import pandas as pd
from dppd import dppd
dp, X = dppd()
# 1. path locations and parameters
start = time.time()
deriv_path = '/home/amax/data/cye/MScohort_BIDS_clean/derivatives'
connectome_path = os.path.join(deriv_path, 'mrtrix')
xcpengine_path = os.path.join(deriv_path, 'xcpengine')
network_assign_path = 'CAB-NP_v1.1_Labels-ReorderedbyNetworks_Yeo.csv'
num_BOLD_timepoints = 180
num_rand = 100 # number of surrogates
functional_type = 'BOLD'
tract_type = 'meanlength' # one of the following: invlength, invnodevol, level-participant_connectome, meanlength
ICN_type = 'Yeo' # one of the following: 'Yeo', 'Cole'
normalize_type = 'both' # 'W': normalize W; 'L': normalize Laplacian (Preti method); 'both': normalize both W and Laplacian
# 2. read network assignment for hcpmmp
network_assign_csv = pd.read_csv(network_assign_path)
network_assign_csv = dp(network_assign_csv).mutate(NETWORK=X.Yeo_NETWORK).pd
network_assign_csv = dp(network_assign_csv).mutate(NETWORKKEY=X.Yeo_NETWORKKEY).pd
num_network_df = dp(network_assign_csv).summarise((X.NETWORKKEY, np.max, 'hp_max')).pd
num_network = num_network_df.iloc[0,0]
network_rowindex_ls = []
for network_i in range(1,num_network+1):
df_network = dp(network_assign_csv).filter_by(X.NETWORKKEY == network_i).pd
network_rowindex_ls.append(df_network.index.values)
network_unique_df = dp(network_assign_csv).distinct('NETWORKKEY').pd
network_unique_df = network_unique_df.sort_values(by='NETWORKKEY',ascending = True)
network_unique_df = dp(network_unique_df).filter_by(-X.NETWORK.isin(['Undefine'])).pd # remove undefined ICN
network_unique_df = network_unique_df.reset_index()
# 3. define group of interests
cohort1 = 'ms'
cohort2 = 'nc'
cohort3 = 'nmo'
cohort4 = 'cis'
cohort1_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort1 + '*'))
cohort2_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort2 + '*'))
cohort3_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort3 + '*'))
cohort4_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort4 + '*'))
cohort_connectome_ls = cohort1_connectome_ls + cohort2_connectome_ls + cohort3_connectome_ls + cohort4_connectome_ls
cohort_connectome_ls.sort()
cohort1_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort1 + '*'))
cohort2_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort2 + '*'))
cohort3_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort3 + '*'))
cohort4_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort4 + '*'))
cohort_fmri_ls = cohort1_fmri_ls + cohort2_fmri_ls + cohort3_fmri_ls + cohort4_fmri_ls
cohort_name_ls = [os.path.basename(item) for item in cohort_connectome_ls]
remove_name_ls = ['sub-nc011','sub-nc039', 'sub-nmo002', 'sub-nmo019', 'sub-cis002','sub-cis015', 'sub-ms015'] # problematic cases
cohort_name_ls = list(set(cohort_name_ls) - set(remove_name_ls)) # remove problematic cases
for i in remove_name_ls: # remove problematic cases
cohort_connectome_ls = [x for x in cohort_connectome_ls if i not in x]
cohort_fmri_ls = [x for x in cohort_fmri_ls if i not in x]
cohort_name_ls.sort()
cohort_connectome_ls.sort()
cohort_fmri_ls.sort()
if len(cohort_connectome_ls) != len(cohort_fmri_ls):
print('Number of connectome and xcpengine results not matched')
# 4. create a dataframe to store individual filepath
path_dict = {'subname':cohort_name_ls, 'mrtrix_path': cohort_connectome_ls, 'xcp_path':cohort_fmri_ls}
path_df = pd.DataFrame(path_dict, columns=['subname','mrtrix_path','xcp_path'])
path_df = dp(path_df).mutate(connectome_path=X.mrtrix_path + '/connectome/' + X.subname +'_parc-hcpmmp1_' + tract_type + '.csv').pd
path_df = dp(path_df).mutate(BOLD_series_path=X.xcp_path + '/fcon/hcpmmp/hcpmmp.1D').pd
path_df = dp(path_df).mutate(fmri_map_path=X.xcp_path + '/roiquant/hcpmmp/' + X.subname +'_hcpmmp_mean.csv').pd
print('finished step 4')
# 5. load individual connectome as ndarray
num_parcels = len(network_assign_csv)
num_sub = len(path_df)
path_df_nc = dp(path_df).filter_by(X.subname.str.contains('nc')).pd
num_nc = len(path_df_nc)
nc_idx = path_df_nc.index
connectome_array = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
indiviudal_connectome = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
connectome_array[:,:,sub_idx] = indiviudal_connectome
# 6. load individual BOLD series and fill missing part according to /fcon/hcpmmp/missing.txt
BOLD_series_3D = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
BOLD_series = np.genfromtxt(path_df.loc[sub_idx, 'BOLD_series_path'])
BOLD_series = BOLD_series.T
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if BOLD_series[missing_parcel_id-1,:].sum() != 0:
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_parcel_id-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
else: # multiple parcels missing
for missing_idx in missing_parcel_id:
network_key = network_assign_csv.loc[missing_idx-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_idx-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
BOLD_series_3D[:,:,sub_idx] = BOLD_series
print('finished loading individual BOLD series and filling missing part')
# 7. load fmri parametric map and fill missing part according to /fcon/hcpmmp/missing.txt
fmri_paramap = np.zeros(shape=(num_parcels, num_sub))
paramap_str = 'mean_alffZ'
for sub_idx in range(len(path_df)):
fmri_map = pd.read_csv(path_df.loc[sub_idx, 'fmri_map_path'],index_col=0)
fmri_map = fmri_map.loc[:,paramap_str]
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if not np.isnan(fmri_map[missing_parcel_id]):
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
fmri_map[int(missing_parcel_id)] = np.mean(fmri_map[network_parcel_idx])
fmri_map = fmri_map.to_numpy()
else: # multiple parcels missing
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_rowindex_ls = np.array(network_rowindex_ls, dtype=object)
network_parcel_idx = network_rowindex_ls[network_key-1]
for parcel_i in range(missing_parcel_id.size):
fmri_map[int(missing_parcel_id[parcel_i])] = np.mean(fmri_map[network_parcel_idx[parcel_i]])
fmri_map = fmri_map.to_numpy()
fmri_paramap[:,sub_idx] = fmri_map
print('finished loading fMRI parametric map and filling missing parcels')
# 8. load connectome and functional signal and do GSP
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
func_sig = BOLD_series_3D
s_head_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_rand_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub, num_rand))
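    # s_head_cohort / s_rand_cohort: GFT coefficients of the empirical BOLD series and of num_rand surrogate signals per subject (filled in the loop below)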
else:
raise ValueError('undefined functional signal')
G_U_cohort = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
W = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
# Symmetric Normalization of adjacency matrix
D = np.diag(np.sum(W,1)) #degree
D_power = np.power(D, (-1/2))
D_power[np.isinf(D_power)] = 0
Wsymm = D_power @ W @ D_power
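    # symmetric normalization of the adjacency matrix: Wsymm = D^(-1/2) W D^(-1/2); zero-degree rows would give inf in D_power, hence the isinf guard above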
#The eigenvector matrix G.U is used to define the Graph Fourier Transform of the graph signal S
if normalize_type == 'W':
G = graphs.Graph(Wsymm)
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'L':
G = graphs.Graph(W, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'both':
Wsymm = np.triu(Wsymm) + np.triu(Wsymm).T - np.diag(np.triu(Wsymm).diagonal()) # force symmetric
G = graphs.Graph(Wsymm, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
# L = np.eye(len(Wsymm)) - Wsymm
# lamda, U = np.linalg.eig(L)
# U = U[:, np.argsort(lamda)]
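    # graph Fourier transform: project the regional functional signal onto the structural harmonics, s_head = U.T @ s (columns of U ordered by increasing graph frequency)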
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head = U.T @ func_sig[:,:,sub_idx]
s_head_cohort[:,:,sub_idx] = s_head
        # calculate surrogates for this individual
s_rand_cohort[:,:,sub_idx,:] = surrogate_BOLD_create(U, func_sig[:,:,sub_idx], num_rand)
print('finished Graph Fourier Transform')
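# Illustrative sketch (not called anywhere in this pipeline): the symmetric
# normalization + graph Fourier transform used above, applied to a tiny toy
# connectome. All names below (toy_gft_example, W_toy, s_toy, ...) are local to
# this example and are assumptions, not part of the original analysis.
def toy_gft_example(num_timepoints=10):
    W_toy = np.array([[0., 1., 2.],
                      [1., 0., 1.],
                      [2., 1., 0.]])
    D_toy = np.diag(W_toy.sum(axis=1))      # degree matrix
    D_pow = np.power(D_toy, -0.5)           # element-wise D^(-1/2); zeros become inf
    D_pow[np.isinf(D_pow)] = 0              # same inf handling as the subject loop above
    W_norm = D_pow @ W_toy @ D_pow          # D^(-1/2) W D^(-1/2)
    G_toy = graphs.Graph(W_norm)
    G_toy.compute_fourier_basis()           # eigenvectors G_toy.U define the GFT
    s_toy = np.random.randn(W_toy.shape[0], num_timepoints)  # toy "BOLD" signal
    return G_toy.U.T @ s_toy                # graph Fourier coefficients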
# save_variable(G_U_cohort, 'G_U_cohort.pkl')
# save_variable(s_head_cohort, 's_head_cohort.pkl')
# save_variable(s_rand_cohort, 's_rand_cohort.pkl')
# G_U_cohort = load_variable('G_U_cohort.pkl')
# s_head_cohort = load_variable('s_head_cohort.pkl')
# s_rand_cohort = load_variable('s_rand_cohort.pkl')
# 8.5 (optional). plot Sihag2020-style figure
# take nc001 as example
nc001_idx = path_df.subname[path_df.subname == 'sub-nc001'].index.tolist()[0]
s_low = G_U_cohort[:,0:4, nc001_idx] @ s_head_cohort[0:4,:,nc001_idx]
s_high = G_U_cohort[:,-55:-51, nc001_idx] @ s_head_cohort[-55:-51,:,nc001_idx]
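# s_low / s_high: nc001's BOLD signal reconstructed from the 4 lowest-frequency harmonics and from 4 high-frequency harmonics (columns -55 to -52), exported below for the Sihag2020-style figure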
np.savetxt("nc001_s_low_both.csv", s_low, delimiter=",")
np.savetxt("nc001_s_high_both.csv", s_high, delimiter=",")
# 9. calculate the median-split threshold
NC_index = [cohort_name_ls.index(x) for x in cohort_name_ls if 'nc' in x]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head_NC = s_head_cohort[:,:,NC_index]
s_head_NC_square = np.power(s_head_NC, 2)
#s_head_NC_square = np.power(s_head_NC_square, 1/2)
s_head_NC_square_mean = np.mean(s_head_NC_square, (1,2)) # average for each timepoint and each subject
s_head_NC_AUCTOT = np.trapz(s_head_NC_square_mean)
i=0
AUC=0
while AUC < s_head_NC_AUCTOT/2:
AUC = np.trapz(s_head_NC_square_mean[:i])
i = i + 1
cutoff = i-1
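    # cutoff: harmonic index at which the cumulative spectral energy of the NC-average squared GFT coefficients reaches half of the total AUC (median split into low- and high-frequency harmonics)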
print('finished calculating the median-split threshold')
print('cutoff = {}'.format(cutoff))
# 10. calculate decoupling index for empirical data
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_liberal_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
s_aligned_cohort[:,:,sub_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_head_cohort[0:cutoff,:,sub_idx]
s_liberal_cohort[:,:,sub_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_head_cohort[cutoff-1:-1,:,sub_idx]
s_aligned_individual = np.linalg.norm(s_aligned_cohort, ord=2, axis=1)
s_liberal_individual = np.linalg.norm(s_liberal_cohort, ord=2, axis=1)
s_deCoupIdx_individual = s_liberal_individual / s_aligned_individual
s_aligned = np.mean(s_aligned_individual[:,nc_idx], axis=1)
s_liberal = np.mean(s_liberal_individual[:,nc_idx], axis=1)
s_deCoupIdx_node = s_liberal/s_aligned # only for NC
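    # per-node decoupling index: energy (2-norm over time) of the liberal (high-frequency) component divided by that of the aligned (low-frequency) component, averaged over NC subjects; this corresponds to the structural decoupling index of Preti & Van De Ville (2019)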
print('finished calculating decoupling index for empirical data')
# 11. calculate decoupling index for surrogate data only for NC
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
s_liberal_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
for i, sub_idx in enumerate(nc_idx):
for rand_idx in range(num_rand):
s_aligned_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_rand_cohort[0:cutoff,:,sub_idx,rand_idx]
s_liberal_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_rand_cohort[cutoff-1:-1,:,sub_idx,rand_idx]
# norm for BOLD timepoints
s_aligned_norm_rand = np.linalg.norm(s_aligned_cohort_rand, ord=2, axis=1)
s_liberal_norm_rand = np.linalg.norm(s_liberal_cohort_rand, ord=2, axis=1)
# average for cohorts
s_aligned_rand = np.mean(s_aligned_norm_rand, axis=1)
s_liberal_rand = np.mean(s_liberal_norm_rand, axis=1)
# decoupling index
s_deCoupIdx_node_rand = s_liberal_rand/s_aligned_rand
print('finished calculating decoupling index for surrogate data')
# 12. network-level harmonics for empirical and surrogate data
s_aligned_network = np.zeros(shape=(num_network))
s_liberal_network = np.zeros(shape=(num_network))
s_aligned_network_individual = np.zeros(shape=(num_network, num_sub))
s_liberal_network_individual = np.zeros(shape=(num_network, num_sub))
s_aligned_network_rand = np.zeros(shape=(num_network, num_rand))
s_liberal_network_rand = np.zeros(shape=(num_network, num_rand))
for i in range(num_network):
s_aligned_network[i] = np.mean(s_aligned[network_rowindex_ls[i]])
s_liberal_network[i] = np.mean(s_liberal[network_rowindex_ls[i]])
s_aligned_network_individual[i,:] = np.mean(s_aligned_individual[network_rowindex_ls[i],:], axis=0)
s_liberal_network_individual[i,:] = np.mean(s_liberal_individual[network_rowindex_ls[i],:], axis=0)
s_aligned_network_rand[i,:] = np.mean(s_aligned_rand[network_rowindex_ls[i],:], axis=0)
s_liberal_network_rand[i,:] = np.mean(s_liberal_rand[network_rowindex_ls[i],:], axis=0)
s_deCoupIdx_network = s_liberal_network/s_aligned_network
s_deCoupIdx_network_individual = s_liberal_network_individual/s_aligned_network_individual
s_deCoupIdx_network_rand = s_liberal_network_rand/s_aligned_network_rand
# 13. brain-level harmonics for empirical and surrogate data
s_aligned_brain = np.mean(s_aligned)
s_liberal_brain = np.mean(s_liberal)
s_deCoupIdx_brain = s_liberal_brain/s_aligned_brain
s_aligned_brain_individual = np.mean(s_aligned_individual, axis=0)
s_liberal_brain_individual = np.mean(s_liberal_individual, axis=0)
s_deCoupIdx_brain_individual = s_liberal_brain_individual/s_aligned_brain_individual
s_aligned_brain_rand = np.mean(s_aligned_rand, axis=0)
s_liberal_brain_rand = np.mean(s_liberal_rand, axis=0)
s_deCoupIdx_brain_rand = s_liberal_brain_rand/s_aligned_brain_rand
print('s_deCoupIdx_brain = {}'.format(s_deCoupIdx_brain))
# 14. significance of surrogate for plot
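# All three levels below apply the same two-tailed surrogate test: an empirical
# decoupling value is flagged significant when it lies outside the 5th-95th
# percentile range of its surrogate distribution. A minimal standalone sketch of
# that rule (the helper name is illustrative and is not called below):
def surrogate_two_tailed_flag(empirical, surrogate, axis=1):
    lower = np.percentile(surrogate, 5, axis=axis)
    upper = np.percentile(surrogate, 95, axis=axis)
    return np.logical_or(lower >= empirical, upper <= empirical).astype(int)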
# node-level
s_deCoupIdx_node_significance = np.logical_or((np.percentile(s_deCoupIdx_node_rand, 5, axis=1) >= s_deCoupIdx_node), (np.percentile(s_deCoupIdx_node_rand, 95, axis=1) <= s_deCoupIdx_node))
s_deCoupIdx_node_significance = s_deCoupIdx_node_significance.astype(int)
# network-level
s_deCoupIdx_network_significance = np.logical_or((np.percentile(s_deCoupIdx_network_rand, 5, axis=1) >= s_deCoupIdx_network), (np.percentile(s_deCoupIdx_network_rand, 95, axis=1) <= s_deCoupIdx_network))
s_deCoupIdx_network_significance = s_deCoupIdx_network_significance.astype(int)
# brain-level
s_deCoupIdx_brain_significance = np.logical_or((np.percentile(s_deCoupIdx_brain_rand, 5, axis=0) >= s_deCoupIdx_brain), (np.percentile(s_deCoupIdx_brain_rand, 95, axis=0) <= s_deCoupIdx_brain))
# 15. save results to csv
if normalize_type == 'W':
normalize_str = '_W'
elif normalize_type == 'L':
normalize_str = '_L'
elif normalize_type == 'both':
normalize_str = '_both'
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
csv_folder = 'BOLD_4D_' + tract_type + '_' + normalize_str
if not os.path.exists(os.path.abspath(csv_folder)):
os.mkdir(os.path.abspath(csv_folder))
# save surrogate (ndarray with num_rand × num_region)
s_deCoupIdx_node_rand_df = pd.DataFrame(data = s_deCoupIdx_node_rand.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_rand_df = pd.DataFrame(data = s_deCoupIdx_network_rand.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_rand_df = pd.DataFrame(data = s_deCoupIdx_brain_rand)
s_deCoupIdx_node_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_rand_df.csv'))
s_deCoupIdx_network_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_rand_df.csv'))
s_deCoupIdx_brain_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_rand_df.csv'))
# save surrogate significance (ndarray with 1 × num_region)
s_deCoupIdx_node_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node_significance, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network_significance, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_significance_df.csv'))
s_deCoupIdx_network_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_significance_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_significance.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain_significance))
# save empirical harmonics for NC cohort (for plot usage, ndarray with 1 × num_region)
s_deCoupIdx_node_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_empirical_df.csv'))
s_deCoupIdx_network_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' +'-network_empirical_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_empirical.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain))
# save subject-level harmonics (ndarray with num_sub × num_region)
s_deCoupIdx_node_individual_df = pd.DataFrame(data = s_deCoupIdx_individual.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_individual_df = pd.DataFrame(data = s_deCoupIdx_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_individual_df = pd.DataFrame(data = s_deCoupIdx_brain_individual)
s_deCoupIdx_node_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_node_individual_df],axis=1)
s_deCoupIdx_network_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_network_individual_df],axis=1)
s_deCoupIdx_brain_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_brain_individual_df],axis=1)
s_deCoupIdx_node_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_individual_df.csv'))
s_deCoupIdx_network_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_individual_df.csv'))
s_deCoupIdx_brain_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_individual_df.csv'))
# 16.(optional) save connectivity strength
# parcel-level
connectome_parcel_individual = np.zeros(shape=(num_sub, num_parcels))
# mean of nonzero
def non_zero_mean(np_arr):
exist = (np_arr != 0)
num = np_arr.sum(axis=1)
den = exist.sum(axis=1)
return num/den
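# non_zero_mean: per-parcel mean connection weight over existing (nonzero) edges only, so absent connections do not dilute the average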
for sub_idx in range(num_sub):
connectome_parcel_individual[sub_idx,:] = non_zero_mean(connectome_array[:,:,sub_idx])
connectome_parcel_individual_df = pd.DataFrame(data = connectome_parcel_individual, columns = network_assign_csv.loc[:,'LABEL'])
connectome_parcel_individual_df = pd.concat([path_df.loc[:,'subname'], connectome_parcel_individual_df],axis=1)
connectome_parcel_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 'connectome_' + '-parcel_individual_df.csv'))
# ICN-level
connectome_network_individual = np.zeros(shape=(num_network, num_sub))
for i in range(num_network):
network_i = network_unique_df.loc[i,'NETWORK']
parcel_network_df = dp(network_assign_csv).filter_by(X.NETWORK.isin([network_i])).pd
parcel_network_id = parcel_network_df.loc[:,'INDEX'].to_numpy()
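    # np.ix_ selects the within-network submatrix of each subject's connectome; averaging over both parcel axes gives the mean within-network connection strength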
connectome_network_individual[i,:] = np.mean(connectome_array[np.ix_(parcel_network_id-1,parcel_network_id-1)], axis=(0,1))
connectome_network_individual_df = pd.DataFrame(data = connectome_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
connectome_network_individual_df = pd.concat([path_df.loc[:,'subname'], connectome_network_individual_df],axis=1)
connectome_network_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 'connectome_' +'-network_individual_df.csv'))
# 17.(optional) save ICN-level brain activity
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
BOLD_network_individual = np.zeros(shape=(num_network, num_sub))
for i in range(num_network):
network_i = network_unique_df.loc[i,'NETWORK']
parcel_network_df = dp(network_assign_csv).filter_by(X.NETWORK.isin([network_i])).pd
parcel_network_id = parcel_network_df.loc[:,'INDEX'].to_numpy()
BOLD_series_norm = np.linalg.norm(BOLD_series_3D, ord=2, axis=1)
BOLD_network_individual[i,:] = np.mean(BOLD_series_norm[parcel_network_id-1,:], axis=0)
BOLD_network_individual_df = pd.DataFrame(data = BOLD_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
    BOLD_network_individual_df = pd.concat([path_df.loc[:,'subname'], BOLD_network_individual_df],axis=1)