| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
#SPDX-License-Identifier: MIT
"""
Creates a WSGI server that serves the Augur REST API
"""
import glob
import sys
import inspect
import types
import json
import os
import base64
import logging
from flask import Flask, request, Response, redirect
from flask_cors import CORS
import pandas as pd
import augur
from augur.routes import create_routes
AUGUR_API_VERSION = 'api/unstable'
logger = logging.getLogger(__name__)
class Server(object):
"""
Defines Augur's server's behavior
"""
def __init__(self, augur_app=None):
"""
Initializes the server, creating both the Flask application and Augur application
"""
# Create Flask application
self.app = Flask(__name__)
logger.debug("Created Flask app")
self.api_version = AUGUR_API_VERSION
app = self.app
CORS(app)
app.url_map.strict_slashes = False
self.augur_app = augur_app
self.manager = augur_app.manager
self.broker = augur_app.broker
self.housekeeper = augur_app.housekeeper
# Initialize cache
expire = int(self.augur_app.config.get_value('Server', 'cache_expire'))
self.cache = self.augur_app.cache.get_cache('server', expire=expire)
self.cache.clear()
app.config['WTF_CSRF_ENABLED'] = False
self.show_metadata = False
logger.debug("Creating API routes...")
create_routes(self)
#####################################
### UTILITY ###
#####################################
@app.route('/')
@app.route('/ping')
@app.route('/status')
@app.route('/healthcheck')
def index():
"""
Redirects to health check route
"""
return redirect(self.api_version)
@app.route('/{}/'.format(self.api_version))
@app.route('/{}/status'.format(self.api_version))
def status():
"""
Health check route
"""
status = {
'status': 'OK',
}
return Response(response=json.dumps(status),
status=200,
mimetype="application/json")
def transform(self, func, args=None, kwargs=None, repo_url_base=None, orient='records',
group_by=None, on=None, aggregate='sum', resample=None, date_col='date'):
"""
Serializes a dataframe in a JSON object and applies specified transformations
"""
if orient is None:
orient = 'records'
result = ''
if not self.show_metadata:
if repo_url_base:
kwargs['repo_url'] = str(base64.b64decode(repo_url_base).decode())
if not args and not kwargs:
data = func()
elif args and not kwargs:
data = func(*args)
else:
data = func(*args, **kwargs)
if hasattr(data, 'to_json'):
if group_by is not None:
data = data.group_by(group_by).aggregate(aggregate)
if resample is not None:
data['idx'] = pd.to_datetime(data[date_col])
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
import pandas as pd
fra_fon__fra_lines = []
with open('fon/train-parts/JW300-fra_fon.fra', 'r') as f:
for line in f:
fra_fon__fra_lines.append(line.strip())
fra_fon__fon_lines = []
with open('fon/train-parts/JW300-fra_fon.fon', 'r') as f:
for line in f:
fra_fon__fon_lines.append(line.strip())
fra_ewe__fra_lines = []
with open('ewe/train-parts/JW300-fra_ewe.fra', 'r') as f:
for line in f:
fra_ewe__fra_lines.append(line.strip())
fra_ewe__ewe_lines = []
with open('ewe/train-parts/JW300-fra_ewe.ewe', 'r') as f:
for line in f:
fra_ewe__ewe_lines.append(line.strip())
fra_fon_df = pd.DataFrame()
fra_fon_df['French'] = fra_fon__fra_lines
fra_fon_df['Target'] = fra_fon__fon_lines
print(fra_fon_df.shape) # (31962, 2)
fra_fon_df.to_csv('jw300_fra_fon.csv', index=False, encoding='utf-8')
fra_ewe_df = pd.DataFrame()
fra_ewe_df['French'] = fra_ewe__fra_lines
fra_ewe_df['Target'] = fra_ewe__ewe_lines
print(fra_ewe_df.shape) # (611204, 2)
fra_ewe_df.to_csv('jw300_fra_ewe.csv', index=False, encoding='utf-8')
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
fra_fon_df['Target_Language'] = 'Fon'
fra_ewe_df['Target_Language'] = 'Ewe'
fra_fon_df['ID'] = 'no_id'
fra_ewe_df['ID'] = 'no_id'
train_fon_df = pd.read_csv('Train_Fon.csv')
train_ewe_df = pd.read_csv('Train_Ewe.csv')
all_fon_df = pd.concat([train_fon_df, fra_fon_df])
print(all_fon_df.shape) # (85096, 4)
all_ewe_df = pd.concat([train_ewe_df, fra_ewe_df])
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
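# Minimal sketch of the melt pattern used by the parser above and the three that
# follow (toy values, illustrative words only): each 5-gram row is reshaped so
# every window word becomes its own (modifier, head, year, count, context) row,
# which is then filtered against the allowed context list.
_toy_ngram = pd.DataFrame({'modifier': ['coffee'], 'head': ['cup'], 'year': [2000],
                           'count': [7], 'w1': ['on'], 'w2': ['the'], 'w3': ['table']})
_toy_long = pd.melt(_toy_ngram, id_vars=['modifier', 'head', 'year', 'count'],
                    value_vars=['w1', 'w2', 'w3'], value_name='context')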
def mid1_parser(df): # _ N N _ _
cur_df=df.copy()
try:
cur_df[['w1','modifier','head','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid2_parser(df): # _ _ N N _
cur_df=df.copy()
try:
cur_df[['w1','w2','modifier','head','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def right_side_parser(df): # _ _ _ N N
cur_df=df.copy()
try:
cur_df[['w1','w2','w3','modifier','head']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def syntactic_reducer(df):
pattern=df.iloc[0].comp_class
if pattern==1: # N N _ _ N N
compound_left_df,modifier_left_df,head_left_df=left_side_parser(df)
compound_right_df,modifier_right_df,head_right_df=right_side_parser(df)
final_compound_df=pd.concat([compound_left_df,compound_right_df],ignore_index=True)
final_modifier_df=pd.concat([modifier_left_df,modifier_right_df],ignore_index=True)
final_head_df=pd.concat([head_left_df,head_right_df],ignore_index=True)
elif pattern==2: # N N _ _ _
final_compound_df,final_modifier_df,final_head_df=left_side_parser(df)
elif pattern==3: # _ N N _ _
final_compound_df,final_modifier_df,final_head_df=mid1_parser(df)
elif pattern==4: # _ _ N N _
final_compound_df,final_modifier_df,final_head_df=mid2_parser(df)
elif pattern==5: # _ _ _ N N
final_compound_df,final_modifier_df,final_head_df=right_side_parser(df)
return final_compound_df,final_modifier_df,final_head_df
def compound_extracter(df):
if df.loc[df.comp_class==1].shape[0]!=0:
sides_comp_df,sides_mod_df,sides_head_df=syntactic_reducer(df.loc[df.comp_class==1])
else:
sides_comp_df=pd.DataFrame()
sides_mod_df=pd.DataFrame()
sides_head_df=pd.DataFrame()
if df.loc[df.comp_class==2].shape[0]!=0:
left_comp_df,left_mod_df,left_head_df=syntactic_reducer(df.loc[df.comp_class==2])
else:
left_comp_df=pd.DataFrame()
left_mod_df=pd.DataFrame()
left_head_df=pd.DataFrame()
if df.loc[df.comp_class==3].shape[0]!=0:
mid1_comp_df,mid1_mod_df,mid1_head_df=syntactic_reducer(df.loc[df.comp_class==3])
else:
mid1_comp_df=pd.DataFrame()
mid1_mod_df=pd.DataFrame()
mid1_head_df=pd.DataFrame()
if df.loc[df.comp_class==4].shape[0]!=0:
mid2_comp_df,mid2_mod_df,mid2_head_df=syntactic_reducer(df.loc[df.comp_class==4])
else:
mid2_comp_df=pd.DataFrame()
mid2_mod_df=pd.DataFrame()
mid2_head_df=pd.DataFrame()
if df.loc[df.comp_class==5].shape[0]!=0:
right_comp_df,right_mod_df,right_head_df=syntactic_reducer(df.loc[df.comp_class==5])
else:
right_comp_df=pd.DataFrame()
right_mod_df=pd.DataFrame()
right_head_df=pd.DataFrame()
compounds=pd.concat([sides_comp_df,left_comp_df,mid1_comp_df,mid2_comp_df,right_comp_df],ignore_index=True,sort=False)
modifiers=pd.concat([sides_mod_df,left_mod_df,mid1_mod_df,mid2_mod_df,right_mod_df],ignore_index=True,sort=False)
heads=pd.concat([sides_head_df,left_head_df,mid1_head_df,mid2_head_df,right_head_df],ignore_index=True,sort=False)
if len(compounds)==0:
return compounds,modifiers,heads
compounds.dropna(inplace=True)
compounds=compounds.groupby(['modifier','head','context','year'])['count'].sum().to_frame()
compounds.reset_index(inplace=True)
modifiers.dropna(inplace=True)
modifiers=modifiers.groupby(['modifier','context','year'])['count'].sum().to_frame()
modifiers.reset_index(inplace=True)
heads.dropna(inplace=True)
heads=heads.groupby(['head','context','year'])['count'].sum().to_frame()
heads.reset_index(inplace=True)
return compounds,modifiers,heads
def parallelize_dataframe(df):
num_partitions=round(0.95*mp.cpu_count())
df_split = np.array_split(df, num_partitions)
print("Done splitting the datasets")
pool = Pool(num_partitions)
cur_time=time.time()
print("Starting parallelizing")
if not args.word:
results=pool.map_async(compound_extracter,df_split)
pool.close()
pool.join()
results=results.get()
print("Done parallelizing")
print("Total time taken",round(time.time()-cur_time),"secs")
compound_list = [ result[0] for result in results]
compounds=pd.concat(compound_list,ignore_index=True)
compounds=compounds.groupby(['modifier','head','context','year'])['count'].sum().to_frame()
compounds.reset_index(inplace=True)
if not isfile(f'{args.output}/compounds.csv'):
compounds.to_csv(f'{args.output}/compounds.csv',sep="\t",index=False)
else:
compounds.to_csv(f'{args.output}/compounds.csv', mode='a',sep="\t", header=False,index=False)
modifier_list = [ result[1] for result in results]
modifiers=pd.concat(modifier_list,ignore_index=True)
modifiers=modifiers.groupby(['modifier','context','year'])['count'].sum().to_frame()
modifiers.reset_index(inplace=True)
if not isfile(f'{args.output}/modifiers.csv'):
modifiers.to_csv(f'{args.output}/modifiers.csv',sep="\t",index=False)
else:
modifiers.to_csv(f'{args.output}/modifiers.csv', mode='a',sep="\t",header=False,index=False)
head_list = [ result[2] for result in results]
heads=pd.concat(head_list,ignore_index=True)
' Packages '
from __future__ import (absolute_import, division, print_function)
#from __future__ import division
import os
import sys
import csv
import pickle
import datetime
import numpy as np
import pandas as pd
import networkx as nx
import multiprocessing as mp
import matplotlib.pyplot as plt
import pysal as ps  # assumed import: dbf2DF below calls ps.open to read DBF files
from gurobipy import *
#Read dbf files as dataframes with pandas
def dbf2DF(dbfile, upper=True): #Reads in DBF files and returns Pandas DF
db = ps.open(dbfile) #Pysal to open DBF
d = {col: db.by_col(col) for col in db.header} #Convert dbf to dictionary
#pandasDF = pd.DataFrame(db[:]) #Convert to Pandas DF
pandasDF = pd.DataFrame(d) #Convert to Pandas DF
# if upper == True: #Make columns uppercase if wanted
# pandasDF.columns = map(str.upper, db.header)
db.close()
return pandasDF
#Filtering by TMC
def filter_tmc(df,tmc_list,confidence_score_min,c_value_min):
df = df[df.tmc_code.isin(tmc_list)]
#df = df[df.confidence_score >= confidence_score_min]
df2 = df[df.cvalue >= c_value_min]
return df2
#Filtering between specific dates
def filter_dates(df,start_date,end_date):
df = df[df.index >= start_date]
df = df[df.index <= end_date]
df_filter_data = df
return df_filter_data
#Filerting between specific times
def filter_time(df,start_time,end_time):
df['measurement_tstamp']=pd.to_datetime(df['measurement_tstamp'], format='%Y-%m-%d %H:%M:%S')
df= df.set_index('measurement_tstamp')
df = df.between_time(start_time, end_time, include_start=True, include_end=True)
df_filter_time = df
return df_filter_time
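# Minimal sketch (toy rows, illustrative values): filter_time parses
# measurement_tstamp and makes it the index, so it is applied before
# filter_dates, which filters on that datetime index.
_toy_readings = pd.DataFrame({
    'measurement_tstamp': ['2019-01-02 07:15:00', '2019-01-02 12:00:00'],
    'speed': [52.0, 61.0]})
_am_peak = filter_dates(filter_time(_toy_readings, '06:00', '09:00'),
                        '2019-01-01', '2019-03-31')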
#Redefining percentile function to use it on a pandas groupby
def percentile(n):
def percentile_(x):
return np.nanpercentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
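# Minimal usage sketch (toy data, illustrative column names): percentile(n)
# returns a named callable, so it can be passed straight into a pandas
# groupby aggregation alongside other reducers.
_speed_sample = pd.DataFrame({'tmc_code': ['a', 'a', 'b'], 'speed': [50.0, 62.0, 41.0]})
_speed_pcts = _speed_sample.groupby('tmc_code')['speed'].agg([percentile(50), percentile(85)])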
#Green Shield model
#def greenshield(speed,capacity,free_flow_speed):
# x = 4 * capacity * (np.true_divide(speed,free_flow_speed)-(np.true_divide(speed,free_flow_speed)**2))
# return x
def greenshield(speed,capacity,free_flow_speed):
if speed > free_flow_speed or capacity < 0:
return 0
x = 4 * capacity * speed / free_flow_speed - 4 * capacity * (speed ** 2) / (free_flow_speed ** 2)
return x
#Greenshield, density
# v = v_min + (v_max - v_min)(1-den/max_den)
# -> den = ((v - v_min)/(v_max - v_min) -1) * - max_den
def greenshield_density(speed, capacity, free_flow_speed, length, num_lanes):
v_min = 1
v_max = free_flow_speed
#max_den = length/0.00298295 *num_lanes #average size of a car
max_den = 2*capacity
den = []
for v in speed:
if v > v_max:
v = v_max
if v < v_min:
v = v_min
den.append(((v - v_min)/(v_max - v_min) - 1) * - max_den)
return den
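# Minimal sketch (illustrative numbers, not calibrated values): Greenshields
# flow for a single observed speed, and the matching densities for a short
# list of speeds on a hypothetical 2-lane segment.
_flow_veh_per_hr = greenshield(speed=30.0, capacity=1800.0, free_flow_speed=60.0)
_densities = greenshield_density([30.0, 45.0], capacity=1800.0,
                                 free_flow_speed=60.0, length=1.0, num_lanes=2)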
# Plot shapefiles
def plot_shp(shp_obj):
import shapefile as shp
import matplotlib.pyplot as plt
sf = shp_obj
plt.figure()
for shape in sf.shapeRecords():
x = [i[0] for i in shape.shape.points[:]]
y = [i[1] for i in shape.shape.points[:]]
plt.plot(x,y)
plt.show()
# Set differences between lists
def diff(first, second):
second = set(second)
return [item for item in first if item not in second]
def random_color():
import random
r = lambda: random.randint(0,255)
return ('#%02X%02X%02X' % (r(),r(),r()))
def tmc_to_links(G):
import networkx as nx
from random import choice
import pandas as pd
import numpy as np
df = pd.DataFrame(columns=['link','tmc','roadnumb'])
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to datframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas. Because there is no true guarantee which one will come first,
# if the results don't match, at least make sure that the `freq` is the same.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is intentionally unused so that there is no confusing
# list-comprehension logic inside the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
#Let's start with importing necessary libraries
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge,Lasso,RidgeCV, LassoCV, ElasticNet, ElasticNetCV, LogisticRegression
from sklearn.model_selection import train_test_split
#from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
import scikitplot as skl
import sklearn
sns.set()
data = pd.read_csv("diabetes.csv") # Reading the Data
#Replacing the zero-values for Blood Pressure
df1 = data.loc[data['Outcome'] == 1]
df2 = data.loc[data['Outcome'] == 0]
df1 = df1.replace({'BloodPressure':0}, np.median(df1['BloodPressure']))
df2 = df2.replace({'BloodPressure':0}, np.median(df2['BloodPressure']))
dataframe = [df1, df2]
data = pd.concat(dataframe)
import os
import pandas as pd
def loadData(folder_path: str, date: str, start_time: str='9:30',
end_time: str='16:00') -> pd.DataFrame:
"""Function to load complete price data for a given asset, from a given
folder. This function loads all '*.csv' files from a given directory
corresponding to instruments on a specific asset. Given a date, this
returns a formatted DataFrame with 1 minute intervals from a start
time to end time of the day, with each of the aligned prices in columns
corresponding to the files they were sourced from. This function assumes
dates and times are in the first column of the CSV file (headers 'Dates'),
and that the prices are in the second column. The corresponding column in
the final DataFrame is the name of the file it was read from. This function
also forward and backward propagates prices from the last/first viable
value if one is not available for a given minute.
Arguments:
folder_path {str} -- Path from which CSV files are to be ingested.
date {str} -- Date the data was collected. This is encoded in the index
of the DataFrame (format: yyyy-mm-dd).
Keyword Arguments:
start_time {str} -- Start time (military time) (default: {'9:30'}).
end_time {str} -- End time (military time) (default: {'16:00'}).
Returns:
pd.DataFrame -- Formatted DataFrame with aligned prices.
"""
file_list = os.listdir(folder_path) # Getting files
# Removing non-CSV files from list (Assume one '.' in file name)
file_list = [x for x in file_list if x.split('.')[1] == 'csv']
# Defining full start and end time
start = date + ' ' + start_time
end = date + ' ' + end_time
# Building DataFrame with correct index
data_index = pd.DatetimeIndex(start=start, end=end, freq='1min')
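# Illustrative call only -- a sketch with an assumed folder name and date,
# not part of the original script:
# prices = loadData('data/asset_csvs', '2019-01-02', start_time='9:30', end_time='16:00')
# print(prices.head())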
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 17:28:04 2020
@author: shlomi
"""
from PW_paths import work_yuval
from matplotlib import rcParams
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from PW_paths import savefig_path
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PW_stations import produce_geo_gnss_solved_stations
tela_results_path = work_yuval / 'GNSS_stations/tela/rinex/30hr/results'
tela_solutions = work_yuval / 'GNSS_stations/tela/gipsyx_solutions'
sound_path = work_yuval / 'sounding'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
dem_path = work_yuval / 'AW3D30'
era5_path = work_yuval / 'ERA5'
hydro_path = work_yuval / 'hydro'
ceil_path = work_yuval / 'ceilometers'
aero_path = work_yuval / 'AERONET'
climate_path = work_yuval / 'climate'
df_gnss = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
st_order_climate = [x for x in df_gnss.dropna().sort_values(
['groups_climate', 'lat', 'lon'], ascending=[1, 0, 0]).index]
rc = {
'font.family': 'serif',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large'}
for key, val in rc.items():
rcParams[key] = val
# sns.set(rc=rc, style='white')
seasonal_colors = {'DJF': 'tab:blue',
'SON': 'tab:red',
'JJA': 'tab:green',
'MAM': 'tab:orange',
'Annual': 'tab:purple'}
def get_twin(ax, axis):
assert axis in ("x", "y")
siblings = getattr(ax, f"get_shared_{axis}_axes")().get_siblings(ax)
for sibling in siblings:
if sibling.bbox.bounds == ax.bbox.bounds and sibling is not ax:
return sibling
return None
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
from math import floor, log10
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
def utm_from_lon(lon):
"""
utm_from_lon - UTM zone for a longitude
Not right for some polar regions (Norway, Svalbard, Antarctica)
:param float lon: longitude
:return: UTM zone number
:rtype: int
"""
from math import floor
return floor((lon + 180) / 6) + 1
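# Quick check of the zone formula: utm_from_lon(35.0) -> 36, the UTM zone
# spanning 30E-36E that covers Israel (the region plotted in this module).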
def scale_bar(ax, proj, length, location=(0.5, 0.05), linewidth=3,
units='km', m_per_unit=1000, bounds=None):
"""
http://stackoverflow.com/a/35705477/1072212
ax is the axes to draw the scalebar on.
proj is the projection the axes are in
location is center of the scalebar in axis coordinates ie. 0.5 is the middle of the plot
length is the length of the scalebar in km.
linewidth is the thickness of the scalebar.
units is the name of the unit
m_per_unit is the number of meters in a unit
"""
import cartopy.crs as ccrs
from matplotlib import patheffects
# find lat/lon center to find best UTM zone
try:
x0, x1, y0, y1 = ax.get_extent(proj.as_geodetic())
except AttributeError:
if bounds is not None:
x0, x1, y0, y1 = bounds
# Projection in metres
utm = ccrs.UTM(utm_from_lon((x0+x1)/2))
# Get the extent of the plotted area in coordinates in metres
x0, x1, y0, y1 = ax.get_extent(utm)
# Turn the specified scalebar location into coordinates in metres
sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
# Generate the x coordinate for the ends of the scalebar
bar_xs = [sbcx - length * m_per_unit/2, sbcx + length * m_per_unit/2]
# buffer for scalebar
buffer = [patheffects.withStroke(linewidth=5, foreground="w")]
# Plot the scalebar with buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, path_effects=buffer)
# buffer for text
buffer = [patheffects.withStroke(linewidth=3, foreground="w")]
# Plot the scalebar label
t0 = ax.text(sbcx, sbcy, str(length) + ' ' + units, transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
left = x0+(x1-x0)*0.05
# Plot the N arrow
t1 = ax.text(left, sbcy, u'\u25B2\nN', transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
# Plot the scalebar without buffer, in case covered by text buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, zorder=3)
return
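# Minimal usage sketch for scale_bar, assuming a cartopy GeoAxes (names and
# extent are illustrative, not from the original module):
# import cartopy.crs as ccrs
# fig, ax = plt.subplots(subplot_kw={'projection': ccrs.PlateCarree()})
# ax.set_extent([34.0, 36.0, 29.0, 34.0])  # roughly Israel
# scale_bar(ax, ccrs.PlateCarree(), 50, location=(0.2, 0.05), units='km')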
@ticker.FuncFormatter
def lon_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$W'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$E'.format(abs(x))
elif x == 0:
return r'0$\degree$'
@ticker.FuncFormatter
def lat_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$S'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$N'.format(abs(x))
elif x == 0:
return r'0$\degree$'
def align_yaxis_np(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio"""
import numpy as np
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:,1] / (extrema[:,1] - extrema[:,0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0,1] = extrema[0,0] + tot_span * (extrema[0,1] - extrema[0,0])
extrema[1,0] = extrema[1,1] + tot_span * (extrema[1,0] - extrema[1,1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
def get_legend_labels_handles_title_seaborn_histplot(ax):
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
return handles, labels, title
def alignYaxes(axes, align_values=None):
'''Align the ticks of multiple y axes
Args:
axes (list): list of axes objects whose yaxis ticks are to be aligned.
Keyword Args:
align_values (None or list/tuple): if not None, should be a list/tuple
of floats with same length as <axes>. Values in <align_values>
define where the corresponding axes should be aligned up. E.g.
[0, 100, -22.5] means the 0 in axes[0], 100 in axes[1] and -22.5
in axes[2] would be aligned up. If None, align (approximately)
the lowest ticks in all axes.
Returns:
new_ticks (list): a list of new ticks for each axis in <axes>.
A new set of ticks is computed for each axis in <axes>, all of equal
length.
'''
from matplotlib.pyplot import MaxNLocator
import numpy as np
nax = len(axes)
ticks = [aii.get_yticks() for aii in axes]
if align_values is None:
aligns = [ticks[ii][0] for ii in range(nax)]
else:
if len(align_values) != nax:
raise Exception(
"Length of <axes> doesn't equal that of <align_values>.")
aligns = align_values
bounds = [aii.get_ylim() for aii in axes]
# align at some points
ticks_align = [ticks[ii]-aligns[ii] for ii in range(nax)]
# scale the range to 1-100
ranges = [tii[-1]-tii[0] for tii in ticks]
lgs = [-np.log10(rii)+2. for rii in ranges]
igs = [np.floor(ii) for ii in lgs]
log_ticks = [ticks_align[ii]*(10.**igs[ii]) for ii in range(nax)]
# put all axes ticks into a single array, then compute new ticks for all
comb_ticks = np.concatenate(log_ticks)
comb_ticks.sort()
locator = MaxNLocator(nbins='auto', steps=[1, 2, 2.5, 3, 4, 5, 8, 10])
new_ticks = locator.tick_values(comb_ticks[0], comb_ticks[-1])
new_ticks = [new_ticks/10.**igs[ii] for ii in range(nax)]
new_ticks = [new_ticks[ii]+aligns[ii] for ii in range(nax)]
# find the lower bound
idx_l = 0
for i in range(len(new_ticks[0])):
if any([new_ticks[jj][i] > bounds[jj][0] for jj in range(nax)]):
idx_l = i-1
break
# find the upper bound
idx_r = 0
for i in range(len(new_ticks[0])):
if all([new_ticks[jj][i] > bounds[jj][1] for jj in range(nax)]):
idx_r = i
break
# trim tick lists by bounds
new_ticks = [tii[idx_l:idx_r+1] for tii in new_ticks]
# set ticks for each axis
for axii, tii in zip(axes, new_ticks):
axii.set_yticks(tii)
return new_ticks
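# Usage sketch with assumed data: line up the zeros of two twinned y-axes.
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# ax1.plot([0, 1, 2], [0, 10, 20])
# ax2.plot([0, 1, 2], [-5, 0, 5], color='tab:red')
# alignYaxes([ax1, ax2], align_values=[0, 0])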
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1 - y2) / 2, v2)
adjust_yaxis(ax1, (y2 - y1) / 2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny * (maxy + dy) / (miny + dy)
else:
nmaxy = maxy
nminy = maxy * (miny + dy) / (maxy + dy)
ax.set_ylim(nminy + v, nmaxy + v)
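# Usage sketch (assumed axes): align the zero of a twin axis with the host axis.
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# ax1.plot([1, 2, 3], [-2, 1, 4]); ax2.plot([1, 2, 3], [10, -5, 20], color='tab:red')
# align_yaxis(ax1, 0, ax2, 0)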
def qualitative_cmap(n=2):
import matplotlib.colors as mcolors
if n == 2:
colorsList = [mcolors.BASE_COLORS['r'], mcolors.BASE_COLORS['g']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 4:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 5:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m'],
mcolors.BASE_COLORS['b']]
cmap = mcolors.ListedColormap(colorsList)
return cmap
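# Illustrative usage sketch for qualitative_cmap: color scatter points by an
# integer category code (synthetic data; n=4 matches one of the supported
# palette sizes above).
def _example_qualitative_cmap():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(1)
    x, y = rng.random(100), rng.random(100)
    cat = rng.integers(0, 4, 100)
    fig, ax = plt.subplots()
    sc = ax.scatter(x, y, c=cat, cmap=qualitative_cmap(n=4))
    fig.colorbar(sc, ax=ax, ticks=range(4))
    return ax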
def caption(text, color='blue', **kwargs):
from termcolor import colored
print(colored('Caption:', color, attrs=['bold'], **kwargs))
print(colored(text, color, attrs=['bold'], **kwargs))
return
def adjust_lightness(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
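# Illustrative usage sketch for adjust_lightness: amounts below 1 darken and
# amounts above 1 lighten a named or hex color (the values are arbitrary).
def _example_adjust_lightness():
    import matplotlib.pyplot as plt
    base = 'tab:blue'
    darker = adjust_lightness(base, 0.5)
    lighter = adjust_lightness(base, 1.5)
    fig, ax = plt.subplots()
    ax.bar([0, 1, 2], [1, 1, 1], color=[darker, base, lighter])
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(['darker', 'base', 'lighter'])
    return ax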
def produce_colors_for_pwv_station(scope='annual', zebra=False,
as_dict=False, as_cat_dict=False):
import pandas as pd
stns = group_sites_to_xarray(scope=scope)
cdict = {'coastal': 'tab:blue',
'highland': 'tab:green',
'eastern': 'tab:orange'}
if as_cat_dict:
return cdict
# for grp, color in cdict.copy().items():
# cdict[grp] = to_rgba(get_named_colors_mapping()[
# color], alpha=1)
ds = stns.to_dataset('group')
colors = []
for group in ds:
sts = ds[group].dropna('GNSS').values
for i, st in enumerate(sts):
color = cdict.get(group)
if zebra:
if i % 2 != 0:
# rgba = np.array(rgba)
# rgba[-1] = 0.5
color = adjust_lightness(color, 0.5)
colors.append(color)
# colors = [item for sublist in colors for item in sublist]
stns = stns.T.values.ravel()
stns = stns[~pd.isnull(stns)]
if as_dict:
colors = dict(zip(stns, colors))
return colors
def fix_time_axis_ticks(ax, limits=None, margin=15):
import pandas as pd
import matplotlib.dates as mdates
if limits is not None:
ax.set_xlim(*pd.to_datetime(limits))
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(mdates.MonthLocator())
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
return ax
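# Illustrative usage sketch for fix_time_axis_ticks (the synthetic monthly
# series and the date limits below are invented for the example).
def _example_fix_time_axis_ticks():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    times = pd.date_range('2015-01-01', '2019-12-31', freq='MS')
    ts = pd.Series(np.random.randn(len(times)).cumsum(), index=times)
    fig, ax = plt.subplots()
    ax.plot(ts.index, ts.values)
    # yearly major ticks, monthly minor ticks, fixed x-limits:
    ax = fix_time_axis_ticks(ax, limits=['2015-01-01', '2020-01-01'])
    return ax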
def plot_qflux_climatotlogy_israel(path=era5_path, save=True, reduce='mean',
plot_type='uv'):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
ds = xr.load_dataset(path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
if plot_type == 'uv':
f1 = ds['q'] * ds['u']
f2 = ds['q'] * ds['v']
elif plot_type == 'md':
qu = ds['q'] * ds['u']
qv = ds['q'] * ds['v']
f1 = np.sqrt(qu**2 + qv**2)
f2 = np.rad2deg(np.arctan2(qv, qu))
if reduce == 'mean':
f1_clim = f1.groupby('time.month').mean().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').mean().mean(
'longitude').mean('latitude')
center = 0
cmap = 'bwr'
elif reduce == 'std':
f1_clim = f1.groupby('time.month').std().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').std().mean(
'longitude').mean('latitude')
center = None
cmap = 'viridis'
ds_clim = xr.concat([f1_clim, f2_clim], 'direction')
ds_clim['direction'] = ['zonal', 'meridional']
if plot_type == 'md':
fg, axes = plt.subplots(1, 2, figsize=(14, 7))
f1_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[0])
f2_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[1])
else:
fg = ds_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(
levels=41,
yincrease=False,
cmap=cmap,
center=center,
col='direction',
figsize=(
15,
6))
fg.fig.suptitle('Moisture flux climatology over Israel')
# fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# qu_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[0], cmap='bwr', center=0)
# qv_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[1], cmap='bwr', center=0)
fg.fig.subplots_adjust(top=0.923,
bottom=0.102,
left=0.058,
right=0.818,
hspace=0.2,
wspace=0.045)
if save:
filename = 'moisture_clim_from_ERA5_over_israel.png'
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_mean_std_count(da_ts, time_reduce='hour', reduce='mean',
count_factor=1):
    """plot mean, std and count of Xarray dataarray time-series"""
    import xarray as xr
    import seaborn as sns
    cmap = sns.color_palette("colorblind", 2)
time_dim = list(set(da_ts.dims))[0]
grp = '{}.{}'.format(time_dim, time_reduce)
if reduce == 'mean':
mean = da_ts.groupby(grp).mean()
elif reduce == 'median':
mean = da_ts.groupby(grp).median()
std = da_ts.groupby(grp).std()
mean_plus_std = mean + std
mean_minus_std = mean - std
count = da_ts.groupby(grp).count()
if isinstance(da_ts, xr.Dataset):
dvars = [x for x in da_ts.data_vars.keys()]
assert len(dvars) == 2
secondary_y = dvars[1]
else:
secondary_y = None
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 15))
mean_df = mean.to_dataframe()
if secondary_y is not None:
axes[0] = mean_df[dvars[0]].plot(
ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
ax2mean = mean_df[secondary_y].plot(
ax=axes[0],
linewidth=2.0,
marker='s',
color=cmap[1],
secondary_y=True)
h1, l1 = axes[0].get_legend_handles_labels()
h2, l2 = axes[0].right_ax.get_legend_handles_labels()
handles = h1 + h2
labels = l1 + l2
axes[0].legend(handles, labels)
axes[0].fill_between(mean_df.index.values,
mean_minus_std[dvars[0]].values,
mean_plus_std[dvars[0]].values,
color=cmap[0],
alpha=0.5)
ax2mean.fill_between(
mean_df.index.values,
mean_minus_std[secondary_y].values,
mean_plus_std[secondary_y].values,
color=cmap[1],
alpha=0.5)
ax2mean.tick_params(axis='y', colors=cmap[1])
else:
mean_df.plot(ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
axes[0].fill_between(
mean_df.index.values,
mean_minus_std.values,
mean_plus_std.values,
color=cmap[0],
alpha=0.5)
axes[0].grid()
count_df = count.to_dataframe() / count_factor
count_df.plot.bar(ax=axes[1], rot=0)
axes[0].xaxis.set_tick_params(labelbottom=True)
axes[0].tick_params(axis='y', colors=cmap[0])
fig.tight_layout()
if secondary_y is not None:
return axes, ax2mean
else:
return axes
def plot_seasonal_histogram(da, dim='sound_time', xlim=None, xlabel=None,
suptitle=''):
fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
figsize=(10, 8))
seasons = ['DJF', 'MAM', 'JJA', 'SON']
cmap = sns.color_palette("colorblind", 4)
for i, ax in enumerate(axs.flatten()):
da_season = da.sel(
{dim: da['{}.season'.format(dim)] == seasons[i]}).dropna(dim)
ax = sns.distplot(da_season, ax=ax, norm_hist=False,
color=cmap[i], hist_kws={'edgecolor': 'k'},
axlabel=xlabel,
label=seasons[i])
ax.set_xlim(xlim)
ax.legend()
# axes.set_xlabel('MLH [m]')
ax.set_ylabel('Frequency')
fig_hist.suptitle(suptitle)
fig_hist.tight_layout()
return axs
def plot_two_histograms_comparison(x, y, bins=None, labels=['x', 'y'],
ax=None, colors=['b', 'r']):
import numpy as np
import matplotlib.pyplot as plt
x_w = np.empty(x.shape)
x_w.fill(1/x.shape[0])
y_w = np.empty(y.shape)
y_w.fill(1/y.shape[0])
if ax is None:
fig, ax = plt.subplots()
ax.hist([x, y], bins=bins, weights=[x_w, y_w], color=colors,
label=labels)
ax.legend()
return ax
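# Illustrative usage sketch for plot_two_histograms_comparison with two
# synthetic samples (the distributions and labels are invented).
def _example_two_histograms():
    import numpy as np
    rng = np.random.default_rng(2)
    x = rng.normal(20, 5, 1000)
    y = rng.normal(25, 4, 800)
    bins = np.linspace(0, 45, 30)
    ax = plot_two_histograms_comparison(x, y, bins=bins,
                                        labels=['sample A', 'sample B'])
    ax.set_xlabel('PWV [mm]')
    ax.set_ylabel('relative frequency')
    return ax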
def plot_diurnal_wind_hodograph(path=ims_path, station='TEL-AVIV-COAST',
season=None, cmax=None, ax=None):
import xarray as xr
from metpy.plots import Hodograph
# import matplotlib
import numpy as np
colorbar = False
# from_list = matplotlib.colors.LinearSegmentedColormap.from_list
cmap = plt.cm.get_cmap('hsv', 24)
# cmap = from_list(None, plt.cm.jet(range(0,24)), 24)
U = xr.open_dataset(path / 'IMS_U_israeli_10mins.nc')
V = xr.open_dataset(path / 'IMS_V_israeli_10mins.nc')
u_sta = U[station]
v_sta = V[station]
u_sta.load()
v_sta.load()
if season is not None:
print('{} season selected'.format(season))
u_sta = u_sta.sel(time=u_sta['time.season'] == season)
v_sta = v_sta.sel(time=v_sta['time.season'] == season)
u = u_sta.groupby('time.hour').mean()
v = v_sta.groupby('time.hour').mean()
if ax is None:
colorbar = True
fig, ax = plt.subplots()
    if cmax is None:
        max_uv = max(max(u.values), max(v.values)) + 1
    else:
        max_uv = cmax
h = Hodograph(component_range=max_uv, ax=ax)
h.add_grid(increment=0.5)
# hours = np.arange(0, 25)
lc = h.plot_colormapped(u, v, u.hour, cmap=cmap,
linestyle='-', linewidth=2)
#ticks = np.arange(np.min(hours), np.max(hours))
# cb = fig.colorbar(lc, ticks=range(0,24), label='Time of Day [UTC]')
if colorbar:
cb = ax.figure.colorbar(lc, ticks=range(
0, 24), label='Time of Day [UTC]')
# cb.ax.tick_params(length=0)
if season is None:
ax.figure.suptitle('{} diurnal wind Hodograph'.format(station))
else:
ax.figure.suptitle(
'{} diurnal wind Hodograph {}'.format(station, season))
ax.set_xlabel('North')
ax.set_ylabel('East')
ax.set_title('South')
ax2 = ax.twinx()
ax2.tick_params(axis='y', right=False, labelright=False)
ax2.set_ylabel('West')
# axcb = fig.colorbar(lc)
return ax
def plot_MLR_GNSS_PW_harmonics_facetgrid(path=work_yuval, season='JJA',
n_max=2, ylim=None, scope='diurnal',
save=True, era5=False, leg_size=15):
"""
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is work_yuval.
season : TYPE, optional
DESCRIPTION. The default is 'JJA'.
n_max : TYPE, optional
DESCRIPTION. The default is 2.
ylim : TYPE, optional
the ylimits of each panel use [-6,8] for annual. The default is None.
scope : TYPE, optional
DESCRIPTION. The default is 'diurnal'.
save : TYPE, optional
DESCRIPTION. The default is True.
era5 : TYPE, optional
DESCRIPTION. The default is False.
leg_size : TYPE, optional
DESCRIPTION. The default is 15.
Returns
-------
None.
"""
import xarray as xr
from aux_gps import run_MLR_harmonics
from matplotlib.ticker import AutoMinorLocator
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
sns.set_style('whitegrid')
sns.set_style('ticks')
geo = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
if scope == 'diurnal':
cunits = 'cpd'
ticks = np.arange(0, 23, 3)
xlabel = 'Hour of day [UTC]'
elif scope == 'annual':
cunits = 'cpy'
ticks = np.arange(1, 13, 1)
xlabel = 'month'
print('producing {} harmonics plot.'.format(scope))
if era5:
harmonics = xr.load_dataset(path / 'GNSS_PW_era5_harmonics_{}.nc'.format(scope))
else:
harmonics = xr.load_dataset(path / 'GNSS_PW_harmonics_{}.nc'.format(scope))
# sites = sorted(list(set([x.split('_')[0] for x in harmonics])))
# da = xr.DataArray([x for x in range(len(sites))], dims='GNSS')
# da['GNSS'] = sites
sites = group_sites_to_xarray(upper=False, scope=scope)
sites_flat = [x for x in sites.values.flatten()]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
try:
harm_site = harmonics[[x for x in harmonics if site in x]]
if site in ['nrif']:
leg_loc = 'upper center'
elif site in ['yrcm', 'ramo']:
leg_loc = 'lower center'
# elif site in ['katz']:
# leg_loc = 'upper right'
else:
leg_loc = None
if scope == 'annual':
leg_loc = 'upper left'
ax, handles, labels = run_MLR_harmonics(harm_site, season=season,
cunits=cunits,
n_max=n_max, plot=True, ax=ax,
legend_loc=leg_loc, ncol=1,
legsize=leg_size, lw=2.5,
legend_S_only=True)
ax.set_xlabel(xlabel, fontsize=16)
if ylim is not None:
ax.set_ylim(*ylim)
ax.tick_params(axis='x', which='major', labelsize=18)
# if scope == 'diurnal':
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=18)
ax.yaxis.tick_left()
ax.xaxis.set_ticks(ticks)
ax.grid()
ax.set_title('')
ax.set_ylabel('')
ax.grid(axis='y', which='minor', linestyle='--')
# get this for upper legend:
# handles, labels = ax.get_legend_handles_labels()
if scope == 'annual':
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
label_coord = [0.52, 0.87]
fs = 18
elif scope == 'diurnal':
site_label = site.upper()
label_coord = [0.1, 0.85]
fs = 20
ax.text(*label_coord, site_label,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes, fontsize=fs)
if j == 0:
ax.set_ylabel('PWV anomalies [mm]', fontsize=16)
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
except TypeError:
print('{}, {} axis off'.format(i, j))
ax.set_axis_off()
# for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
# harm_site = harmonics[[x for x in harmonics if sites[i] in x]]
# if site in ['elat', 'nrif']:
# loc = 'upper center'
# text = 0.1
# elif site in ['elro', 'yrcm', 'ramo', 'slom', 'jslm']:
# loc = 'upper right'
# text = 0.1
# else:
# loc = None
# text = 0.1
# ax = run_MLR_diurnal_harmonics(harm_site, season=season, n_max=n_max, plot=True, ax=ax, legend_loc=loc)
# ax.set_title('')
# ax.set_ylabel('PW anomalies [mm]')
# if ylim is not None:
# ax.set_ylim(ylim[0], ylim[1])
# ax.text(text, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# for i, ax in enumerate(fg.axes.flatten()):
# if i > (da.GNSS.telasize-1):
# ax.set_axis_off()
# pass
# add upper legend for all factes:
S_labels = labels[:-2]
S_labels = [x.split(' ')[0] for x in S_labels]
last_label = 'Mean PWV anomalies'
sum_label = labels[-2].split("'")[1]
S_labels.append(sum_label)
S_labels.append(last_label)
fg.fig.legend(handles=handles, labels=S_labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.032,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
if era5:
filename = 'pw_era5_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
else:
filename = 'pw_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_gustiness(path=work_yuval, ims_path=ims_path, site='tela',
ims_site='HAIFA-TECHNION', season='JJA', month=None, pts=7,
ax=None):
import xarray as xr
import numpy as np
g = xr.open_dataset(
ims_path / 'IMS_G{}_israeli_10mins_daily_anoms.nc'.format(pts))[ims_site]
g.load()
    if season is not None and month is not None:
        raise ValueError('please pick either season or month, not both')
    if season is not None:
        g = g.sel(time=g['time.season'] == season)
        label = 'Gustiness {} IMS station in {} season'.format(
            site, season)
    elif month is not None:
        g = g.sel(time=g['time.month'] == month)
        label = 'Gustiness {} IMS station in {} month'.format(
            site, month)
    else:
        label = 'Gustiness {} IMS station'.format(site)
# date = groupby_date_xr(g)
# # g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
# g_anoms = g.groupby(date) - g.groupby(date).mean('time')
# g_anoms = g_anoms.reset_coords(drop=True)
G = g.groupby('time.hour').mean('time') * 100.0
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
Gline = G.plot(ax=ax, color='b', marker='o', label='Gustiness')
ax.set_title(label)
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
# ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_hourly_anoms_thresh_50_homogenized.nc')[site]
    pw = pw.load().dropna('time')
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
elif month is not None:
pw = pw.sel(time=pw['time.month'] == month)
# date = groupby_date_xr(pw)
# pw = pw.groupby(date) - pw.groupby(date).mean('time')
# pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
PWline = pw.plot.line(ax=axpw, color='tab:green',
marker='s', label='PW ({})'.format(season))
axpw.axhline(0, color='k', linestyle='--')
lns = Gline + PWline
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
return lns
def plot_gustiness_facetgrid(path=work_yuval, ims_path=ims_path,
season='JJA', month=None, save=True):
import xarray as xr
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124'}
da = xr.DataArray([x for x in gnss_ims_dict.values()], dims=['GNSS'])
da['GNSS'] = [x for x in gnss_ims_dict.keys()]
to_remove = ['kabr', 'nzrt', 'katz', 'elro', 'klhv', 'yrcm', 'slom']
sites = [x for x in da['GNSS'].values if x not in to_remove]
da = da.sel(GNSS=sites)
gnss_order = ['bshm', 'mrav', 'drag', 'csar', 'yosh', 'dsea', 'tela', 'jslm',
'nrif', 'alon', 'ramo', 'elat']
df = da.to_dataframe('gnss')
da = df.reindex(gnss_order).to_xarray()['gnss']
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
lns = plot_gustiness(path=path, ims_path=ims_path,
ims_site=gnss_ims_dict[site],
site=site, season=season, month=month, ax=ax)
labs = [l.get_label() for l in lns]
if site in ['tela', 'alon', 'dsea', 'csar', 'elat', 'nrif']:
ax.legend(lns, labs, loc='upper center', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
elif site in ['drag']:
ax.legend(lns, labs, loc='upper right', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
else:
ax.legend(lns, labs, loc='best', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
ax.set_title('')
ax.set_ylabel(r'G anomalies $\times$$10^{2}$')
# ax.text(.8, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
for i, ax in enumerate(fg.axes.flatten()):
if i > (da.GNSS.size-1):
ax.set_axis_off()
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.974,
bottom=0.053,
left=0.041,
right=0.955,
hspace=0.15,
wspace=0.3)
filename = 'gustiness_israeli_gnss_pw_diurnal_{}.png'.format(season)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_fft_diurnal(path=work_yuval, save=True):
import xarray as xr
import numpy as np
import matplotlib.ticker as tck
sns.set_style("whitegrid",
{'axes.grid': True,
'xtick.bottom': True,
'font.family': 'serif',
'ytick.left': True})
sns.set_context('paper')
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
power = power.to_array('site')
sites = [x for x in power.site.values]
fg = power.plot.line(col='site', col_wrap=4,
sharex=False, figsize=(20, 18))
fg.set_xlabels('Frequency [cpd]')
fg.set_ylabels('PW PSD [dB]')
ticklabels = np.arange(0, 7)
for ax, site in zip(fg.axes.flatten(), sites):
sns.despine()
ax.set_title('')
ax.set_xticklabels(ticklabels)
# ax.tick_params(axis='y', which='minor')
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.set_xlim(0, 6.5)
ax.set_ylim(70, 125)
ax.grid(True)
ax.grid(which='minor', axis='y')
ax.text(.8, .85, site.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
fg.fig.tight_layout()
filename = 'power_pw_diurnal.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_rinex_availability_with_map(path=work_yuval, gis_path=gis_path,
scope='diurnal', ims=True,
dem_path=dem_path, fontsize=18, save=True):
# TODO: add box around merged stations and removed stations
# TODO: add color map labels to stations removed and merged
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from matplotlib.colors import ListedColormap
from aux_gps import path_glob
sns.set_style('whitegrid')
sns.set_style('ticks')
print('{} scope selected.'.format(scope))
fig = plt.figure(figsize=(20, 15))
# grid = plt.GridSpec(1, 2, width_ratios=[
# 5, 2], wspace=0.1)
grid = plt.GridSpec(1, 2, width_ratios=[
5, 3], wspace=0.05)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1]) # plt.subplot(122)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
if scope == 'diurnal':
file = path_glob(path, 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')[-1]
elif scope == 'annual':
file = path / 'GNSS_PW_monthly_thresh_50.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in ds if 'error' not in x]
ds = ds[just_pw]
da = ds.to_array('station').sel(time=slice(None,'2019'))
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# reorder for annual, coastal, highland and eastern:
stns = group_sites_to_xarray(scope='annual', upper=True).T.values.ravel()
stns = stns[~pd.isnull(stns)]
ds = ds[stns]
# colors:
colors = produce_colors_for_pwv_station(scope=scope, zebra=False)
title = 'Daily RINEX files availability for the Israeli GNSS stations'
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
fw='bold', grid=True,
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
# removed = ['hrmn', 'nizn', 'spir']
# removed = ['hrmn']
if scope == 'diurnal':
removed = ['hrmn', 'gilb', 'lhav']
elif scope == 'annual':
removed = ['hrmn', 'gilb', 'lhav']
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
# gps.loc[removed, :].plot(ax=ax_map, color='black', edgecolor='black', marker='s',
# alpha=1.0, markersize=25, facecolor='white')
# gps.loc[merged, :].plot(ax=ax_map, color='black', edgecolor='r', marker='s',
# alpha=0.7, markersize=25)
gps_stations = gps_list # [x for x in gps.index]
# to_plot_offset = ['mrav', 'klhv', 'nzrt', 'katz', 'elro']
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'rinex_israeli_gnss_map_{}.png'.format(scope)
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_means_box_plots(path=work_yuval, thresh=50, kind='box',
x='month', col_wrap=5, ylimits=None, twin=None,
twin_attrs=None,
xlimits=None, anoms=True, bins=None,
season=None, attrs_plot=True, save=True, ds_input=None):
import xarray as xr
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = [x.attrs for x in pw.data_vars.values()]
if x == 'month':
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# pw = pw.resample(time='MS').mean('time')
elif x == 'hour':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
if twin is not None:
twin = twin.groupby('time.month') - \
twin.groupby('time.month').mean('time')
twin = twin.reset_coords(drop=True)
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
elif x == 'day':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_daily_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
pw = pw.groupby('time.dayofyear') - \
                pw.groupby('time.dayofyear').mean('time')
if season is not None:
if season != 'all':
print('{} season is selected'.format(season))
pw = pw.sel(time=pw['time.season'] == season)
all_seas = False
if twin is not None:
twin = twin.sel(time=twin['time.season'] == season)
else:
print('all seasons selected')
all_seas = True
else:
all_seas = False
for i, da in enumerate(pw.data_vars):
pw[da].attrs = attrs[i]
if not attrs_plot:
attrs = None
if ds_input is not None:
# be carful!:
pw = ds_input
fg = plot_multi_box_xr(pw, kind=kind, x=x, col_wrap=col_wrap,
ylimits=ylimits, xlimits=xlimits, attrs=attrs,
bins=bins, all_seasons=all_seas, twin=twin,
twin_attrs=twin_attrs)
attrs = [x.attrs for x in pw.data_vars.values()]
for i, ax in enumerate(fg.axes.flatten()):
try:
mean_years = float(attrs[i]['mean_years'])
# print(i)
# print(mean_years)
except IndexError:
ax.set_axis_off()
pass
if kind != 'hist':
[fg.axes[x, 0].set_ylabel('PW [mm]')
for x in range(len(fg.axes[:, 0]))]
# [fg.axes[-1, x].set_xlabel('month') for x in range(len(fg.axes[-1, :]))]
fg.fig.subplots_adjust(top=0.98,
bottom=0.05,
left=0.025,
right=0.985,
hspace=0.27,
wspace=0.215)
if season is not None:
filename = 'pw_{}ly_means_{}_seas_{}.png'.format(x, kind, season)
else:
filename = 'pw_{}ly_means_{}.png'.format(x, kind)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_interannual_MLR_results(path=climate_path, fontsize=16, save=True):
import matplotlib.pyplot as plt
from climate_works import run_best_MLR
# rds = xr.load_dataset(path / 'best_MLR_interannual_gnss_pwv.nc')
model_lci, rdf_lci = run_best_MLR(plot=False, heatmap=False, keep='lci',
add_trend=True)
rds_lci = model_lci.results_
model_eofi, rdf_eofi = run_best_MLR(plot=False, heatmap=False, keep='eofi',
add_trend=False)
rds_eofi = model_eofi.results_
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 7))
origln = rds_lci['original'].plot.line('k-.', ax=axes[0], linewidth=1.5)
predln_lci = rds_lci['predict'].plot.line('b-', ax=axes[0], linewidth=1.5)
predln_eofi = rds_eofi['predict'].plot.line(
'g-', ax=axes[0], linewidth=1.5)
r2_lci = rds_lci['r2_adj'].item()
r2_eofi = rds_eofi['r2_adj'].item()
axes[0].legend(origln+predln_lci+predln_eofi, ['mean PWV (12m-mean)', 'MLR with LCI (Adj R$^2$:{:.2f})'.format(
r2_lci), 'MLR with EOFs (Adj R$^2$:{:.2f})'.format(r2_eofi)], fontsize=fontsize-2)
axes[0].grid()
axes[0].set_xlabel('')
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].grid(which='minor', color='k', linestyle='--')
residln_lci = rds_lci['resid'].plot.line('b-', ax=axes[1])
residln_eofi = rds_eofi['resid'].plot.line('g-', ax=axes[1])
axes[1].legend(residln_lci+residln_eofi, ['MLR with LCI',
'MLR with EOFs'], fontsize=fontsize-2)
axes[1].grid()
axes[1].set_ylabel('Residuals [mm]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
axes[1].xaxis.set_major_locator(mdates.YearLocator(2))
axes[1].xaxis.set_minor_locator(mdates.YearLocator(1))
axes[1].xaxis.set_major_formatter(years_fmt)
axes[1].grid(which='minor', color='k', linestyle='--')
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
axes[1].figure.autofmt_xdate()
fig.tight_layout()
fig.subplots_adjust()
if save:
filename = 'pw_interannual_MLR_comparison.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_annual_pw(path=work_yuval, fontsize=20, labelsize=18, compare='uerra',
ylim=[7.5, 40], save=True, kind='violin', bins=None, ds=None,
add_temperature=False):
"""kind can be violin or hist, for violin choose ylim=7.5,40 and for hist
choose ylim=0,0.3"""
import xarray as xr
import pandas as pd
import numpy as np
from synoptic_procedures import slice_xr_with_synoptic_class
gnss_filename = 'GNSS_PW_monthly_thresh_50.nc'
# gnss_filename = 'first_climatol_try.nc'
pw = xr.load_dataset(path / gnss_filename)
df_annual = pw.to_dataframe()
hue = None
if compare is not None:
df_annual = prepare_reanalysis_monthly_pwv_to_dataframe(
path, re=compare, ds=ds)
hue = 'source'
if not add_temperature:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind=kind,
fg=None,
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, hue=hue,
save=False, bins=bins)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
filename = 'pw_annual_means_{}.png'.format(kind)
else:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind='mean_month',
fg=None, ticklabelcolor='tab:blue',
ylim=[10, 31], color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, hue=None,
save=False, bins=None)
# tmm = xr.load_dataset(path / 'GNSS_TD_monthly_1996_2020.nc')
tmm = xr.load_dataset(path / 'IMS_T/GNSS_TD_daily.nc')
tmm = tmm.groupby('time.month').mean()
dftm = tmm.to_dataframe()
# dftm.columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
sites = group_sites_to_xarray(scope='annual')
sites_flat = sites.values.ravel()
# sites = sites[~pd.isnull(sites)]
for i, ax in enumerate(fg.axes.flat):
if pd.isnull(sites_flat[i]):
continue
twinax = ax.twinx()
twinax.plot(dftm.index.values, dftm[sites_flat[i]].values, color='tab:red',
markersize=10, marker='s', lw=1, markerfacecolor="None",
label='Temperature')
# dftm[sites[i]].plot(ax=twinax, color='r', markersize=10,
# marker='s', lw=1, markerfacecolor="None")
twinax.set_ylim(5, 37)
twinax.set_yticks(np.arange(5, 40, 10))
twinax.tick_params(axis='y', which='major', labelcolor='tab:red',
labelsize=labelsize)
if sites_flat[i] in sites.sel(group='eastern'):
twinax.set_ylabel(r'Temperature [$\degree$ C]',
fontsize=labelsize)
# fg.fig.canvas.draw()
# twinax.xaxis.set_ticks(np.arange(1, 13))
# twinax.tick_params(axis='x', which='major', labelsize=labelsize-2)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = twinax.get_legend_handles_labels()
labels = ['PWV', 'Surface Temperature']
fg.fig.legend(handles=lines+lines2, labels=labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.97,
bottom=0.029,
left=0.049,
right=0.96,
hspace=0.15,
wspace=0.17)
filename = 'pw_annual_means_temperature.png'
if save:
if compare is not None:
filename = 'pw_annual_means_{}_with_{}.png'.format(kind, compare)
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_multi_box_xr(pw, kind='violin', x='month', sharex=False, sharey=False,
col_wrap=5, ylimits=None, xlimits=None, attrs=None,
bins=None, all_seasons=False, twin=None, twin_attrs=None):
import xarray as xr
pw = pw.to_array('station')
if twin is not None:
twin = twin.to_array('station')
fg = xr.plot.FacetGrid(pw, col='station', col_wrap=col_wrap, sharex=sharex,
sharey=sharey)
for i, (sta, ax) in enumerate(zip(pw['station'].values, fg.axes.flatten())):
pw_sta = pw.sel(station=sta).reset_coords(drop=True)
if all_seasons:
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'DJF')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='o')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'MAM')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='^')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'JJA')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='s')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'SON')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='x')
df = pw_sta.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='d')
if sta == 'nrif' or sta == 'elat':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper center', framealpha=0.5, fancybox=True)
elif sta == 'yrcm' or sta == 'ramo':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper right', framealpha=0.5, fancybox=True)
else:
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='best', framealpha=0.5, fancybox=True)
else:
# if x == 'hour':
# # remove seasonal signal:
# pw_sta = pw_sta.groupby('time.dayofyear') - pw_sta.groupby('time.dayofyear').mean('time')
# elif x == 'month':
# # remove daily signal:
# pw_sta = pw_sta.groupby('time.hour') - pw_sta.groupby('time.hour').mean('time')
df = pw_sta.to_dataframe(sta)
if twin is not None:
twin_sta = twin.sel(station=sta).reset_coords(drop=True)
twin_df = twin_sta.to_dataframe(sta)
else:
twin_df = None
if attrs is not None:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i],
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
else:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None,
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
return fg
def plot_box_df(df, x='month', title='TELA', marker='o',
ylabel=r'IWV [kg$\cdot$m$^{-2}$]', ax=None, kind='violin',
ylimits=(5, 40), xlimits=None, attrs=None, bins=None, twin_df=None,
twin_attrs=None):
# x=hour is experimental
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
# df = da_ts.to_dataframe()
if x == 'month':
df[x] = df.index.month
pal = sns.color_palette("Paired", 12)
elif x == 'hour':
df[x] = df.index.hour
if twin_df is not None:
twin_df[x] = twin_df.index.hour
# df[x] = df.index
pal = sns.color_palette("Paired", 12)
y = df.columns[0]
if ax is None:
fig, ax = plt.subplots()
if kind is None:
df = df.groupby(x).mean()
df.plot(ax=ax, legend=False, marker=marker)
if twin_df is not None:
twin_df = twin_df.groupby(x).mean()
twinx = ax.twinx()
twin_df.plot.line(ax=twinx, color='r', marker='s')
ax.axhline(0, color='k', linestyle='--')
if twin_attrs is not None:
twinx.set_ylabel(twin_attrs['ylabel'])
align_yaxis(ax, 0, twinx, 0)
ax.set_xlabel('Time of day [UTC]')
elif kind == 'violin':
sns.violinplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
gridsize=250, inner='quartile', scale='area')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'box':
kwargs = dict(markerfacecolor='r', marker='o')
sns.boxplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
whis=1.0, flierprops=kwargs, showfliers=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'hist':
if bins is None:
bins = 15
a = df[y].dropna()
sns.distplot(ax=ax, a=a, norm_hist=True, bins=bins, axlabel='PW [mm]')
xmean = df[y].mean()
xmedian = df[y].median()
std = df[y].std()
sk = skew(df[y].dropna().values)
kurt = kurtosis(df[y].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
ax.vlines(x=xmedian, ymin=0, ymax=ymed, color='g', linestyle='-')
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
# ax.legend(['Mean:{:.1f}'.format(xmean),'Median:{:.1f}'.format(xmedian),'Mode:{:.1f}'.format(xmode)])
ax.legend(['Mean: {:.1f}'.format(xmean),
'Median: {:.1f}'.format(xmedian)])
ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(
std, sk, kurt), transform=ax.transAxes)
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.grid(True, which='minor', linestyle='--', linewidth=1, alpha=0.7)
ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
title = ax.get_title().split('=')[-1].strip(' ')
if attrs is not None:
mean_years = float(attrs['mean_years'])
ax.set_title('')
ax.text(.2, .85, y.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if kind is not None:
if kind != 'hist':
ax.text(.22, .72, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
ax.yaxis.tick_left()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ylimits is not None:
ax.set_ylim(*ylimits)
if twin_attrs is not None:
twinx.set_ylim(*twin_attrs['ylimits'])
align_yaxis(ax, 0, twinx, 0)
if xlimits is not None:
ax.set_xlim(*xlimits)
return ax
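# Illustrative usage sketch for plot_box_df with a synthetic hourly series
# (the values are invented; 'tela' is used only as a column label here).
def _example_plot_box_df():
    import numpy as np
    import pandas as pd
    times = pd.date_range('2015-01-01', '2018-12-31', freq='H')
    vals = 20 + 5 * np.sin(2 * np.pi * times.dayofyear / 365.25) \
        + np.random.randn(len(times))
    df = pd.DataFrame({'tela': vals}, index=times)
    ax = plot_box_df(df, x='month', title='synthetic', kind='box',
                     ylimits=(5, 40))
    return ax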
def plot_means_pw(load_path=work_yuval, ims_path=ims_path, thresh=50,
col_wrap=5, means='hour', save=True):
import xarray as xr
import numpy as np
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
if means == 'hour':
# remove long term monthly means:
pw_clim = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw_clim = pw_clim.groupby('time.{}'.format(means)).mean('time')
else:
pw_clim = pw.groupby('time.{}'.format(means)).mean('time')
# T = xr.load_dataset(
# ims_path /
# 'GNSS_5mins_TD_ALL_1996_2020.nc')
# T_clim = T.groupby('time.month').mean('time')
attrs = [x.attrs for x in pw.data_vars.values()]
fg = pw_clim.to_array('station').plot(col='station', col_wrap=col_wrap,
color='b', marker='o', alpha=0.7,
sharex=False, sharey=True)
col_arr = np.arange(0, len(pw_clim))
right_side = col_arr[col_wrap-1::col_wrap]
for i, ax in enumerate(fg.axes.flatten()):
title = ax.get_title().split('=')[-1].strip(' ')
try:
mean_years = float(attrs[i]['mean_years'])
ax.set_title('')
ax.text(.2, .85, title.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
ax.text(.2, .73, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
# ax_t = ax.twinx()
# T_clim['{}'.format(title)].plot(
# color='r', linestyle='dashed', marker='s', alpha=0.7,
# ax=ax_t)
# ax_t.set_ylim(0, 30)
fg.fig.canvas.draw()
# labels = [item.get_text() for item in ax_t.get_yticklabels()]
# ax_t.yaxis.set_ticklabels([])
# ax_t.tick_params(axis='y', color='r')
# ax_t.set_ylabel('')
# if i in right_side:
# ax_t.set_ylabel(r'Surface temperature [$\degree$C]', fontsize=10)
# ax_t.yaxis.set_ticklabels(labels)
# ax_t.tick_params(axis='y', labelcolor='r', color='r')
# show months ticks and grid lines for pw:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid()
# ax.legend([ax.lines[0], ax_t.lines[0]], ['PW', 'T'],
# loc='upper right', fontsize=10, prop={'size': 8})
# ax.legend([ax.lines[0]], ['PW'],
# loc='upper right', fontsize=10, prop={'size': 8})
except IndexError:
pass
# change bottom xticks to 1-12 and show them:
# fg.axes[-1, 0].xaxis.set_ticks(np.arange(1, 13))
[fg.axes[x, 0].set_ylabel('PW [mm]') for x in range(len(fg.axes[:, 0]))]
# adjust subplots:
fg.fig.subplots_adjust(top=0.977,
bottom=0.039,
left=0.036,
right=0.959,
hspace=0.185,
wspace=0.125)
filename = 'PW_{}_climatology.png'.format(means)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_gnss_radiosonde_monthly_means(sound_path=sound_path, path=work_yuval,
times=['2014', '2019'], sample='MS',
gps_station='tela', east_height=5000):
import xarray as xr
from aux_gps import path_glob
import pandas as pd
file = path_glob(sound_path, 'bet_dagan_phys_PW_Tm_Ts_*.nc')
phys = xr.load_dataset(file[0])['PW']
if east_height is not None:
file = path_glob(sound_path, 'bet_dagan_edt_sounding*.nc')
east = xr.load_dataset(file[0])['east_distance']
east = east.resample(sound_time=sample).mean().sel(
Height=east_height, method='nearest')
east_df = east.reset_coords(drop=True).to_dataframe()
if times is not None:
phys = phys.sel(sound_time=slice(*times))
ds = phys.resample(sound_time=sample).mean(
).to_dataset(name='Bet-dagan-radiosonde')
ds = ds.rename({'sound_time': 'time'})
gps = xr.load_dataset(
path / 'GNSS_PW_thresh_50_homogenized.nc')[gps_station]
if times is not None:
gps = gps.sel(time=slice(*times))
ds[gps_station] = gps.resample(time=sample).mean()
df = ds.to_dataframe()
# now plot:
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
# [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
# for x in axes]
df.columns = ['Bet dagan soundings', '{} GNSS station'.format(gps_station)]
sns.lineplot(data=df, markers=['o', 's'], linewidth=2.0, ax=axes[0])
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = df.iloc[:, 1] - df.iloc[:, 0]
    df_r = df_r.rename('Residual distribution')
sns.lineplot(data=df_r, color='k', marker='o', linewidth=1.5, ax=axes[1])
if east_height is not None:
ax_east = axes[1].twinx()
sns.lineplot(data=east_df, color='red',
marker='x', linewidth=1.5, ax=ax_east)
ax_east.set_ylabel(
'East drift at {} km altitude [km]'.format(east_height / 1000.0))
axes[1].axhline(y=0, color='r')
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return ds
def plot_wetz_example(path=tela_results_path, plot='WetZ', fontsize=16,
save=True):
from aux_gps import path_glob
import matplotlib.pyplot as plt
from gipsyx_post_proc import process_one_day_gipsyx_output
filepath = path_glob(path, 'tela*_smoothFinal.tdp')[3]
if plot is None:
df, meta = process_one_day_gipsyx_output(filepath, True)
return df, meta
else:
df, meta = process_one_day_gipsyx_output(filepath, False)
if not isinstance(plot, str):
            raise ValueError('please pick only one field to plot, e.g., WetZ')
error_plot = '{}_error'.format(plot)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
desc = meta['desc'][plot]
unit = meta['units'][plot]
df[plot].plot(ax=ax, legend=False, color='k')
ax.fill_between(df.index, df[plot] - df[error_plot],
df[plot] + df[error_plot], alpha=0.5)
ax.grid()
# ax.set_title('{} from station TELA in {}'.format(
# desc, df.index[100].strftime('%Y-%m-%d')))
ax.set_ylabel('WetZ [{}]'.format(unit), fontsize=fontsize)
ax.set_xlabel('Time [UTC]', fontsize=fontsize)
ax.tick_params(which='both', labelsize=fontsize)
ax.grid('on')
fig.tight_layout()
filename = 'wetz_tela_daily.png'
    caption('{} from station TELA in {}. Note the error estimation from the GipsyX software (filled)'.format(
desc, df.index[100].strftime('%Y-%m-%d')))
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_figure_3(path=tela_solutions, year=2004, field='WetZ',
middle_date='11-25', zooms=[10, 3, 0.5], save=True):
from gipsyx_post_proc import analyse_results_ds_one_station
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
dss = xr.open_dataset(path / 'TELA_ppp_raw_{}.nc'.format(year))
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
da = analyse_results_ds_one_station(dss, field=field, plot=False)
fig, axes = plt.subplots(ncols=1, nrows=3, sharex=False, figsize=(16, 10))
for j, ax in enumerate(axes):
start = pd.to_datetime('{}-{}'.format(year, middle_date)
) - pd.Timedelta(zooms[j], unit='D')
end = pd.to_datetime('{}-{}'.format(year, middle_date)
) + pd.Timedelta(zooms[j], unit='D')
daa = da.sel(time=slice(start, end))
        for ppp in ds:
            ds[ppp].plot(ax=ax, linewidth=3.0)
daa.plot.line(marker='.', linewidth=0., ax=ax, color='k')
axes[j].set_xlim(start, end)
axes[j].set_ylim(daa.min() - 0.5, daa.max() + 0.5)
try:
axes[j - 1].axvline(x=start, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
axes[j - 1].axvline(x=end, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
except IndexError:
pass
units = ds.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
ax.grid()
# fig.suptitle(
# '30 hours stitched {} for GNSS station {}'.format(
# desc, sta), fontweight='bold')
fig.tight_layout()
caption('20, 6 and 1 days of zenith wet delay in 2004 from the TELA GNSS station for the top, middle and bottom figures respectively. The colored segments represent daily solutions while the black dots represent smoothed mean solutions.')
filename = 'zwd_tela_discon_panel.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
# fig.subplots_adjust(top=0.95)
return axes
def plot_figure_3_1(path=work_yuval, data='zwd'):
import xarray as xr
from aux_gps import plot_tmseries_xarray
from PW_stations import load_gipsyx_results
if data == 'zwd':
tela = load_gipsyx_results('tela', sample_rate='1H', plot_fields=None)
label = 'ZWD [cm]'
title = 'Zenith wet delay derived from GPS station TELA'
ax = plot_tmseries_xarray(tela, 'WetZ')
elif data == 'pw':
ds = xr.open_dataset(path / 'GNSS_hourly_PW.nc')
tela = ds['tela']
label = 'PW [mm]'
title = 'Precipitable water derived from GPS station TELA'
ax = plot_tmseries_xarray(tela)
ax.set_ylabel(label)
ax.set_xlim('1996-02', '2019-07')
ax.set_title(title)
ax.set_xlabel('')
ax.figure.tight_layout()
return ax
def plot_ts_tm(path=sound_path, model='TSEN',
times=['2007', '2019'], fontsize=14, save=True):
"""plot ts-tm relashonship"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from PW_stations import ML_Switcher
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from sounding_procedures import get_field_from_radiosonde
models_dict = {'LR': 'Linear Regression',
'TSEN': 'Theil–Sen Regression'}
# sns.set_style('whitegrid')
pds = xr.Dataset()
Ts = get_field_from_radiosonde(path=sound_path, field='Ts',
data_type='phys', reduce=None, times=times,
plot=False)
Tm = get_field_from_radiosonde(path=sound_path, field='Tm',
data_type='phys', reduce='min', times=times,
plot=False)
pds['Tm'] = Tm
pds['Ts'] = Ts
pds = pds.dropna('sound_time')
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
pds.plot.scatter(
x='Ts',
y='Tm',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.grid()
ml = ML_Switcher()
fit_model = ml.pick_model(model)
X = pds.Ts.values.reshape(-1, 1)
y = pds.Tm.values
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
ax.plot(X, predict, c='r')
bevis_tm = pds.Ts.values * 0.72 + 70.0
ax.plot(pds.Ts.values, bevis_tm, c='purple')
ax.legend(['{} ({:.2f}, {:.2f})'.format(models_dict.get(model),
coef, inter), 'Bevis 1992 et al. (0.72, 70.0)'], fontsize=fontsize-4)
# ax.set_xlabel('Surface Temperature [K]')
# ax.set_ylabel('Water Vapor Mean Atmospheric Temperature [K]')
ax.set_xlabel('Ts [K]', fontsize=fontsize)
ax.set_ylabel('Tm [K]', fontsize=fontsize)
ax.set_ylim(265, 320)
ax.tick_params(labelsize=fontsize)
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = predict - y
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", 'edgecolor': 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(y, predict))
print(rmean, rmse)
r2 = r2_score(y, predict)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[K]')
textstr = '\n'.join(['n={}'.format(pds.Ts.size),
'RMSE: ', '{:.2f} K'.format(rmse)]) # ,
# r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# axin1.text(0.2, 0.9, 'n={}'.format(pds.Ts.size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.78, 0.9, 'RMSE: {:.2f} K'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
axin1.set_xlim(-15, 15)
fig.tight_layout()
filename = 'Bet_dagan_ts_tm_fit_{}-{}.png'.format(times[0], times[1])
    caption('Water vapor mean temperature (Tm) vs. surface temperature (Ts) of the Bet-Dagan radiosonde station. Ordinary least squares linear fit (red) yields the residual distribution with RMSE of 4 K. Bevis (1992) model is plotted (purple) for comparison.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_pw_tela_bet_dagan_scatterplot(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
cats=None,
times=['2007', '2019'], wv_name='pw',
r2=False, fontsize=14,
save=True):
"""plot the PW of Bet-Dagan vs. PW of gps station"""
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# sns.set_style('white')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path, sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
tpw = 'tpw_bet_dagan'
ds = ds[[tpw, 'tela_pw']].dropna('time')
ds = ds.sel(time=slice(*times))
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
ds.plot.scatter(x=tpw,
y='tela_pw',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.plot(ds[tpw], ds[tpw], c='r')
ax.legend(['y = x'], loc='upper right', fontsize=fontsize)
if wv_name == 'pw':
ax.set_xlabel('PWV from Bet-Dagan [mm]', fontsize=fontsize)
ax.set_ylabel('PWV from TELA GPS station [mm]', fontsize=fontsize)
elif wv_name == 'iwv':
ax.set_xlabel(
r'IWV from Bet-Dagan station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.set_ylabel(
r'IWV from TELA GPS station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.grid()
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = ds.tela_pw.values - ds[tpw].values
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", "edgecolor": 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(ds[tpw].values, ds.tela_pw.values))
r2s = r2_score(ds[tpw].values, ds.tela_pw.values)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[mm]')
ax.tick_params(labelsize=fontsize)
if wv_name == 'pw':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse)])
elif wv_name == 'iwv':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(rmse)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#
# axin1.text(0.2, 0.95, 'n={}'.format(ds[tpw].size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.3, 0.85, 'bias: {:.2f} mm'.format(rmean),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.35, 0.75, 'RMSE: {:.2f} mm'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# fig.suptitle('Precipitable Water comparison for the years {} to {}'.format(*times))
fig.tight_layout()
caption(
        'PW from TELA GNSS station vs. PW from Bet-Dagan radiosonde station in {}-{}. A 45 degree line is plotted (red) for comparison. Note the skew in the residual distribution with an RMSE of 4.37 mm.'.format(times[0], times[1]))
# fig.subplots_adjust(top=0.95)
filename = 'Bet_dagan_tela_pw_compare_{}-{}.png'.format(times[0], times[1])
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ds
def plot_tela_bet_dagan_comparison(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
times=['2007', '2020'], cats=None,
compare='pwv',
save=True):
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
# sns.set_style('whitegrid')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path,
sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
ds = ds.dropna('time')
ds = ds.sel(time=slice(*times))
if compare == 'zwd':
df = ds[['zwd_bet_dagan', 'tela']].to_dataframe()
elif compare == 'pwv':
df = ds[['tpw_bet_dagan', 'tela_pw']].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
df.columns = ['Bet-Dagan soundings', 'TELA GNSS station']
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
    df_r = df.iloc[:, 0] - df.iloc[:, 1]
    # the difference is a Series (it has no .columns attribute); name it instead
    df_r.name = 'Residual distribution'
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if compare == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif compare == 'pwv':
axes[0].set_ylabel('Precipitable Water Vapor [mm]')
axes[1].set_ylabel('Residuals [mm]')
# axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate(
'changed sonde type from VIZ MK-II to PTU GPS',
(mdates.date2num(sonde_change_x),
10),
xytext=(
15,
15),
textcoords='offset points',
arrowprops=dict(
arrowstyle='fancy',
color='red'),
color='red')
# axes[1].set_aspect(3)
[x.set_xlim(*[pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
filename = 'Bet_dagan_tela_{}_compare.png'.format(compare)
    caption('Top: zenith wet delay from Bet-Dagan radiosonde station (blue circles) and from TELA GNSS station (orange x) in 2007-2019. Bottom: residuals. Note that the residuals become constrained from 08-2013, probably due to an equipment change.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
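# Hypothetical usage sketch (not part of the original workflow); it assumes the
# module-level default paths exist and hold the pre-processed datasets:
# df_cmp = plot_tela_bet_dagan_comparison(station='tela', times=['2007', '2020'],
#                                         compare='pwv', save=False)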
def plot_israel_map_from_shape_file(gis_path=gis_path):
import geopandas as gpd
agr = gpd.read_file(gis_path/'ISR_agriculture_districts.shp')
isr = gpd.GeoSeries(agr.geometry.unary_union)
isr.crs = agr.crs
isr = isr.to_crs(epsg=4326)
return isr
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
source=ctx.providers.Stamen.TerrainBackground,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax
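# Minimal usage sketch (assumes gis_path contains 'Israel_and_Yosh.shp' and that
# contextily can reach its tile provider over the network):
# ax = plot_israel_map(gis_path=gis_path, ticklabelsize=10)
# ax.figure.savefig('israel_basemap.png', bbox_inches='tight')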
def plot_israel_with_stations(gis_path=gis_path, dem_path=dem_path, ims=True,
gps=True, radio=True, terrain=True, alt=False,
ims_names=False, gps_final=False, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import geopandas as gpd
ax = plot_israel_map(gis_path)
station_names = []
legend = []
if ims:
print('getting IMS temperature stations metadata...')
ims_t = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims_t.plot(ax=ax, color='red', edgecolor='black', alpha=0.5)
station_names.append('ims')
legend.append('IMS stations')
if ims_names:
geo_annotate(ax, ims_t.lon, ims_t.lat,
ims_t['name_english'], xytext=(3, 3), fmt=None,
c='k', fw='normal', fs=7, colorupdown=False)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
if gps:
print('getting solved GNSS israeli stations metadata...')
gps_df = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
if gps_final:
to_drop = ['gilb', 'lhav', 'hrmn', 'nizn', 'spir']
gps_final_stations = [x for x in gps_df.index if x not in to_drop]
gps = gps_df.loc[gps_final_stations, :]
gps.plot(ax=ax, color='k', edgecolor='black', marker='s')
gps_stations = [x for x in gps.index]
to_plot_offset = ['gilb', 'lhav']
# [gps_stations.remove(x) for x in to_plot_offset]
gps_normal_anno = gps.loc[gps_stations, :]
# gps_offset_anno = gps.loc[to_plot_offset, :]
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
if alt:
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.alt, xytext=(4, -6), fmt='{:.0f}',
c='k', fw='bold', fs=9, colorupdown=False)
# geo_annotate(ax, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('gps')
legend.append('GNSS stations')
if terrain:
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax.set_xlabel('')
ax.set_ylabel('')
if radio: # plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax, color='black', edgecolor='black',
marker='+')
geo_annotate(ax, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('radio')
legend.append('radiosonde')
if legend:
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.subplots_adjust(bottom=0.05)
if station_names:
station_names = '_'.join(station_names)
else:
station_names = 'no_stations'
filename = 'israel_map_{}.png'.format(station_names)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_zwd_lapse_rate(path=work_yuval, fontsize=18, model='TSEN', save=True):
from PW_stations import calculate_zwd_altitude_fit
df, zwd_lapse_rate = calculate_zwd_altitude_fit(path=path, model=model,
plot=True, fontsize=fontsize)
    if save:
        filename = 'zwd_lapse_rate.png'
        plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_ims_T_lapse_rate(ims_path=ims_path, dt='2013-10-19T22:00:00',
fontsize=16, save=True):
from aux_gps import path_glob
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# from matplotlib import rc
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# rc('text', usetex=False)
# rc('text',latex.unicode=False)
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice('1996', None)})
# years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
# T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
# T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
dt = pd.to_datetime(dt)
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, 'auto')
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'k'}, ax=ax_lapse)
# suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]', fontsize=fontsize)
ax_lapse.set_ylabel(r'Temperature [$\degree$C]', fontsize=fontsize)
ax_lapse.text(0.5, 0.95, r'Lapse rate: {:.2f} $\degree$C/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize,
transform=ax_lapse.transAxes, color='k')
ax_lapse.grid()
ax_lapse.tick_params(labelsize=fontsize)
# ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
fig.tight_layout()
filename = 'ims_lapse_rate_example.png'
    caption('Temperature vs. altitude at 10 PM on 2013-10-19 for all automated 10-min IMS stations. The lapse rate is calculated using an ordinary least squares linear fit.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax_lapse
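# The lapse-rate logic above boils down to: fit T = a*z + b over station
# altitudes, convert the slope to degC/km and clip it to a plausible 5-10
# degC/km range. A standalone sketch with made-up numbers, for illustration:
# import numpy as np
# z = np.array([10., 250., 600., 900.])      # station altitudes [m]
# T = np.array([28.0, 26.5, 24.0, 22.1])     # temperatures [degC]
# a, b = np.polyfit(z, T, 1)
# lapse_rate = float(np.clip(abs(a) * 1000, 5.0, 10.0))   # degC per km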
def plot_figure_9(hydro_path=hydro_path, gis_path=gis_path, pw_anom=False,
max_flow_thresh=None, wv_name='pw', save=True):
from hydro_procedures import get_hydro_near_GNSS
from hydro_procedures import loop_over_gnss_hydro_and_aggregate
import matplotlib.pyplot as plt
df = get_hydro_near_GNSS(
radius=5,
hydro_path=hydro_path,
gis_path=gis_path,
plot=False)
ds = loop_over_gnss_hydro_and_aggregate(df, pw_anom=pw_anom,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=3,
plot=False, plot_all=False)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(10, 6))
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
xlabels = [x.replace('−', '') for x in labels]
ax.set_xticklabels(xlabels)
fig.canvas.draw()
if wv_name == 'pw':
if pw_anom:
ax.set_ylabel('PW anomalies [mm]')
else:
ax.set_ylabel('PW [mm]')
elif wv_name == 'iwv':
if pw_anom:
ax.set_ylabel(r'IWV anomalies [kg$\cdot$m$^{-2}$]')
else:
ax.set_ylabel(r'IWV [kg$\cdot$m$^{-2}$]')
fig.tight_layout()
# if pw_anom:
# title = 'Mean PW anomalies for tide stations near all GNSS stations'
# else:
# title = 'Mean PW for tide stations near all GNSS stations'
# if max_flow_thresh is not None:
# title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
# ax.set_title(title)
if pw_anom:
filename = 'hydro_tide_lag_pw_anom.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
else:
filename = 'hydro_tide_lag_pw.png'
if max_flow_thresh:
            filename = 'hydro_tide_lag_pw_max{}.png'.format(
                max_flow_thresh)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def produce_table_1(removed=['hrmn', 'nizn', 'spir'], merged={'klhv': ['klhv', 'lhav'],
'mrav': ['gilb', 'mrav']}, add_location=False,
scope='annual', remove_distance=True):
"""for scope='diurnal' use removed=['hrmn'], add_location=True
and remove_distance=False"""
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
sites = group_sites_to_xarray(upper=False, scope=scope)
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
new = sites.T.values.ravel()
if scope == 'annual':
new = [x for x in new.astype(str) if x != 'nan']
df_gnss = df_gnss.reindex(new)
df_gnss['ID'] = df_gnss.index.str.upper()
pd.options.display.float_format = '{:.2f}'.format
df = df_gnss[['name', 'ID', 'lat', 'lon', 'alt', 'distance']]
df['alt'] = df['alt'].map('{:,.0f}'.format)
df['distance'] = df['distance'].astype(int)
cols = ['GNSS Station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]', 'Distance from shore [km]']
df.columns = cols
if scope != 'annual':
df.loc['spir', 'GNSS Station name'] = 'Sapir'
if remove_distance:
df = df.iloc[:, 0:-1]
if add_location:
groups = group_sites_to_xarray(upper=False, scope=scope)
coastal = groups.sel(group='coastal').values
coastal = coastal[~pd.isnull(coastal)]
highland = groups.sel(group='highland').values
highland = highland[~pd.isnull(highland)]
eastern = groups.sel(group='eastern').values
eastern = eastern[~pd.isnull(eastern)]
df.loc[coastal, 'Location'] = 'Coastal'
df.loc[highland, 'Location'] = 'Highland'
df.loc[eastern, 'Location'] = 'Eastern'
if removed is not None:
df = df.loc[[x for x in df.index if x not in removed], :]
if merged is not None:
return df
print(df.to_latex(index=False))
return df
def produce_table_stats(thresh=50, add_location=True, add_height=True):
"""add plot sd to height with se_sd errorbars"""
from PW_stations import produce_pw_statistics
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
import xarray as xr
    sites = group_sites_to_xarray(upper=False, scope='annual')
    new = sites.T.values.ravel()
    new = [x for x in new.astype(str) if x != 'nan']
pw_mm = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
pw_mm = pw_mm[new]
df = produce_pw_statistics(
thresh=thresh, resample_to_mm=False, pw_input=pw_mm)
if add_location:
cols = [x for x in df.columns]
cols.insert(1, 'Location')
gr_df = sites.to_dataframe('sites')
location = [gr_df[gr_df == x].dropna().index.values.item()[
1].title() for x in new]
df['Location'] = location
df = df[cols]
if add_height:
cols = [x for x in df.columns]
if add_location:
cols.insert(2, 'Height [m a.s.l]')
else:
cols.insert(1, 'Height [m a.s.l]')
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=False)
# pd.options.display.float_format = '{:.2f}'.format
df['Height [m a.s.l]'] = df_gnss['alt'].map('{:.0f}'.format)
df = df[cols]
print(df.to_latex(index=False))
return df
def plot_pwv_longterm_trend(path=work_yuval, model_name='LR', save=True,
fontsize=16, add_era5=True):
import matplotlib.pyplot as plt
from aux_gps import linear_fit_using_scipy_da_ts
# from PW_stations import ML_Switcher
import xarray as xr
from aux_gps import anomalize_xr
"""TSEN and LR for linear fit"""
# load GNSS Israel:
# pw = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_mean = pw_anoms.to_array('station').mean('station')
pw_std = pw_anoms.to_array('station').std('station')
pw_weights = 1 / pw_anoms.to_array('station').count('station')
# add ERA5:
era5 = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_anoms = era5_anoms.sel(time=slice(
pw_mean.time.min(), pw_mean.time.max()))
era5_mean = era5_anoms.to_array('station').mean('station')
era5_std = era5_anoms.to_array('station').std('station')
# init linear models
# ml = ML_Switcher()
# model = ml.pick_model(model_name)
if add_era5:
fig, ax = plt.subplots(2, 1, figsize=(15, 7.5))
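        # slope_factor=3650.25 apparently scales the fitted per-day slope to a
        # per-decade value (~10 years in days), matching the mm/decade label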
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=pw_weights)
pwln = pw_mean.plot(ax=ax[0], color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax[0], color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_lo.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax[0].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[0].grid()
ax[0].set_xlabel('')
ax[0].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[0].tick_params(labelsize=fontsize)
trend1, trend_hi1, trend_lo1, slope1, slope_hi1, slope_lo1 = linear_fit_using_scipy_da_ts(era5_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=era5_std)
era5ln = era5_mean.plot(ax=ax[1], color='k', marker='o', linewidth=1.5)
trendln1 = trend1.plot(ax=ax[1], color='r', linewidth=2)
trend_hi1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_lo1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope1, slope_lo1, slope_hi1)
handles = era5ln+trendln1
labels = ['ERA5-mean']
labels.append(trend_label)
ax[1].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[1].grid()
ax[1].set_xlabel('')
ax[1].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[1].tick_params(labelsize=fontsize)
else:
fig, ax = plt.subplots(1, 1, figsize=(15, 5.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None)
pwln = pw_mean.plot(ax=ax, color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax, color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax, linewidth=1.5)
trend_lo.plot.line('r--', ax=ax, linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax.legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
fig.suptitle('PWV mean anomalies and linear trend',
fontweight='bold', fontsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_mean_trend_{}.png'.format(model_name)
plt.savefig(savefig_path / filename, orientation='portrait')
return ax
def plot_trend_filled_pwv_and_era5_barh_plot(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
from PW_stations import process_mkt_from_dataset
import pandas as pd
import seaborn as sns
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
era5 = era5[[x for x in era5 if x in gnss]]
df_gnss = process_mkt_from_dataset(
gnss,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_gnss = add_location_to_GNSS_stations_dataframe(df_gnss)
df_gnss['sig'] = df_gnss['p'].astype(float) <= 0.05
df_era5 = process_mkt_from_dataset(
era5,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_era5 = add_location_to_GNSS_stations_dataframe(df_era5)
df_era5['sig'] = df_era5['p'].astype(float) <= 0.05
df = pd.concat([df_gnss, df_era5], keys=['GNSS', 'ERA5'])
df1 = df.unstack(level=0)
df = df1.stack().reset_index()
df.columns = ['station', '', 'p', 'Tau', 'slope', 'intercept', 'CI_5_low',
'CI_5_high', 'Location', 'sig']
sns.barplot(x="slope", y='station', hue='', data=df[df['sig']])
# df['slope'].unstack(level=0).plot(kind='barh', subplots=False, xerr=1)
return df
def produce_filled_pwv_and_era5_mann_kendall_table(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
df = add_comparison_to_mann_kendall_table(gnss, era5, 'GNSS', 'ERA5')
print(df.to_latex(header=False, index=False))
return df
def add_comparison_to_mann_kendall_table(ds1, ds2, name1='GNSS', name2='ERA5',
alpha=0.05):
df1 = produce_table_mann_kendall(ds1, alpha=alpha)
df2 = produce_table_mann_kendall(ds2, alpha=alpha)
df = df1['Site ID'].to_frame()
df[name1+'1'] = df1["Kendall's Tau"]
df[name2+'1'] = df2["Kendall's Tau"]
df[name1+'2'] = df1['P-value']
df[name2+'2'] = df2['P-value']
df[name1+'3'] = df1["Sen's slope"]
df[name2+'3'] = df2["Sen's slope"]
df[name1+'4'] = df1["Percent change"]
df[name2+'4'] = df2["Percent change"]
return df
def produce_table_mann_kendall(pwv_ds, alpha=0.05,
sort_by=['groups_annual', 'lat']):
from PW_stations import process_mkt_from_dataset
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import reduce_tail_xr
import xarray as xr
def table_process_df(df, means):
df_sites = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df_sites.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
# calculate percent changes from last decade means:
df['CI95'] = '(' + df['CI_95_low'].map('{:.2f}'.format).astype(
str) + ', ' + df['CI_95_high'].map('{:.2f}'.format).astype(str) + ')'
df['means'] = means
df['Pct_change'] = 100 * df['slope'] / df['means']
Pct_high = 100 * df['CI_95_high'] / df['means']
Pct_low = 100 * df['CI_95_low'] / df['means']
df['Pct_change_CI95'] = '(' + Pct_low.map('{:.2f}'.format).astype(
str) + ', ' + Pct_high.map('{:.2f}'.format).astype(str) + ')'
# df['Temperature change'] = df['Percent change'] / 7.0
df.drop(['means', 'CI_95_low', 'CI_95_high'], axis=1, inplace=True)
# station id is big:
df['id'] = df.index.str.upper()
# , 'Temperature change']]
df = df[['id', 'Tau', 'p', 'slope', 'CI95',
'Pct_change', 'Pct_change_CI95']]
# filter for non significant trends:
# df['slope'] = df['slope'][df['p'] < 0.05]
# df['Pct_change'] = df['Pct_change'][df['p'] < 0.05]
# df['CI95'] = df['CI95'][df['p'] < 0.05]
# df['Pct_change_CI95'] = df['Pct_change_CI95'][df['p'] < 0.05]
# higher and better results:
        df.loc[df['p'] < 0.001, 'p'] = '<0.001'
        not_small = df['p'] != '<0.001'
        df.loc[not_small, 'p'] = df.loc[not_small, 'p'].astype(
            float).map('{:,.3f}'.format)
df['Tau'] = df['Tau'].map('{:,.3f}'.format)
df['slope'] = df['slope'].map('{:,.2f}'.format)
df['slope'][df['slope'] == 'nan'] = '-'
df.columns = [
'Site ID',
"Kendall's Tau",
'P-value',
"Sen's slope", "Sen's slope CI 95%",
'Percent change', 'Percent change CI 95%'] # , 'Temperature change']
df['Percent change'] = df['Percent change'].map('{:,.1f}'.format)
df['Percent change'] = df[df["Sen's slope"] != '-']['Percent change']
df['Percent change'] = df['Percent change'].fillna('-')
df["Sen's slope CI 95%"] = df["Sen's slope CI 95%"].fillna(' ')
df['Percent change CI 95%'] = df['Percent change CI 95%'].fillna(' ')
df["Sen's slope"] = df["Sen's slope"].astype(
str) + ' ' + df["Sen's slope CI 95%"].astype(str)
df['Percent change'] = df['Percent change'].astype(
str) + ' ' + df['Percent change CI 95%'].astype(str)
df.drop(['Percent change CI 95%', "Sen's slope CI 95%"],
axis=1, inplace=True)
# df['Temperature change'] = df['Temperature change'].map('{:,.1f}'.format)
# df['Temperature change'] = df[df["Sen's slope"] != '-']['Temperature change']
# df['Temperature change'] = df['Temperature change'].fillna('-')
# last, reindex according to geography:
# gr = group_sites_to_xarray(scope='annual')
# new = [x for x in gr.T.values.ravel() if isinstance(x, str)]
new = [x for x in sites if x in df.index]
df = df.reindex(new)
return df
# if load_data == 'pwv-homo':
# print('loading homogenized (RH) pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# elif load_data == 'pwv-orig':
# print('loading original pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
# elif load_data == 'pwv-era5':
# print('loading era5 pwv dataset.')
# data = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
# if pwv_ds is not None:
# print('loading user-input pwv dataset.')
# data = pwv_ds
df = process_mkt_from_dataset(
pwv_ds,
alpha=alpha,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_mean = reduce_tail_xr(pwv_ds, reduce='mean', records=120,
return_df=True)
table = table_process_df(df, df_mean)
# print(table.to_latex(index=False))
return table
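# Note on the table values: with factor=120 the Sen's slope is presumably
# expressed per 120 months (one decade), and 'Percent change' divides that
# decadal slope by the mean of the last 120 monthly records. Hypothetical
# example: a slope of 0.45 mm/decade and a last-decade mean of 22.5 mm give
# 100 * 0.45 / 22.5 = 2.0 % per decade.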
def plot_filled_and_unfilled_pwv_monthly_anomalies(pw_da, anomalize=True,
max_gap=6,
method='cubic',
ax=None):
from aux_gps import anomalize_xr
import matplotlib.pyplot as plt
import numpy as np
if anomalize:
pw_da = anomalize_xr(pw_da, 'MS')
max_gap_td = np.timedelta64(max_gap, 'M')
filled = pw_da.interpolate_na('time', method=method, max_gap=max_gap_td)
if ax is None:
fig, ax = plt.subplots(figsize=(15, 5))
filledln = filled.plot.line('b-', ax=ax)
origln = pw_da.plot.line('r-', ax=ax)
ax.legend(origln + filledln,
['original time series',
'filled using {} interpolation with max gap of {} months'.format(method,
max_gap)])
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV [mm]')
ax.set_title('PWV station {}'.format(pw_da.name.upper()))
return ax
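# Hypothetical usage sketch (assumes a monthly PWV dataset is already loaded,
# e.g. pw = xr.load_dataset(work_yuval / 'GNSS_PW_monthly_thresh_50.nc')):
# ax = plot_filled_and_unfilled_pwv_monthly_anomalies(pw['tela'], max_gap=6,
#                                                     method='cubic')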
def plot_pwv_statistic_vs_height(pwv_ds, stat='mean', x='alt', season=None,
ax=None, color='b'):
from PW_stations import produce_geo_gnss_solved_stations
import matplotlib.pyplot as plt
from aux_gps import calculate_std_error
import pandas as pd
if season is not None:
print('{} season selected'.format(season))
pwv_ds = pwv_ds.sel(time=pwv_ds['time.season'] == season)
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
if stat == 'mean':
pw_stat = pwv_ds.mean()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
elif stat == 'std':
pw_stat = pwv_ds.std()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
df[stat] = pd.Series(
pw_stat.to_array(
dim='gnss'),
index=pw_stat.to_array('gnss')['gnss'])
df['{}_error'.format(stat)] = pd.Series(pw_stat_error.to_array(
dim='gnss'), index=pw_stat_error.to_array('gnss')['gnss'])
if ax is None:
fig, ax = plt.subplots()
if x == 'alt':
ax.set_xlabel('Altitude [m a.s.l]')
elif x == 'distance':
ax.set_xlabel('Distance to sea shore [km]')
ax.set_ylabel('{} [mm]'.format(stat))
ax.errorbar(df[x],
df[stat],
df['{}_error'.format(stat)],
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color=color)
if season is not None:
ax.set_title('{} season'.format(season))
ax.grid()
return ax
def add_location_to_GNSS_stations_dataframe(df, scope='annual'):
import pandas as pd
# load location data:
gr = group_sites_to_xarray(scope=scope)
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
return df
def plot_peak_amplitude_altitude_long_term_pwv(path=work_yuval, era5=False,
add_a1a2=True, save=True, fontsize=16):
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from fitting_routines import fit_poly_model_xr
from aux_gps import remove_suffix_from_ds
from PW_stations import produce_geo_gnss_solved_stations
# load alt data, distance etc.,
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
df_geo = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
if era5:
dss = xr.load_dataset(path / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
dss = xr.load_dataset(path / 'GNSS_PW_harmonics_annual.nc')
dss = dss[[x for x in dss if '_params' in x]]
dss = remove_suffix_from_ds(dss)
df = dss.sel(cpy=1, params='ampl').reset_coords(drop=True).to_dataframe().T
df.columns = ['A1', 'A1std']
df = df.join(dss.sel(cpy=2, params='ampl').reset_coords(drop=True).to_dataframe().T)
    # take abs because sometimes the fit yields a negative sine amplitude:
df = np.abs(df)
df.columns =['A1', 'A1std', 'A2', 'A2std']
df['A2A1'] = df['A2'] / df['A1']
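    # first-order error propagation for the ratio A2/A1:
    # sigma_ratio = sqrt((sigma_A2 / A1)**2 + (A2 * sigma_A1 / A1**2)**2)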
a2a1std = np.sqrt((df['A2std']/df['A1'])**2 + (df['A2']*df['A1std']/df['A1']**2)**2)
df['A2A1std'] = a2a1std
# load location data:
gr = group_sites_to_xarray(scope='annual')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
df['alt'] = df_geo['alt']
df = df.set_index('alt')
df = df.sort_index()
cdict = produce_colors_for_pwv_station(scope='annual', as_cat_dict=True)
cdict = dict(zip([x.capitalize() for x in cdict.keys()], cdict.values()))
if add_a1a2:
fig, axes=plt.subplots(2, 1, sharex=False, figsize=(8, 12))
ax = axes[0]
else:
ax = None
# colors=produce_colors_for_pwv_station(scope='annual')
ax = sns.scatterplot(data=df, y='A1', x='alt', hue='Location',
palette=cdict, ax=ax, s=100, zorder=20)
# ax.legend(prop={'size': fontsize})
x_coords = []
y_coords = []
colors = []
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
# linear fit:
x = df.index.values
y = df['A1'].values
p = fit_poly_model_xr(x, y, 1, plot=None, ax=None, return_just_p=True)
fit_label = r'Fitted line, slope: {:.2f} mm$\cdot$km$^{{-1}}$'.format(p[0] * -1000)
fit_poly_model_xr(x,y,1,plot='manual', ax=ax, fit_label=fit_label)
ax.set_ylabel('PWV annual amplitude [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_yticks(np.arange(1, 6, 1))
if add_a1a2:
ax.set_xlabel('')
else:
ax.set_xlabel('GNSS station height [m a.s.l]')
ax.grid(True)
ax.legend(prop={'size': fontsize-3})
if add_a1a2:
# convert to percent:
df['A2A1'] = df['A2A1'].mul(100)
df['A2A1std'] = df['A2A1std'].mul(100)
ax = sns.scatterplot(data=df, y='A2A1', x='alt',
hue='Location', ax=axes[1],
legend=True, palette=cdict,
s=100, zorder=20)
x_coords = []
y_coords = []
colors = []
# ax.legend(prop={'size':fontsize+4}, fontsize=fontsize)
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A2A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
df_upper = df.iloc[9:]
y = df_upper['A2A1'].values
x = df_upper.index.values
p = fit_poly_model_xr(x, y, 1, return_just_p=True)
fit_label = r'Fitted line, slope: {:.1f} %$\cdot$km$^{{-1}}$'.format(p[0] * 1000)
p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
return_just_p=False, color='r',
fit_label=fit_label)
df_lower = df.iloc[:11]
mean = df_lower['A2A1'].mean()
std = df_lower['A2A1'].std()
stderr = std / np.sqrt(len(df_lower))
ci = 1.96 * stderr
ax.hlines(xmin=df_lower.index.min(), xmax=df_lower.index.max(), y=mean,
color='k', label='Mean ratio: {:.1f} %'.format(mean))
ax.fill_between(df_lower.index.values, mean + ci, mean - ci, color="#b9cfe7", edgecolor=None, alpha=0.6)
# y = df_lower['A2A1'].values
# x = df_lower.index.values
# p = fit_poly_model_xr(x, y, 1, return_just_p=True)
# fit_label = 'Linear Fit intercept: {:.2f} %'.format(p[1])
# p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
# return_just_p=False, color='k',
# fit_label=fit_label)
# arrange the legend a bit:
handles, labels = ax.get_legend_handles_labels()
h_stns = handles[1:4]
l_stns = labels[1:4]
h_fits = [handles[0] , handles[-1]]
l_fits = [labels[0], labels[-1]]
ax.legend(handles=h_fits+h_stns, labels=l_fits+l_stns, loc='upper left', prop={'size':fontsize-3})
ax.set_ylabel('PWV semi-annual to annual amplitude ratio [%]', fontsize=fontsize)
ax.set_xlabel('GNSS station height [m a.s.l]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.grid(True)
ax.set_yticks(np.arange(0, 100, 20))
fig.tight_layout()
if save:
filename = 'pwv_peak_amplitude_altitude.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_peak_hour_distance(path=work_yuval, season='JJA',
remove_station='dsea', fontsize=22, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import groupby_half_hour_xr
from aux_gps import xr_reindex_with_date_range
import xarray as xr
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.metrics import r2_score
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw.load()
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.map(xr_reindex_with_date_range)
df = groupby_half_hour_xr(pw)
halfs = [df.isel(half_hour=x)['half_hour'] for x in df.argmax().values()]
names = [x for x in df]
dfh = pd.DataFrame(halfs, index=names)
geo = produce_geo_gnss_solved_stations(
add_distance_to_coast=True, plot=False)
geo['phase'] = dfh
geo = geo.dropna()
groups = group_sites_to_xarray(upper=False, scope='diurnal')
geo.loc[groups.sel(group='coastal').values, 'group'] = 'coastal'
geo.loc[groups.sel(group='highland').values, 'group'] = 'highland'
geo.loc[groups.sel(group='eastern').values, 'group'] = 'eastern'
fig, ax = plt.subplots(figsize=(14, 10))
ax.grid()
if remove_station is not None:
removed = geo.loc[remove_station].to_frame().T
geo = geo.drop(remove_station, axis=0)
# lnall = sns.scatterplot(data=geo.loc[only], x='distance', y='phase', ax=ax, hue='group', s=100)
# geo['phase'] = pd.to_timedelta(geo['phase'], unit='H')
coast = geo[geo['group'] == 'coastal']
yerr = 1.0
lncoast = ax.errorbar(x=coast.loc[:,
'distance'],
y=coast.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='b')
# lncoast = ax.scatter(coast.loc[:, 'distance'], coast.loc[:, 'phase'], color='b', s=50)
highland = geo[geo['group'] == 'highland']
# lnhighland = ax.scatter(highland.loc[:, 'distance'], highland.loc[:, 'phase'], color='brown', s=50)
lnhighland = ax.errorbar(x=highland.loc[:,
'distance'],
y=highland.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='brown')
eastern = geo[geo['group'] == 'eastern']
# lneastern = ax.scatter(eastern.loc[:, 'distance'], eastern.loc[:, 'phase'], color='green', s=50)
lneastern = ax.errorbar(x=eastern.loc[:,
'distance'],
y=eastern.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='green')
lnremove = ax.scatter(
removed.loc[:, 'distance'], removed.loc[:, 'phase'], marker='x', color='k', s=50)
ax.legend([lncoast,
lnhighland,
lneastern,
lnremove],
['Coastal stations',
'Highland stations',
'Eastern stations',
'DSEA station'],
fontsize=fontsize)
params = np.polyfit(geo['distance'].values, geo.phase.values, 1)
params2 = np.polyfit(geo['distance'].values, geo.phase.values, 2)
x = np.linspace(0, 210, 100)
y = np.polyval(params, x)
y2 = np.polyval(params2, x)
r2 = r2_score(geo.phase.values, np.polyval(params, geo['distance'].values))
ax.plot(x, y, color='k')
textstr = '\n'.join([r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=fontsize,
verticalalignment='top', bbox=props)
# ax.plot(x,y2, color='green')
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('Distance from shore [km]', fontsize=fontsize)
ax.set_ylabel('Peak hour [UTC]', fontsize=fontsize)
# add sunrise UTC hour
ax.axhline(16.66, color='tab:orange', linewidth=2)
# change yticks to hours minuets:
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = [pd.to_timedelta(float(x), unit='H') for x in labels]
labels = ['{}:{}'.format(x.components[1], x.components[2])
if x.components[2] != 0 else '{}:00'.format(x.components[1]) for x in labels]
ax.set_yticklabels(labels)
fig.canvas.draw()
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if save:
filename = 'pw_peak_distance_shore.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_monthly_variability_heatmap_from_pwv_anomalies(load_path=work_yuval,
thresh=50, save=True,
fontsize=16,
sort_by=['groups_annual', 'alt']):
"""sort_by=['group_annual', 'lat'], ascending=[1,0]"""
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from calendar import month_abbr
from PW_stations import produce_geo_gnss_solved_stations
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 1]).index
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_anoms_thresh_{:.0f}.nc'.format(thresh))
df = anoms.groupby('time.month').std().to_dataframe()
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
# cols = [x for x in sites if x in df.columns]
df = df[sites]
df.columns = [x.upper() for x in df.columns]
fig = plt.figure(figsize=(14, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.91, 0.37, 0.02, 0.62]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
cmap='Reds',
vmin=df.min().min(),
vmax=df.max().max(),
annot=True,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies STD [mm]'},
annot_kws={'fontsize': fontsize}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
cbar_ax.tick_params(labelsize=fontsize)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off',
labelbottom='off',
labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
df_mean = df.T.mean()
df_mean = df_mean.to_frame()
df_mean[1] = [month_abbr[x] for x in range(1, 13)]
df_mean.columns = ['std', 'month']
g = sns.barplot(data=df_mean, x='month', y='std', ax=ax_group, palette='Reds',
hue='std', dodge=False, linewidth=2.5)
g.legend_.remove()
ax_group.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
ax_group.grid(color='k', linestyle='--',
linewidth=1.5, alpha=0.5, axis='y')
ax_group.xaxis.set_tick_params(labelsize=fontsize)
ax_group.yaxis.set_tick_params(labelsize=fontsize)
ax_group.set_xlabel('', fontsize=fontsize)
# df.T.mean().plot(ax=ax_group, kind='bar', color='k', fontsize=fontsize, rot=0)
fig.tight_layout()
fig.subplots_adjust(right=0.906)
if save:
filename = 'pw_anoms_monthly_variability_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_monthly_means_anomalies_with_station_mean(load_path=work_yuval,
thresh=50, save=True,
anoms=None, agg='mean',
fontsize=16, units=None,
remove_stations=['nizn', 'spir'],
sort_by=['groups_annual', 'lat']):
import xarray as xr
import seaborn as sns
from palettable.scientific import diverging as divsci
import numpy as np
import matplotlib.dates as mdates
import pandas as pd
from aux_gps import anomalize_xr
from PW_stations import produce_geo_gnss_solved_stations
sns.set_style('whitegrid')
sns.set_style('ticks')
div_cmap = divsci.Vik_20.mpl_colormap
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
if anoms is None:
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
anoms = anomalize_xr(anoms, 'MS', units=units)
if remove_stations is not None:
anoms = anoms[[x for x in anoms if x not in remove_stations]]
df = anoms.to_dataframe()[:'2019']
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
cols = [x for x in sites if x in df.columns]
df = df[cols]
df.columns = [x.upper() for x in df.columns]
weights = df.count(axis=1).shift(periods=-1, freq='15D').astype(int)
fig = plt.figure(figsize=(20, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0.0225)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.95, 0.43, 0.0125, 0.45]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies [mm]'}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize-4)
cbar_ax.tick_params(labelsize=fontsize)
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(
bottom='off', labelbottom='off', labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
ax_heat.set_xlabel('')
if agg == 'mean':
ts = df.T.mean().shift(periods=-1, freq='15D')
elif agg == 'median':
ts = df.T.median().shift(periods=-1, freq='15D')
ts.index.name = ''
# dt_as_int = [x for x in range(len(ts.index))]
# xticks_labels = ts.index.strftime('%Y-%m').values[::6]
# xticks = dt_as_int[::6]
# xticks = ts.index
# ts.index = dt_as_int
ts.plot(ax=ax_group, color='k', fontsize=fontsize, lw=2)
barax = ax_group.twinx()
barax.bar(ts.index, weights.values, width=35, color='k', alpha=0.2)
barax.yaxis.set_major_locator(ticker.MaxNLocator(6))
barax.set_ylabel('Stations [#]', fontsize=fontsize-4)
barax.tick_params(labelsize=fontsize)
ax_group.set_xlim(ts.index.min(), ts.index.max() +
pd.Timedelta(15, unit='D'))
ax_group.set_ylabel('PWV {} anomalies [mm]'.format(agg), fontsize=fontsize-4)
# set ticks and align with heatmap axis (move by 0.5):
# ax_group.set_xticks(dt_as_int)
# offset = 1
# ax_group.xaxis.set(ticks=np.arange(offset / 2.,
# max(dt_as_int) + 1 - min(dt_as_int),
# offset),
# ticklabels=dt_as_int)
# move the lines also by 0.5 to align with heatmap:
# lines = ax_group.lines # get the lines
# [x.set_xdata(x.get_xdata() - min(dt_as_int) + 0.5) for x in lines]
# ax_group.xaxis.set(ticks=xticks, ticklabels=xticks_labels)
# ax_group.xaxis.set(ticks=xticks)
years_fmt = mdates.DateFormatter('%Y')
ax_group.xaxis.set_major_locator(mdates.YearLocator())
ax_group.xaxis.set_major_formatter(years_fmt)
ax_group.xaxis.set_minor_locator(mdates.MonthLocator())
# ax_group.xaxis.tick_top()
# ax_group.xaxis.set_ticks_position('both')
# ax_group.tick_params(axis='x', labeltop='off', top='on',
# bottom='on', labelbottom='on')
ax_group.grid()
# ax_group.axvline('2015-09-15')
# ax_group.axhline(2.5)
# plt.setp(ax_group.xaxis.get_majorticklabels(), rotation=45 )
fig.tight_layout()
fig.subplots_adjust(right=0.946)
if save:
filename = 'pw_monthly_means_anomaly_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return ts
def plot_grp_anomlay_heatmap(load_path=work_yuval, gis_path=gis_path,
thresh=50, grp='hour', remove_grp=None, season=None,
n_clusters=4, save=True, title=False):
import xarray as xr
import seaborn as sns
import numpy as np
from PW_stations import group_anoms_and_cluster
from aux_gps import geo_annotate
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import ListedColormap
from palettable.scientific import diverging as divsci
from PW_stations import produce_geo_gnss_solved_stations
div_cmap = divsci.Vik_20.mpl_colormap
dem_path = load_path / 'AW3D30'
def weighted_average(grp_df, weights_col='weights'):
return grp_df._get_numeric_data().multiply(
grp_df[weights_col], axis=0).sum() / grp_df[weights_col].sum()
df, labels_sorted, weights = group_anoms_and_cluster(
load_path=load_path, thresh=thresh, grp=grp, season=season,
n_clusters=n_clusters, remove_grp=remove_grp)
# create figure and subplots axes:
fig = plt.figure(figsize=(15, 10))
if title:
if season is not None:
fig.suptitle(
'Precipitable water {}ly anomalies analysis for {} season'.format(grp, season))
else:
fig.suptitle('Precipitable water {}ly anomalies analysis (Weighted KMeans {} clusters)'.format(
grp, n_clusters))
grid = plt.GridSpec(
2, 2, width_ratios=[
3, 2], height_ratios=[
4, 1], wspace=0.1, hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
ax_map = fig.add_subplot(grid[0:, 1]) # plt.subplot(122)
# get the camp and zip it to groups and produce dictionary:
cmap = plt.get_cmap("Accent")
cmap = qualitative_cmap(n_clusters)
# cmap = plt.get_cmap("Set2_r")
# cmap = ListedColormap(cmap.colors[::-1])
groups = list(set(labels_sorted.values()))
palette = dict(zip(groups, [cmap(x) for x in range(len(groups))]))
label_cmap_dict = dict(zip(labels_sorted.keys(),
[palette[x] for x in labels_sorted.values()]))
cm = ListedColormap([x for x in palette.values()])
# plot heatmap and colorbar:
cbar_ax = fig.add_axes([0.57, 0.24, 0.01, 0.69]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': '[mm]'})
# activate top ticks and tickslabales:
ax_heat.xaxis.set_tick_params(top='on', labeltop='on')
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=10)
# paint ytick labels with categorical cmap:
boxes = [dict(facecolor=x, boxstyle="square,pad=0.7", alpha=0.6)
for x in label_cmap_dict.values()]
ylabels = [x for x in ax_heat.yaxis.get_ticklabels()]
for label, box in zip(ylabels, boxes):
label.set_bbox(box)
# rotate xtick_labels:
# ax_heat.set_xticklabels(ax_heat.get_xticklabels(), rotation=0,
# fontsize=10)
# plot summed groups (with weights):
df_groups = df.T
df_groups['groups'] = pd.Series(labels_sorted)
df_groups['weights'] = weights
df_groups = df_groups.groupby('groups').apply(weighted_average)
df_groups.drop(['groups', 'weights'], axis=1, inplace=True)
df_groups.T.plot(ax=ax_group, linewidth=2.0, legend=False, cmap=cm)
if grp == 'hour':
ax_group.set_xlabel('hour (UTC)')
ax_group.grid()
group_limit = ax_heat.get_xlim()
ax_group.set_xlim(group_limit)
ax_group.set_ylabel('[mm]')
# set ticks and align with heatmap axis (move by 0.5):
ax_group.set_xticks(df.index.values)
offset = 1
ax_group.xaxis.set(ticks=np.arange(offset / 2.,
max(df.index.values) + 1 -
min(df.index.values),
offset),
ticklabels=df.index.values)
# move the lines also by 0.5 to align with heatmap:
lines = ax_group.lines # get the lines
[x.set_xdata(x.get_xdata() - min(df.index.values) + 0.5) for x in lines]
# plot israel map:
ax_map = plot_israel_map(gis_path=gis_path, ax=ax_map)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
im = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = fig.colorbar(im, ax=ax_map, **cbar_kwargs)
# cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
print('getting solved GNSS israeli stations metadata...')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
gps.index = gps.index.str.upper()
gps = gps.loc[[x for x in df.columns], :]
    gps['group'] = pd.Series(labels_sorted)
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright (C) 2020 <NAME>
# Use of this source code is governed by the MIT License
###############################################################################
from . import config
from .metadata import metadata
from . import linesholder
from . import linesops
from .. import SEED_AVG, SEED_LAST, SEED_SUM, SEED_NONE, SEED_ZERO, SEED_ZFILL
import numpy as np
import pandas as pd
__all__ = ['Line', 'Lines']
def _generate(cls, bases, dct, name='', klass=None, **kwargs):
# If "name" is defined (inputs, outputs) it overrides any previous
# definition from the base clases.
# An extension can be done by using "name_extend" (inputs_extend) in which
# case the definition will be appended to that of the base classes
# In case of a redefinition, automatic mappings to the existing definitions
# (by index) will be done to ensure "instances" do still work in base
# classes when going the super route
# Manual mappings can also be defined if a definition is a dictionary like
# in:
# outputs = {'atr': 'tr'}
# In this case 'atr' is the new output and the base class had a 'tr' output
# and now whenenver 'tr' is referenced it will point to 'atr'
# Get actual lines definition and that of the bases
clsdefs = dct.get(name, ()) # new defs
# support remapping lines in subclasses
cdefs = [] # collect final single new definitions
defmappings = {} # collect any mappings
# one can specify a single input (str) or single remapping (dict)
if isinstance(clsdefs, (dict, str,)):
clsdefs = [clsdefs] # unpacked below
for clsdef in clsdefs:
# if a "line" def contains a list or a tuple, it is expected to have 2
# elements defining a remapping. key=>val where key is the new name and
# value is the old name, defined in the base class. Make it a dict to
# support the general case in which it was already a dict
if isinstance(clsdef, (list, tuple,)):
clsdef = dict([clsdef]) # and go to dict case
if isinstance(clsdef, dict):
cdefs.extend(list(clsdef))
defmappings.update(clsdef) # store mapping to genreate properties
else: # assume str or else detect and raise exception if not
cdefs.append(clsdef)
# After having parsed mappings in dict form, create the actual definition
clsdefs = tuple(cdefs)
# Gather base definitions - needed here to do mappings
lbases = (getattr(base, name, ()) for base in bases)
lbdefs = tuple(ldef for lbase in lbases for ldef in lbase)
if clsdefs: # a new definition was made
final_defs = clsdefs
for clsdef, lbdef in zip(clsdefs, lbdefs): # create automappings
if lbdef in clsdefs: # cannot remap if exists in current defs
continue
defmappings.setdefault(clsdef, lbdef)
else:
# no new definition, see if _extend has been put in place
clsdefs = dct.get(name + '_extend', ()) # new defs
if isinstance(clsdefs, str):
clsdefs = (clsdefs,) # unpacked below
final_defs = lbdefs + clsdefs
# removed remapped lines from definitions
remapped = list(defmappings.values())
# retain last inputs defs - super readable and pythonic one-liner
lines = tuple(reversed(list(dict.fromkeys(reversed(final_defs)))))
lines = tuple(x for x in lines if x not in remapped)
setattr(cls, name, lines) # install all lines defs
# Create base dictionary for subclassing via typ
clsdct = dict(__module__=cls.__module__, __slots__=list(lines))
# Create properties for attribute retrieval of old line
propdct = {}
    for mapped, alias in defmappings.items():
        # bind "mapped" at definition time via a default argument so each
        # alias property targets its own line, and so the loop variable does
        # not shadow the "name" parameter used for clsname below
        def get_alias_to_name(self, _mapped=mapped):
            return getattr(self, _mapped)
        def set_alias_to_name(self, value, _mapped=mapped):
            setattr(self, _mapped, value)
        propdct[alias] = property(get_alias_to_name, set_alias_to_name)
clsdct.update(propdct) # add properties for alias remapping
clsname = name.capitalize() + cls.__name__ # decide name
return type(clsname, (klass,), clsdct) # subclass and return
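# Illustrative (hypothetical) use of the remapping machinery above; the class
# names are made up:
# class ATR(TrueRange):
#     outputs = {'atr': 'tr'}   # new output 'atr' remaps the base output 'tr'
# Base-class code that reads or writes 'tr' is then redirected to 'atr' through
# the generated alias properties.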
def binary_op(name):
def real_binary_op(self, other, *args, **kwargs):
# Executes a binary operation where self is guaranteed to have a
# _series attribute but other isn't. Example > or +
# The minimum period is taken into account to only apply the operation
# to the proper range and store in the result in that range. The rest
# is a bunch of leading 'NaN'
# See if other has a minperiod, else default to 1
minperiod = max(self._minperiod, getattr(other, '_minperiod', 1))
minidx = minperiod - 1 # minperiod is 1-based, easier for location
# Prepare a result filled with 'Nan'
result = pd.Series(np.nan, index=self._series.index)
# Get and prepare the other operand
other = getattr(other, '_series', other) # get real other operand
other = other[minidx:] if isinstance(other, pd.Series) else other
# Get the operation, exec and store
binop = getattr(self._series[minidx:], name) # get op from series
result[minidx:] = r = binop(other, *args, **kwargs) # exec / store
result = result.astype(r.dtype, copy=False)
return self._clone(result, period=minperiod) # ret new obj w minperiod
linesops.install_cls(name=name, attr=real_binary_op)
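# binary_op is installed for the usual dunder names (e.g. '__add__', '__gt__'),
# so for two line objects a and b with minperiods 5 and 3 (hypothetical usage):
# c = a + b
# c carries minperiod 5, its first 4 slots stay NaN, and the addition is only
# applied from that index onward.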
def standard_op(name, parg=None, sargs=False, skwargs=False):
def real_standard_op(self, *args, **kwargs):
# Prepare a result filled with 'Nan'
        result = pd.Series(np.nan, index=self._series.index)
from __future__ import division
import json
import re
import time
from pandas import DataFrame, isnull, notnull, to_datetime
from pandas_datareader._utils import RemoteDataError
from pandas_datareader.base import _DailyBaseReader
class YahooDailyReader(_DailyBaseReader):
"""
Returns DataFrame of with historical over date range,
start to end.
To avoid being penalized by Yahoo! Finance servers, pauses between
downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, int, date, datetime, Timestamp
Starting date. Parses many different kind of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980'). Defaults to
5 years before current date.
end : string, int, date, datetime, Timestamp
Ending date
retry_count : int, default 3
Number of times to retry query request.
pause : int, default 0.1
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
adjust_price : bool, default False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default 25
Number of symbols to download consecutively before intiating pause.
interval : string, default 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly.
get_actions : bool, default False
If True, adds Dividend and Split columns to dataframe.
adjust_dividends: bool, default true
If True, adjusts dividends for splits.
"""
def __init__(
self,
symbols=None,
start=None,
end=None,
retry_count=3,
pause=0.1,
session=None,
adjust_price=False,
ret_index=False,
chunksize=1,
interval="d",
get_actions=False,
adjust_dividends=True,
):
super(YahooDailyReader, self).__init__(
symbols=symbols,
start=start,
end=end,
retry_count=retry_count,
pause=pause,
session=session,
chunksize=chunksize,
)
# Ladder up the wait time between subsequent requests to improve
# probability of a successful retry
self.pause_multiplier = 2.5
self.headers = {
"Connection": "keep-alive",
"Expires": str(-1),
"Upgrade-Insecure-Requests": str(1),
# Google Chrome:
"User-Agent": (
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"
),
}
self.adjust_price = adjust_price
self.ret_index = ret_index
self.interval = interval
self._get_actions = get_actions
if self.interval not in ["d", "wk", "mo", "m", "w"]:
raise ValueError(
"Invalid interval: valid values are 'd', 'wk' and 'mo'. 'm' and 'w' "
"have been implemented for backward compatibility. 'v' has been moved "
"to the yahoo-actions or yahoo-dividends APIs."
)
elif self.interval in ["m", "mo"]:
self.pdinterval = "m"
self.interval = "mo"
elif self.interval in ["w", "wk"]:
self.pdinterval = "w"
self.interval = "wk"
self.interval = "1" + self.interval
self.adjust_dividends = adjust_dividends
@property
def get_actions(self):
return self._get_actions
@property
def url(self):
return "https://finance.yahoo.com/quote/{}/history"
    # Note: test_get_data_interval() crashed because of this issue; the whole Yahoo
    # part of the package was probably not working properly at the time.
def _get_params(self, symbol):
        # This is needed because Yahoo returns data shifted back by 4 hours.
four_hours_in_seconds = 14400
unix_start = int(time.mktime(self.start.timetuple()))
unix_start += four_hours_in_seconds
day_end = self.end.replace(hour=23, minute=59, second=59)
unix_end = int(time.mktime(day_end.timetuple()))
unix_end += four_hours_in_seconds
params = {
"period1": unix_start,
"period2": unix_end,
"interval": self.interval,
"frequency": self.interval,
"filter": "history",
"symbol": symbol,
}
return params
def _read_one_data(self, url, params):
""" read one data from specified symbol """
symbol = params["symbol"]
del params["symbol"]
url = url.format(symbol)
resp = self._get_response(url, params=params)
ptrn = r"root\.App\.main = (.*?);\n}\(this\)\);"
try:
j = json.loads(re.search(ptrn, resp.text, re.DOTALL).group(1))
data = j["context"]["dispatcher"]["stores"]["HistoricalPriceStore"]
except KeyError:
msg = "No data fetched for symbol {} using {}"
raise RemoteDataError(msg.format(symbol, self.__class__.__name__))
# price data
prices = DataFrame(data["prices"])
prices.columns = [col.capitalize() for col in prices.columns]
prices["Date"] = to_datetime(to_datetime(prices["Date"], unit="s").dt.date)
if "Data" in prices.columns:
prices = prices[prices["Data"].isnull()]
prices = prices[["Date", "High", "Low", "Open", "Close", "Volume", "Adjclose"]]
prices = prices.rename(columns={"Adjclose": "Adj Close"})
prices = prices.set_index("Date")
prices = prices.sort_index().dropna(how="all")
if self.ret_index:
prices["Ret_Index"] = _calc_return_index(prices["Adj Close"])
if self.adjust_price:
prices = _adjust_prices(prices)
# dividends & splits data
if self.get_actions and data["eventsData"]:
actions = DataFrame(data["eventsData"])
actions.columns = [col.capitalize() for col in actions.columns]
actions["Date"] = to_datetime(
to_datetime(actions["Date"], unit="s").dt.date
)
types = actions["Type"].unique()
if "DIVIDEND" in types:
divs = actions[actions.Type == "DIVIDEND"].copy()
divs = divs[["Date", "Amount"]].reset_index(drop=True)
divs = divs.set_index("Date")
divs = divs.rename(columns={"Amount": "Dividends"})
prices = prices.join(divs, how="outer")
if "SPLIT" in types:
def split_ratio(row):
if float(row["Numerator"]) > 0:
if ":" in row["Splitratio"]:
n, m = row["Splitratio"].split(":")
return float(m) / float(n)
else:
return eval(row["Splitratio"])
else:
return 1
splits = actions[actions.Type == "SPLIT"].copy()
splits["SplitRatio"] = splits.apply(split_ratio, axis=1)
splits = splits.reset_index(drop=True)
splits = splits.set_index("Date")
splits["Splits"] = splits["SplitRatio"]
prices = prices.join(splits["Splits"], how="outer")
if "DIVIDEND" in types and not self.adjust_dividends:
# dividends are adjusted automatically by Yahoo
adj = (
prices["Splits"].sort_index(ascending=False).fillna(1).cumprod()
)
prices["Dividends"] = prices["Dividends"] / adj
return prices
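# Illustrative sketch (not part of the original module): typical use of the reader defined above.
# The symbol, the date range and the read()/close() calls inherited from the base reader are
# assumptions for demonstration purposes only; Yahoo may reject or rate-limit such requests.
if __name__ == "__main__":
    _reader = YahooDailyReader(symbols="AAPL", start="2020-01-01", end="2020-06-30",
                               interval="d", get_actions=True)
    try:
        print(_reader.read().head())
    finally:
        _reader.close()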
def _adjust_prices(hist_data, price_list=None):
"""
    Return a modified DataFrame with prices adjusted by the 'Adj Close'/'Close'
    ratio. Adds an 'Adj_Ratio' column and drops 'Adj Close'.
"""
if price_list is None:
price_list = "Open", "High", "Low", "Close"
adj_ratio = hist_data["Adj Close"] / hist_data["Close"]
data = hist_data.copy()
for item in price_list:
data[item] = hist_data[item] * adj_ratio
data["Adj_Ratio"] = adj_ratio
del data["Adj Close"]
return data
def _calc_return_index(price_df):
"""
    Return a cumulative return index from an input price DataFrame or Series.
    The initial value (typically NaN) is set to 1.
"""
df = price_df.pct_change().add(1).cumprod()
mask = notnull(df.iloc[1]) & | isnull(df.iloc[0]) | pandas.isnull |
from __future__ import division
from laspy.file import File
import numpy as np
import pandas as pd
import time, math
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
        print('%s function took %0.3f ms' % (f.__name__, (time2-time1)*1000.0))
return ret
return wrap
@timing
def loadLAS2XYZ(filepath):
'''
Function to load in console the pointcloud of a LAS file
:param filepath: filepath of the LAS file
:return: xyz array containing coordinate of the points
'''
print('Start loading...')
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z)).transpose()
print('Data loaded')
return coords
@timing
def loadLAS2XYZAIR(filepath):
'''
Function to load in console the pointcloud of a LAS file with points attributes
:param filepath: filepath of the LAS file
:return: xyz array containing coordinate of the points
'''
print('Start loading...')
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z, inFile.amplitude, inFile.Intensity, inFile.reflectance, inFile.num_returns)).transpose()
print('Data loaded')
return coords
def xyz2binarray(xyz, xstart, xend, ystart, yend, nx=1000, ny=1000, method='min'):
'''
Function to extract projected grid on the XY-plane of point cloud statistics
:param xyz: a 3 column vector containing the point location in cartesian coordinate system
:param xstart: x-minimum of the grid
:param xend: x-maximum of the grid
    :param ystart: y-minimum of the grid
:param yend: y-maximum of the grid
:param nx: number of grid cell in the x directions
:param ny: number of grid cell in the y directions
:param method: statistics to extract from each gridcell
:return: returns a 2D array, xmin, and ymax
TO IMPLEMENT:
- being able to choose to input dx dy instead of nx ny
'''
binned, bins_x, bins_y, bin_xmin, bin_ymin = binData2D(xyz, xstart, xend, ystart, yend, nx, ny)
if method == 'min':
ret = binned.Z.min().unstack().T # .iloc[::-1]
elif method == 'max':
ret = binned.Z.max().unstack().T # .iloc[::-1]
elif method == 'mean':
ret = binned.Z.mean().unstack().T # .iloc[::-1]
elif method == 'median':
ret = binned.Z.median().unstack().T # .iloc[::-1]
elif method == 'count':
ret = binned.Z.count().unstack().T # .iloc[::-1]
xmin = bins_x[ret.columns.min().astype(int)]
ymax = bins_y[ret.index.get_values().max().astype(int)]
newIndy = np.arange(ret.index.get_values().min(), ret.index.get_values().max() + 1)
newIndx = np.arange(ret.columns.min(), ret.columns.max() + 1)
a = ret.reindex(newIndy, newIndx)
mat = np.zeros((ny, nx)) * np.nan
mat[bin_ymin:bin_ymin + a.shape[0], bin_xmin:bin_xmin + a.shape[1]] = a
return mat[::-1], xmin, ymax
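# Minimal usage sketch (synthetic data; helper is not called anywhere): grid a random point cloud
# into a 50 x 50 raster of minimum elevations with xyz2binarray above.
def example_xyz2binarray():
    pts = np.random.rand(10000, 3) * np.array([100.0, 100.0, 10.0])
    dem, xmin, ymax = xyz2binarray(pts, 0, 100, 0, 100, nx=50, ny=50, method='min')
    return dem, xmin, ymax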
def LAS2txt(filepath,newfile):
'''
    Function to convert a point cloud saved in LAS format into a .txt format
:param filepath: filepath of the LAS file
:param newfile: name of the new file
:return: save data into a text file
'''
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z)).transpose()
    if newfile[-4:] != '.txt':
newfile = newfile + '.txt'
np.savetxt(newfile,coords)
print('File saved: ' + newfile)
def xyz_subsample(xyz, length_out):
'''
Function to subsample a 3 columm matrix.
:param xyz: 3 column matrix
    :param length_out: number of samples to output
:return: a 3 column matrix
'''
ind = np.random.randint(0,xyz.shape[0],length_out)
xyz_new = xyz[ind,:]
print('xyz subsampled!')
return xyz_new
def xyz_stat(xyz):
print('Shape of array: ' + str(xyz.shape))
print('Min of xyz: ')
print(np.min(xyz, axis=0))
print('Max of xyz: ')
print(np.max(xyz, axis=0))
print('Mean of xyz: ')
print(np.mean(xyz, axis=0))
print('Extent')
print(np.max(xyz, axis=0)-np.min(xyz, axis=0))
def trans(xyz,trans_vec):
'''
Function to translate an xyz 3 column matrix
:param xyz: a 3 column matrix
:param trans_vec: a translation vector of length 3
:return: a 3 column matrix translated
'''
xyz[:,0] = xyz[:,0] - trans_vec[0]
xyz[:,1] = xyz[:,1] - trans_vec[1]
xyz[:,2] = xyz[:,2] - trans_vec[2]
return xyz
def translate_coords(coords, xyz_trans = None ,ask = True):
'''
Function to translate a point cloud
:param coords: an xyz array
:param xyz_trans: vector of translation in [x,y,z]
:param ask: if True (default) brings an interactive console for approving the translation
:return: translated xyz array
'''
if xyz_trans is None:
xyz_trans = [coords[:,0].min(), coords[:,1].min(), coords[:,2].min()]
if ask is True:
print('Default translation:')
print(str(xyz_trans) + '\n')
        res = input('Do you want to translate? 0/1')
        if res == '0':
            print('No Translation applied')
            return None
        if res == '1':
            return trans(coords, xyz_trans)
if ask is not True:
return trans(coords, xyz_trans)
def truncate(xyz, Xextent, Yextent):
'''
Function to truncate a point cloud with a rectangular shape
    :param xyz: a 3 column matrix containing the point coordinates
:param Xextent: a vector of Xmin and Xmax (e.g. [Xmin,Xmax])
:param Yextent: a vector of Ymin and Ymax (e.g. [Ymin, Ymax])
    :return: a 3 column matrix containing the point coordinates within the specified rectangle
'''
xcut = xyz[xyz[:,0]>=Xextent[0]]
xcut1 = xcut[xcut[:,0]<Xextent[1]]
ycut = xcut1[xcut1[:,1]>=Yextent[0]]
ycut1 = ycut[ycut[:,1]<Yextent[1]]
return ycut1
def cart2cyl(xyz, xy_axis=None):
'''
function to convert cartesian coordinates to cylindrical
:param xyz: a 3-column matrix containing the points coordinates expressed in a cartesian system
:param xy_axis: an array of x and y coordinate for the center of the new cylindrical coordinate
:return: a 3 colum matrix with the point coordinates are expressed in a cylindrical coordinate system
'''
if xy_axis is not None:
xyz[:,0] = xyz[:,0] - xy_axis[0]
xyz[:,1] = xyz[:,1] - xy_axis[1]
rho = np.sqrt(xyz[:,0]**2 + xyz[:,1]**2)
phi = np.arctan2(xyz[:,1], xyz[:,0])
rpz = np.vstack((rho,phi,xyz[:,2]))
return rpz.transpose()
def cyl2cart(rpz):
'''
convert cylindrical coordinate to cartesian
:param rpz: a 3-column matrix containing the points coordinates expressed in a cylindrical system
:return: a 3-column matrix containing the points coordinates expressed in a cartesian system
'''
x = rpz[:,0] * np.cos(rpz[:,1])
y = rpz[:,0] * np.sin(rpz[:,1])
xyz = np.vstack((x,y,rpz[:,2]))
return xyz.transpose()
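# Quick round-trip sketch (synthetic data; helper is not called anywhere): converting to
# cylindrical coordinates and back with the two functions above should recover the original
# points up to floating-point error.
def example_cart_cyl_roundtrip():
    xyz = np.random.rand(100, 3)
    back = cyl2cart(cart2cyl(xyz.copy()))
    return np.allclose(xyz, back)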
def rotate_cloud(xyz, angle, center_coord=None):
'''
Function to rotate a point cloud
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
    :param angle: angle of rotation in radians (callers such as get_slice convert degrees to radians first)
    :param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: the rotated xyz point cloud
'''
if center_coord is None:
center_coord = [np.mean(xyz[:,0]),np.mean(xyz[:,1])]
rpz = cart2cyl(xyz, xy_axis=center_coord)
rpz[:,1] = rpz[:,1] + angle
xyz = cyl2cart(rpz)
return xyz
def get_slice(xyz, thick, dir=0, center_coord=None):
'''
Function to extract a slice of the point cloud xyz
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
:param thick: thickness of the slice
:param dir: direction of the slice in degrees (default is 0)
:param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: return slice in xyz format.
'''
if center_coord is None:
center_coord = [np.mean(xyz[:,0]),np.mean(xyz[:,1])]
print(center_coord)
if dir % 180 != 0:
xyz = rotate_cloud(xyz, (dir*math.pi/180), center_coord= center_coord)
myslice = xyz[xyz[:,0]>=-(thick/2)]
myslice = myslice[myslice[:,0]<=(thick/2)]
return myslice
def get_slice_df(df_xyz, thick, dir=0, center_coord=None):
'''
Function to extract a slice of points from a dataframe
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
:param thick: thickness of the slice
:param dir: direction of the slice in degrees (default is 0)
:param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: return slice in xyz format.
'''
df = df_xyz.copy()
df_xyz=None
if center_coord is None:
center_coord = [df['x'].mean(),df['y'].mean()]
print(center_coord)
if dir % 180 != 0:
xyz = rotate_cloud(np.array(df[['x','y','z']]), (dir*math.pi/180), center_coord = center_coord)
df[['x','y']] = xyz[:,[0,1]]
myslice = df[df.x >= - (thick / 2)]
myslice = myslice[df.x <= (thick/2)]
else:
myslice = df[df.x >= (center_coord[0] - thick / 2)]
myslice = myslice[df.x <= (center_coord[0] + thick / 2)]
myslice['x'] = myslice['x'] - center_coord[0]
myslice['y'] = myslice['y'] - center_coord[1]
print('Data Sliced')
return myslice
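# Usage sketch (synthetic data; helper is not called anywhere): extract a 2 m thick slice along
# the default direction from a point-cloud DataFrame with get_slice_df above.
def example_get_slice_df():
    df = pd.DataFrame(np.random.rand(1000, 3) * 100.0, columns=['x', 'y', 'z'])
    return get_slice_df(df, thick=2.0, dir=0)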
def center_pc_coord_df(df_xyz, center_coord=None):
if center_coord is None:
center_coord = [(df_xyz['x'].max()-df_xyz['x'].min())/2 + df_xyz['x'].min(),
(df_xyz['y'].max()-df_xyz['y'].min())/2 +df_xyz['y'].min()]
print(center_coord)
df_xyz['x'] = df_xyz['x'] - center_coord[0]
df_xyz['y'] = df_xyz['y'] - center_coord[1]
return df_xyz
@timing
def binData2D(myXYZ, xstart, xend, ystart, yend, nx, ny):
'''
    Function to bin a scattered point cloud (xyz) into a 2D array
    :param myXYZ: xyz array containing the point cloud coordinates
:param xstart:
:param xend:
:param ystart:
:param yend:
:param nx: number of cells along the x-axis
    :param ny: number of cells along the y-axis
:return: a group object (pandas library) with all points classified into bins
'''
    # note, the division requires: from __future__ import division
x = myXYZ[:,0].ravel()
y = myXYZ[:,1].ravel()
z = myXYZ[:,2].ravel()
df = pd.DataFrame({'X' : x , 'Y' : y , 'Z' : z})
bins_x = np.linspace(xstart, xend, nx+1)
x_cuts = | pd.cut(df.X,bins_x, labels=False) | pandas.cut |
#!/usr/bin/env python
# coding: utf-8
# ## IMT 563
# ### Group 7 | Covid -19 Vaccination Info
# ### Authors - <NAME>, <NAME> and <NAME>
# In[1]:
# Importing useful packages and libraries
import pandas as pd
import numpy as np
import datetime
from datetime import datetime
import snowflake.connector
from snowflake import sqlalchemy
from snowflake.sqlalchemy import URL
from sqlalchemy import create_engine,inspect
import pytz
from calendar import monthrange
import re
from tqdm import tqdm
# In[2]:
# Establishing Snowflake Connection Parameters
engine = create_engine(URL(
account = 'tca69088',
role = 'SYSADMIN',
user = 'Group7',
password = '<PASSWORD>!',
database = 'IMT_DB',
schema = 'PUBLIC',
))
# ### Importing Files
# #### Comment - HERE I HAVE NOT REMOVED UNASSIGNED VALUES
# In[3]:
### Writing a function to do the same
### - So the function should take in file_path,sheet_name,list_non_group
def load_wa_chd(file_path,sheet_name,list_columns,list_group,agg_value):
df = pd.ExcelFile(file_path)
df_s = pd.read_excel(df, sheet_name = sheet_name)
df_s = df_s[list_columns]
df_s = df_s.groupby(list_group,as_index=False)[agg_value].sum()
df_s = df_s[df_s['County'] != 'Unassigned']
df_s = df_s.reset_index(drop=True)
return df_s
# #### 7th MARCH CASES
# In[4]:
file_path_wa_c_7 = '/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_WA_COVID19_Cases_Hospitalizations_Deaths (2).xlsx'
sheet_name_wa_c_7 = 'Cases'
list_columns_wa_c_7 = ['County','TotalCases']
list_group_wa_c_7 = ['County']
agg_value_c = 'TotalCases'
wa_chd_7_cases = load_wa_chd(file_path_wa_c_7,sheet_name_wa_c_7,list_columns_wa_c_7,list_group_wa_c_7
,agg_value_c)
# #### 7th MARCH HOSPITALIZATIONS
# In[5]:
file_path_wa_h_7 = '/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_WA_COVID19_Cases_Hospitalizations_Deaths (2).xlsx'
sheet_name_wa_h_7 = 'Hospitalizations'
list_columns_wa_h_7 = ['County','Hospitalizations']
list_group_wa_h_7 = ['County']
agg_value_h = 'Hospitalizations'
wa_chd_7_hospitalizations = load_wa_chd(file_path_wa_h_7,sheet_name_wa_h_7,list_columns_wa_h_7,list_group_wa_h_7
,agg_value_h)
# #### 7th MARCH DEATHS
# In[6]:
file_path_wa_d_7 = '/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_WA_COVID19_Cases_Hospitalizations_Deaths (2).xlsx'
sheet_name_wa_d_7 = 'Deaths'
list_columns_wa_d_7 = ['County','Deaths']
list_group_wa_d_7 = ['County']
agg_value_d = 'Deaths'
wa_chd_7_deaths = load_wa_chd(file_path_wa_d_7,sheet_name_wa_d_7,list_columns_wa_d_7,list_group_wa_d_7
,agg_value_d)
# #### 21st FEB CASES
# In[7]:
file_path_wa_c_21 = '/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_WA_COVID19_Cases_Hospitalizations_Deaths.xlsx'
sheet_name_wa_c_21 = 'Cases'
list_columns_wa_c_21 = ['County','TotalCases']
list_group_wa_c_21 = ['County']
agg_value_c = 'TotalCases'
wa_chd_21_cases = load_wa_chd(file_path_wa_c_21,sheet_name_wa_c_21,list_columns_wa_c_21,list_group_wa_c_21
,agg_value_c)
# #### 21st FEB HOSPITALIZATIONS
# In[8]:
file_path_wa_h_21 = '/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_WA_COVID19_Cases_Hospitalizations_Deaths.xlsx'
sheet_name_wa_h_21 = 'Hospitalizations'
list_columns_wa_h_21 = ['County','Hospitalizations']
list_group_wa_h_21 = ['County']
agg_value_h = 'Hospitalizations'
wa_chd_21_hospitalizations = load_wa_chd(file_path_wa_h_21,sheet_name_wa_h_21,list_columns_wa_h_21,list_group_wa_h_21
,agg_value_h)
# #### 21st FEB DEATHS
# In[9]:
file_path_wa_d_21 = '/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_WA_COVID19_Cases_Hospitalizations_Deaths.xlsx'
sheet_name_wa_d_21 = 'Deaths'
list_columns_wa_d_21 = ['County','Deaths']
list_group_wa_d_21 = ['County']
agg_value_d = 'Deaths'
wa_chd_21_deaths = load_wa_chd(file_path_wa_d_21,sheet_name_wa_d_21,list_columns_wa_d_21,list_group_wa_d_21
,agg_value_d)
# ### Zip Level Data
# In[10]:
def load_wa_zip_chd(file_path,sheet_name,list_columns):
df = pd.ExcelFile(file_path)
df_s = pd.read_excel(df, sheet_name = sheet_name)
df_s = df_s[list_columns]
df_s = df_s.drop(0)
df_s.reset_index(drop=True,inplace=True)
return df_s
# #### 7th March
# In[11]:
file_path_wa_zip_c_7 = "/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_overall-counts-rates-geography-mar-3.xlsx"
sheet_name_wa_zip_c_7 = 'ZIP'
list_columns_wa_zip_c_7 =['Location_Name','Positives']
wa_chd_zip_7_cases = load_wa_zip_chd(file_path_wa_zip_c_7,sheet_name_wa_zip_c_7,list_columns_wa_zip_c_7)
# In[12]:
file_path_wa_zip_d_7 = "/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_overall-counts-rates-geography-mar-3.xlsx"
sheet_name_wa_zip_d_7 = 'ZIP'
list_columns_wa_zip_d_7 =['Location_Name','Deaths']
wa_chd_zip_7_deaths = load_wa_zip_chd(file_path_wa_zip_d_7,sheet_name_wa_zip_d_7,list_columns_wa_zip_d_7)
# In[13]:
file_path_wa_zip_h_7 = "/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_overall-counts-rates-geography-mar-3.xlsx"
sheet_name_wa_zip_h_7 = 'ZIP'
list_columns_wa_zip_h_7 =['Location_Name','Hospitalizations']
wa_chd_zip_7_hospitalizations = load_wa_zip_chd(file_path_wa_zip_h_7,sheet_name_wa_zip_h_7,list_columns_wa_zip_h_7)
# #### 21st Feb
# In[14]:
file_path_wa_zip_c_21 = "/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_overall-counts-rates-geography-feb-17 (1).xlsx"
sheet_name_wa_zip_c_21 = 'ZIP'
list_columns_wa_zip_c_21 =['Location_Name','Positives']
wa_chd_zip_21_cases = load_wa_zip_chd(file_path_wa_zip_c_21,sheet_name_wa_zip_c_21,list_columns_wa_zip_c_21)
# In[15]:
file_path_wa_zip_d_21 = "/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_overall-counts-rates-geography-feb-17 (1).xlsx"
sheet_name_wa_zip_d_21 = 'ZIP'
list_columns_wa_zip_d_21 =['Location_Name','Deaths']
wa_chd_zip_21_deaths = load_wa_zip_chd(file_path_wa_zip_d_21,sheet_name_wa_zip_d_21,list_columns_wa_zip_d_21)
# In[16]:
file_path_wa_zip_h_21 = "/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_overall-counts-rates-geography-feb-17 (1).xlsx"
sheet_name_wa_zip_h_21 = 'ZIP'
list_columns_wa_zip_h_21 =['Location_Name','Hospitalizations']
wa_chd_zip_21_hospitalizations = load_wa_zip_chd(file_path_wa_zip_h_21,sheet_name_wa_zip_h_21,list_columns_wa_zip_h_21)
# ### Covid County Vaccinations
# ### 7th March
# In[17]:
wa_vacc_7 = pd.read_excel(r'/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_Vaccination_County_Level_Counts.xlsx')
wa_vacc_7 = wa_vacc_7[['County','People Initiating Vaccination']]
# In[18]:
wa_vacc_7 = wa_vacc_7.drop([39,40])
# In[19]:
wa_vacc_7['County'] = wa_vacc_7['County'].apply(lambda x: x+' County')
# ### 21st February
# In[20]:
wa_vacc_21 = pd.read_excel(r'/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_Vaccination_County_Level_Counts.xlsx')
wa_vacc_21 = wa_vacc_21[['County','People Initiating Vaccination']]
# In[21]:
wa_vacc_21 = wa_vacc_21.drop([39,40])
# In[22]:
wa_vacc_21['County'] = wa_vacc_21['County'].apply(lambda x: x+' County')
# # Table Creation
# ### State_Name Creation
# In[23]:
state_data = [['1','Washington State']]
# In[24]:
df_state_name = pd.DataFrame(state_data, columns = ["State_ID", "State_Name"])
# In[25]:
state_name = df_state_name.copy()
# ### County_Name Creation
# In[26]:
county_raw = wa_chd_21_cases.copy()
county_raw.drop(columns=['TotalCases'], inplace = True)
county_raw_primary_key_list = [item for item in range(100,139)]
df_county_name = county_raw.copy()
df_county_name['State_ID'] = 1
df_county_name["County_ID"] = county_raw_primary_key_list
df_county_name = df_county_name[['County_ID','State_ID','County']]
df_county_name.rename(columns = {'County':'County_Name'},inplace = True)
county_name = df_county_name.copy()
# ### Zip_Table
# In[46]:
zip_data_raw = pd.read_excel(r'/Users/rohan20k/Desktop/data_imt_563/Washington_ZipCodes.xlsx')
zip_data_raw['County'] = zip_data_raw['County'].apply(lambda x:str(x) +' County')
zip_data_raw_merged = zip_data_raw.merge(df_county_name, left_on='County', right_on='County_Name')
df_zip_table = zip_data_raw_merged[['County_ID','Zip Code']]
zip_raw_primary_key_list = [item for item in range(1000,1732)]
df_zip_table.loc[:,'ZIP_ID'] = zip_raw_primary_key_list
df_zip_table = df_zip_table[['ZIP_ID','County_ID','Zip Code']]
zip_table = df_zip_table.rename({'Zip Code':'ZIP_Code'},axis = 1)
# ## Data Consistency
# ```
# The various tables we have -
#
# * wa_chd_7_cases
# ['County','TotalCases']
#
# * wa_chd_7_hospitalizations
# ['County','Hospitalizations']
#
# * wa_chd_7_deaths
# ['County','Deaths']
#
# * wa_chd_21_cases
# ['County','TotalCases']
#
# * wa_chd_21_hospitalizations
# ['County','Hospitalizations']
#
# * wa_chd_21_deaths
# ['County','Deaths']
#
# * wa_chd_zip_7_cases
# ['Location_Name','Positives']
#
# * wa_chd_zip_7_hospitalizations
# ['Location_Name','Hospitalizations']
#
# * wa_chd_zip_7_deaths
# ['Location_Name','Deaths']
#
# * wa_chd_zip_21_cases
# ['Location_Name','Positives']
#
# * wa_chd_zip_21_hospitalizations
# ['Location_Name','Hospitalizations']
#
# * wa_chd_zip_21_deaths
# ['Location_Name','Deaths']
#
# ```
# In[28]:
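# Consistency fix: the ZIP-level workbook appears to cover King County only, and row 16 of the
# alphabetically sorted county tables is King County. Its county-level totals are therefore
# overwritten with the corresponding ZIP-level sums; each pair of statements prints the values
# before and after the assignment for verification.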
wa_chd_7_cases.loc[16,'TotalCases'], wa_chd_zip_7_cases['Positives'].sum()
wa_chd_7_cases.loc[16,'TotalCases'] = 81654
wa_chd_7_cases.loc[16,'TotalCases'], wa_chd_zip_7_cases['Positives'].sum()
wa_chd_7_hospitalizations.loc[16,'Hospitalizations'], wa_chd_zip_7_hospitalizations['Hospitalizations'].sum()
wa_chd_7_hospitalizations.loc[16,'Hospitalizations'] = 5070
wa_chd_7_hospitalizations.loc[16,'Hospitalizations'], wa_chd_zip_7_hospitalizations['Hospitalizations'].sum()
wa_chd_7_deaths.loc[16,'Deaths'],wa_chd_zip_7_deaths['Deaths'].sum()
wa_chd_7_deaths.loc[16,'Deaths'] = 1394
wa_chd_7_deaths.loc[16,'Deaths'],wa_chd_zip_7_deaths['Deaths'].sum()
wa_chd_21_cases.loc[16,'TotalCases'], wa_chd_zip_21_cases['Positives'].sum()
wa_chd_21_cases.loc[16,'TotalCases'] = 79457
wa_chd_21_cases.loc[16,'TotalCases'], wa_chd_zip_21_cases['Positives'].sum()
wa_chd_21_hospitalizations.loc[16,'Hospitalizations'], wa_chd_zip_21_hospitalizations['Hospitalizations'].sum()
wa_chd_21_hospitalizations.loc[16,'Hospitalizations'] = 4970
wa_chd_21_hospitalizations.loc[16,'Hospitalizations'], wa_chd_zip_21_hospitalizations['Hospitalizations'].sum()
wa_chd_21_deaths.loc[16,'Deaths'],wa_chd_zip_21_deaths['Deaths'].sum()
wa_chd_21_deaths.loc[16,'Deaths'] = 1308
wa_chd_21_deaths.loc[16,'Deaths'],wa_chd_zip_21_deaths['Deaths'].sum()
# ### Table Creation Continued
# In[29]:
### Covid County Cases on 7th March
covid_countycases_7 = wa_chd_7_cases.copy()
covid_countycases_7_primary_key_list = [item for item in range(301,340)]
covid_countycases_7['CountyCases_ID'] = covid_countycases_7_primary_key_list
covid_countycases_7_merged = covid_countycases_7.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countycases_7_merged.rename({'TotalCases':'CountyCases_Sum'},axis = 1,inplace = True)
covid_countycases_7_merged = covid_countycases_7_merged[['CountyCases_ID','County_ID','CountyCases_Sum']]
covid_countycases_7_merged.loc[:,'LastUpdated'] = '2021-03-07'
### Covid County Cases on 21st February
covid_countycases_21 = wa_chd_21_cases.copy()
covid_countycases_21_primary_key_list = [item for item in range(262,301)]
covid_countycases_21['CountyCases_ID'] = covid_countycases_21_primary_key_list
covid_countycases_21_merged = covid_countycases_21.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countycases_21_merged.rename({'TotalCases':'CountyCases_Sum'},axis = 1,inplace = True)
covid_countycases_21_merged = covid_countycases_21_merged[['CountyCases_ID','County_ID','CountyCases_Sum']]
covid_countycases_21_merged.loc[:,'LastUpdated'] = '2021-02-21'
covid_countycases = covid_countycases_7_merged.append(covid_countycases_21_merged)
covid_countycases = covid_countycases.sort_values(by='CountyCases_ID').reset_index(drop=True)
# #### Adding the missing counties to the deaths table
# In[30]:
new_row = [{'County':'San Juan County','Deaths':0},{'County':'Wahkiakum County','Deaths':0}]
# In[31]:
wa_chd_21_deaths = wa_chd_21_deaths.append(new_row,ignore_index=True).sort_values(by='County').reset_index(drop=True)
# In[32]:
wa_chd_7_deaths = wa_chd_7_deaths.append(new_row,ignore_index=True).sort_values(by='County').reset_index(drop=True)
# In[33]:
### Covid County Deaths on 7th March
covid_countydeaths_7 = wa_chd_7_deaths.copy()
covid_countydeaths_7_primary_key_list = [item for item in range(3001,3040)]
covid_countydeaths_7['CountyDeaths_ID'] = covid_countydeaths_7_primary_key_list
covid_countydeaths_7_merged = covid_countydeaths_7.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countydeaths_7_merged.rename({'Deaths':'CountyDeaths_Sum'},axis = 1,inplace = True)
covid_countydeaths_7_merged.columns
covid_countydeaths_7_merged = covid_countydeaths_7_merged[['CountyDeaths_ID','County_ID','CountyDeaths_Sum']]
covid_countydeaths_7_merged.loc[:,'LastUpdated'] = '2021-03-07'
### Covid County Deaths on 21st February
covid_countydeaths_21 = wa_chd_21_deaths.copy()
covid_countydeaths_21_primary_key_list = [item for item in range(2962,3001)]
covid_countydeaths_21['CountyDeaths_ID'] = covid_countydeaths_21_primary_key_list
covid_countydeaths_21_merged = covid_countydeaths_21.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countydeaths_21_merged.rename({'Deaths':'CountyDeaths_Sum'},axis = 1,inplace = True)
covid_countydeaths_21_merged.columns
covid_countydeaths_21_merged = covid_countydeaths_21_merged[['CountyDeaths_ID','County_ID','CountyDeaths_Sum']]
covid_countydeaths_21_merged.loc[:,'LastUpdated'] = '2021-02-21'
covid_countydeaths = covid_countydeaths_7_merged.append(covid_countydeaths_21_merged)
covid_countydeaths = covid_countydeaths.sort_values(by='CountyDeaths_ID').reset_index(drop=True)
# In[34]:
### Covid County Hospitalizations on 7th March
covid_countyhospitalizations_7 = wa_chd_7_hospitalizations.copy()
covid_countyhospitalizations_7_primary_key_list = [item for item in range(30001,30040)]
covid_countyhospitalizations_7['CountyHospitalizations_ID'] = covid_countyhospitalizations_7_primary_key_list
covid_countyhospitalizations_7_merged = covid_countyhospitalizations_7.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countyhospitalizations_7_merged.columns
covid_countyhospitalizations_7_merged.rename({'Hospitalizations':'CountyHospitalizations_Sum'},axis = 1,inplace = True)
covid_countyhospitalizations_7_merged = covid_countyhospitalizations_7_merged[['CountyHospitalizations_ID','County_ID','CountyHospitalizations_Sum']]
covid_countyhospitalizations_7_merged.loc[:,'LastUpdated'] = '2021-03-07'
### Covid County Hospitalizations on 21st February
covid_countyhospitalizations_21 = wa_chd_21_hospitalizations.copy()
covid_countyhospitalizations_21_primary_key_list = [item for item in range(29962,30001)]
covid_countyhospitalizations_21['CountyHospitalizations_ID'] = covid_countyhospitalizations_21_primary_key_list
covid_countyhospitalizations_21_merged = covid_countyhospitalizations_21.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countyhospitalizations_21_merged.columns
covid_countyhospitalizations_21_merged.rename({'Hospitalizations':'CountyHospitalizations_Sum'},axis = 1,inplace = True)
covid_countyhospitalizations_21_merged = covid_countyhospitalizations_21_merged[['CountyHospitalizations_ID','County_ID','CountyHospitalizations_Sum']]
covid_countyhospitalizations_21_merged.loc[:,'LastUpdated'] = '2021-02-21'
covid_countyhospitalizations = covid_countyhospitalizations_7_merged.append(covid_countyhospitalizations_21_merged)
covid_countyhospitalizations = covid_countyhospitalizations.sort_values(by='CountyHospitalizations_ID').reset_index(drop=True)
# ## County Level Zip Data
# In[35]:
wa_chd_zip_7_cases.head(5)
# In[36]:
### Covid Zip Deaths on 7th March
covid_countycases_zip_7 = wa_chd_zip_7_cases.copy()
covid_countycases_zip_7.dtypes
zip_table.dtypes
covid_countycases_zip_7['Location_Name'] = covid_countycases_zip_7['Location_Name'].astype('int')
covid_countycases_zip_7_primary_key_list = [item for item in range(300,384)]
covid_countycases_zip_7['ZIPCases_ID'] = covid_countycases_zip_7_primary_key_list
covid_countycases_zip_7_merged = covid_countycases_zip_7.merge(zip_table, left_on='Location_Name', right_on='ZIP_Code')
covid_countycases_zip_7_merged.columns
covid_countycases_zip_7_merged.rename({'Positives':'ZIPCases_Sum'},axis = 1,inplace = True)
covid_countycases_zip_7_merged = covid_countycases_zip_7_merged[['ZIPCases_ID','ZIP_ID','ZIPCases_Sum']]
covid_countycases_zip_7_merged['LastUpdated'] = '2021-03-07'
### Covid Zip Deaths on 21st February
covid_countycases_zip_21 = wa_chd_zip_21_cases.copy()
covid_countycases_zip_21['Location_Name'] = covid_countycases_zip_21['Location_Name'].astype('int')
covid_countycases_zip_21_primary_key_list = [item for item in range(216,300)]
covid_countycases_zip_21['ZIPCases_ID'] = covid_countycases_zip_21_primary_key_list
covid_countycases_zip_21_merged = covid_countycases_zip_21.merge(zip_table, left_on='Location_Name', right_on='ZIP_Code')
covid_countycases_zip_21_merged.columns
covid_countycases_zip_21_merged.rename({'Positives':'ZIPCases_Sum'},axis = 1,inplace = True)
covid_countycases_zip_21_merged = covid_countycases_zip_21_merged[['ZIPCases_ID','ZIP_ID','ZIPCases_Sum']]
covid_countycases_zip_21_merged['LastUpdated'] = '2021-02-21'
covid_zipcases = covid_countycases_zip_7_merged.append(covid_countycases_zip_21_merged)
covid_zipcases = covid_zipcases.sort_values(by='ZIPCases_ID').reset_index(drop=True)
# In[37]:
### Covid Zip Deaths on 7th March
covid_countydeaths_zip_7 = wa_chd_zip_7_deaths.copy()
covid_countydeaths_zip_7['Location_Name'] = covid_countydeaths_zip_7['Location_Name'].astype('int')
covid_countydeaths_zip_7_primary_key_list = [item for item in range(3000,3084)]
covid_countydeaths_zip_7['ZIPDeaths_ID'] = covid_countydeaths_zip_7_primary_key_list
covid_countydeaths_zip_7_merged = covid_countydeaths_zip_7.merge(zip_table, left_on='Location_Name', right_on='ZIP_Code')
covid_countydeaths_zip_7_merged.rename({'Deaths':'ZIPDeaths_Sum'},axis = 1,inplace = True)
covid_countydeaths_zip_7_merged = covid_countydeaths_zip_7_merged[['ZIPDeaths_ID','ZIP_ID','ZIPDeaths_Sum']]
covid_countydeaths_zip_7_merged.loc[:,'LastUpdated'] = '2021-03-07'
# -----------------
### Covid Zip Deaths on 21st February
covid_countydeaths_zip_21 = wa_chd_zip_21_deaths.copy()
covid_countydeaths_zip_21['Location_Name'] = covid_countydeaths_zip_21['Location_Name'].astype('int')
covid_countydeaths_zip_21_primary_key_list = [item for item in range(2916,3000)]
covid_countydeaths_zip_21['ZIPDeaths_ID'] = covid_countydeaths_zip_21_primary_key_list
covid_countydeaths_zip_21_merged = covid_countydeaths_zip_21.merge(zip_table, left_on='Location_Name', right_on='ZIP_Code')
covid_countydeaths_zip_21_merged.rename({'Deaths':'ZIPDeaths_Sum'},axis = 1,inplace = True)
covid_countydeaths_zip_21_merged = covid_countydeaths_zip_21_merged[['ZIPDeaths_ID','ZIP_ID','ZIPDeaths_Sum']]
covid_countydeaths_zip_21_merged.loc[:,'LastUpdated'] = '2021-02-21'
covid_zipdeaths = covid_countydeaths_zip_7_merged.append(covid_countydeaths_zip_21_merged)
covid_zipdeaths = covid_zipdeaths.sort_values(by='ZIPDeaths_ID').reset_index(drop=True)
# In[38]:
### Covid Zip Hospitalizations on 7th March
covid_countyhospitalizations_zip_7 = wa_chd_zip_7_hospitalizations.copy()
covid_countyhospitalizations_zip_7.dtypes
covid_countyhospitalizations_zip_7['Location_Name'] = covid_countyhospitalizations_zip_7['Location_Name'].astype('int')
covid_countyhospitalizations_zip_7_primary_key_list = [item for item in range(30000,30084)]
covid_countyhospitalizations_zip_7['ZIPHospitalizations_ID'] = covid_countyhospitalizations_zip_7_primary_key_list
covid_countyhospitalizations_zip_7_merged =covid_countyhospitalizations_zip_7.merge(zip_table, left_on='Location_Name', right_on='ZIP_Code')
covid_countyhospitalizations_zip_7_merged = covid_countyhospitalizations_zip_7_merged.rename({'Hospitalizations':'ZIPHospitalizations_Sum'},axis =1)
covid_countyhospitalizations_zip_7_merged = covid_countyhospitalizations_zip_7_merged[['ZIPHospitalizations_ID','ZIP_ID','ZIPHospitalizations_Sum']]
covid_countyhospitalizations_zip_7_merged.loc[:,'LastUpdated'] = '2021-03-07'
# ----------------
### Covid Zip Hospitalizations on 21st February
covid_countyhospitalizations_zip_21 = wa_chd_zip_21_hospitalizations.copy()
covid_countyhospitalizations_zip_21['Location_Name'] = covid_countyhospitalizations_zip_21['Location_Name'].astype('int')
covid_countyhospitalizations_zip_21_primary_key_list = [item for item in range(29916,30000)]
covid_countyhospitalizations_zip_21['ZIPHospitalizations_ID'] = covid_countyhospitalizations_zip_21_primary_key_list
covid_countyhospitalizations_zip_21_merged =covid_countyhospitalizations_zip_21.merge(zip_table, left_on='Location_Name', right_on='ZIP_Code')
covid_countyhospitalizations_zip_21_merged = covid_countyhospitalizations_zip_21_merged.rename({'Hospitalizations':'ZIPHospitalizations_Sum'},axis =1)
covid_countyhospitalizations_zip_21_merged = covid_countyhospitalizations_zip_21_merged[['ZIPHospitalizations_ID','ZIP_ID','ZIPHospitalizations_Sum']]
covid_countyhospitalizations_zip_21_merged.loc[:,'LastUpdated'] = '2021-02-21'
covid_ziphospitalizations = covid_countyhospitalizations_zip_7_merged.append(covid_countyhospitalizations_zip_21_merged)
covid_ziphospitalizations = covid_ziphospitalizations.sort_values(by='ZIPHospitalizations_ID').reset_index(drop=True)
# ## Covid 19 Vaccinations County Wise
# In[39]:
### County Vaccinations on 7th March
covid_countyvaccinations_7 = wa_vacc_7.copy()
covid_countyvaccinations_7_primary_key_list = [item for item in range(800,839)]
covid_countyvaccinations_7['CountyVaccinations_ID'] = covid_countyvaccinations_7_primary_key_list
covid_countyvaccinations_7_merged = covid_countyvaccinations_7.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countyvaccinations_7_merged = covid_countyvaccinations_7_merged.rename({'People Initiating Vaccination':'CountyVaccinations_Sum'},axis =1 )
covid_countyvaccinations_7_merged.columns
covid_countyvaccinations_7_merged = covid_countyvaccinations_7_merged[['CountyVaccinations_ID',
'County_ID',
'CountyVaccinations_Sum']]
covid_countyvaccinations_7_merged['LastUpdated'] = '2021-03-07'
### County Vaccinations on 21st February
covid_countyvaccinations_21 = wa_vacc_21.copy()
covid_countyvaccinations_21_primary_key_list = [item for item in range(761,800)]
covid_countyvaccinations_21['CountyVaccinations_ID'] = covid_countyvaccinations_21_primary_key_list
covid_countyvaccinations_21_merged = covid_countyvaccinations_21.merge(df_county_name, left_on='County', right_on='County_Name')
covid_countyvaccinations_21_merged = covid_countyvaccinations_21_merged.rename({'People Initiating Vaccination':'CountyVaccinations_Sum'},axis =1 )
covid_countyvaccinations_21_merged.columns
covid_countyvaccinations_21_merged = covid_countyvaccinations_21_merged[['CountyVaccinations_ID',
'County_ID',
'CountyVaccinations_Sum']]
covid_countyvaccinations_21_merged['LastUpdated'] = '2021-02-21'
covid_countyvaccinations = covid_countyvaccinations_7_merged.append(covid_countyvaccinations_21_merged)
covid_countyvaccinations = covid_countyvaccinations.sort_values(by='CountyVaccinations_ID').reset_index(drop=True)
# ## Vaccination Sites
# In[40]:
vaccination_sites_df = pd.read_excel(r'/Users/rohan20k/Desktop/data_imt_563/WaVaccinationSites__20210121181237.xlsx')
vaccination_sites_df.columns
vaccination_sites_df = vaccination_sites_df[['County',
'Facility','Address','ZIP','Info Website','Email','Phone','Scheduling Site','Instructions for Public','Walk-In Instructions','Appt Available']]
vaccination_sites_df.loc[:,'County'] = vaccination_sites_df.loc[:,'County'].apply(lambda x:x+' County')
vaccination_sites_df_primary_key_list = [item for item in range(6000,6246)]
vaccination_sites_df['Site_ID'] = vaccination_sites_df_primary_key_list
vaccination_sites_df_merged =vaccination_sites_df.merge(zip_table, left_on='ZIP', right_on='ZIP_Code')
vaccination_sites_df_merged = vaccination_sites_df_merged[['Site_ID',
'County_ID',
'ZIP_ID',
'Facility',
'Address',
'Info Website',
'Email',
'Phone',
'Instructions for Public',
'Scheduling Site',
'Appt Available']]
vaccination_sites_df_merged.loc[:,'LastUpdated'] = '2021-02-21'
vaccination_sites_df_merged = vaccination_sites_df_merged.rename({'Facility':'Site_Name',
'Address':'Site_Address',
'Info Website':'Info_URL',
'Email':'Site_Email',
'Phone':'Site_Phone',
'Instructions for Public':'Site_Instructions',
'Scheduling Site':'Scheduling_URL',
'Appt Available':'Appt_Available_Status'},axis = 1)
vaccination_sites_df_merged['LastUpdated'] = vaccination_sites_df_merged['LastUpdated'].astype('datetime64[ns]')
vaccination_sites_df_merged['Appt_Available_Status'] = vaccination_sites_df_merged['Appt_Available_Status'].astype('bool')
vaccination_sites_df_merged.dtypes
vaccination_sites = vaccination_sites_df_merged.copy()
# ## Datatype Verification
# state_name,
# county_name,
# zip_table,
# covid_countycases,
# covid_countydeaths,
# covid_countyhospitalizations,
# covid_countyvaccinations,
# covid_zipcases,
# covid_zipdeaths,
# covid_ziphospitalizations
# In[41]:
state_name.dtypes
state_name['State_ID'] = df_state_name['State_ID'].astype('int64')
state_name.dtypes
county_name.dtypes
zip_table.dtypes
covid_countycases['LastUpdated']=covid_countycases['LastUpdated'].astype('datetime64[ns]')
covid_countycases.dtypes
covid_countydeaths['LastUpdated']=covid_countydeaths['LastUpdated'].astype('datetime64[ns]')
covid_countydeaths.dtypes
covid_countyhospitalizations['LastUpdated']=covid_countyhospitalizations['LastUpdated'].astype('datetime64[ns]')
covid_countyhospitalizations.dtypes
covid_countyvaccinations['LastUpdated']=covid_countyvaccinations['LastUpdated'].astype('datetime64[ns]')
covid_countyvaccinations.dtypes
covid_zipcases['LastUpdated']=covid_zipcases['LastUpdated'].astype('datetime64[ns]')
covid_zipcases.dtypes
covid_zipdeaths['LastUpdated']=covid_zipdeaths['LastUpdated'].astype('datetime64[ns]')
covid_zipdeaths.dtypes
covid_ziphospitalizations['LastUpdated']=covid_ziphospitalizations['LastUpdated'].astype('datetime64[ns]')
covid_ziphospitalizations.dtypes
engine = create_engine(URL(
account = 'tca69088',
role = 'SYSADMIN',
user = 'Group7',
password = '<PASSWORD>!',
database = 'IMT_DB',
schema = 'PUBLIC',
))
def data_push(df,table_name):
df['Upload_Timestamp'] = pd.Timestamp.now(tz="America/Los_Angeles")
df.to_sql(table_name.lower(),con=engine,if_exists='replace',index = False)
connection = engine.connect()
connection.close()
engine.dispose()
print(f"Data has been successfully transferred to Snowflake in {table_name}")
| pd.set_option("display.max_rows", None, "display.max_columns", None) | pandas.set_option |
"""
/*
* Copyright (C) 2019-2021 University of South Florida
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import os
# Import dependencies
from collections import defaultdict
from pathlib import Path
import haversine as hs
import numpy as np
import pandas as pd
from haversine import Unit
from src.gt_merger import constants
from src.gt_merger.args import get_parser
from src.gt_merger.preprocess import preprocess_gt_data, preprocess_oba_data, is_valid_oba_dataframe, \
is_valid_gt_dataframe
# -------------------------------------------
def main():
# Verify if the OBA input file exists
if not os.path.isfile(command_line_args.obaFile):
print("OBA data file not found:", command_line_args.obaFile)
exit()
# Verify if GT input file exists
if not os.path.isfile(command_line_args.gtFile):
print("Ground truth data file not found:", command_line_args.gtFile)
exit()
# Verify if there is a list of devices
if command_line_args.deviceList:
# Verify if the list of devices file exists
if os.path.isfile(command_line_args.deviceList):
with open(command_line_args.deviceList) as f:
list_of_devices = f.readline().split(",")
list_of_devices = [s.strip() for s in list_of_devices]
else:
print("File with white list of devices not found:", command_line_args.deviceList)
exit()
else:
list_of_devices = []
# Verify if the data folder exists
if not os.path.isdir(command_line_args.outputDir):
print("Data folder not found, trying to create it in the current working directory:",
command_line_args.outputDir)
try:
os.makedirs(command_line_args.outputDir, exist_ok=True)
except OSError:
print("There was an error while creating the data folder:", command_line_args.outputDir)
exit()
# Create sub-folders for output an logs
path_logs = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS)
if not os.path.isdir(path_logs):
try:
os.mkdir(path_logs)
except OSError:
print("There was an error while creating the sub folder for logs:", path_logs)
exit()
path_output = os.path.join(command_line_args.outputDir, constants.FOLDER_MERGED_DATA)
if not os.path.isdir(path_output):
try:
os.mkdir(path_output)
except OSError:
print("There was an error while creating the sub-folder for output files:", path_logs)
exit()
# Create path OS independent for excel file
excel_path = Path(command_line_args.gtFile)
# Load ground truth data to a dataframe
gt_data = pd.read_excel(excel_path)
# Validate gt dataframe
if not is_valid_gt_dataframe(gt_data):
print("Ground truth data frame is empty or does not have the required columns.")
exit()
# Preprocess ground truth data
gt_data, data_gt_dropped = preprocess_gt_data(gt_data, command_line_args.removeStillMode)
print("Ground truth data preprocessed.")
# Save data to be dropped to a csv file
dropped_file_path = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS,
constants.GT_DROPPED_DATA_FILE_NAME)
data_gt_dropped.to_csv(path_or_buf=dropped_file_path, index=False)
# Create path OS independent for csv file
csv_path = Path(command_line_args.obaFile)
# Load OBA data
oba_data = pd.read_csv(csv_path)
# Validate oba dataframe
if not is_valid_oba_dataframe(oba_data):
print("OBA data frame is empty or does not have the required columns.")
exit()
# If a devices white list was provided, list the devices
if list_of_devices:
oba_data = oba_data[oba_data["User ID"].isin(list_of_devices)]
# Preprocess OBA data
oba_data, data_csv_dropped = preprocess_oba_data(oba_data, command_line_args.minActivityDuration,
command_line_args.minTripLength, command_line_args.removeStillMode)
print("OBA data preprocessed.")
print(oba_data.info())
print(gt_data.info())
# Data preprocessing IS OVER
# Save oba dropped data to a csv file
dropped_file_path = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS,
constants.OBA_DROPPED_DATA_FILE_NAME)
data_csv_dropped.to_csv(path_or_buf=dropped_file_path, index=False)
if command_line_args.iterateOverTol:
first_tol = 30000
save_to_path = os.path.join(constants.FOLDER_MERGED_DATA, "batch")
else:
save_to_path = os.path.join(constants.FOLDER_MERGED_DATA)
first_tol = constants.TOLERANCE
for tol in range(first_tol, command_line_args.tolerance + 1, constants.CALCULATE_EVERY_N_SECS):
print("TOLERANCE:", str(tol))
# merge dataframes one to one or one to many according to the commandline parameter
if command_line_args.mergeOneToOne:
merged_data_frame, num_matches_df = merge(gt_data, oba_data, tol)
else:
merged_data_frame, num_matches_df, unmatched_oba_trips_df = merge_to_many(gt_data, oba_data, tol)
        if not command_line_args.mergeOneToOne:
            # Save unmatched oba records to csv (only produced by the one-to-many merge)
            unmatched_file_path = os.path.join(command_line_args.outputDir, save_to_path,
                                               "oba_records_without_match_on_GT.csv")
            unmatched_oba_trips_df.to_csv(path_or_buf=unmatched_file_path, index=False)
# Calculate difference
merged_data_frame['Time_Difference'] = merged_data_frame.apply(
lambda x: (x['Activity Start Date and Time* (UTC)'] - x['GT_DateTimeOrigUTC_Backup']) / np.timedelta64(1, 's')
if pd.notna(x['Activity Start Date and Time* (UTC)']) else "", 1)
# Calculate distance between GT and OBA starting points
merged_data_frame['Distance_Difference'] = merged_data_frame.apply(
lambda row: hs.haversine((row['GT_LatOrig'], row['GT_LonOrig']),
(row['Origin latitude (*best)'], row['Origin longitude (*best)']),
unit=Unit.METERS), axis=1)
# Add Manual Assignment Column before reorganize
merged_data_frame["Manual Assignment"] = ''
# Reorder merged dataframe columns
new_column_orders = constants.GT_NEW_COLUMNS_ORDER + constants.OBA_NEW_COLUMNS_ORDER
merged_data_frame = merged_data_frame[new_column_orders]
# Save merged data to csv
merged_file_path = os.path.join(command_line_args.outputDir, save_to_path,
constants.MERGED_DATA_FILE_NAME + "_" + str(tol) + ".csv")
num_matches_file_path = os.path.join(command_line_args.outputDir, save_to_path,
"num_matches" + "_" + str(tol) + ".csv")
merged_data_frame.to_csv(path_or_buf=merged_file_path, index=False)
num_matches_df.to_csv(path_or_buf=num_matches_file_path, index=False)
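# Illustrative helper (synthetic data; never called): a minimal demonstration of the
# pd.merge_asof pairing that merge() below relies on: each ground-truth row is matched to the
# nearest following OBA activity that starts within the tolerance window.
def _merge_asof_toy_example(tolerance_ms=30000):
    left = pd.DataFrame({"GT_DateTimeOrigUTC": pd.to_datetime(["2021-01-01 10:00:00",
                                                               "2021-01-01 11:00:00"])})
    right = pd.DataFrame({"Activity Start Date and Time* (UTC)":
                          pd.to_datetime(["2021-01-01 10:00:10", "2021-01-01 12:00:00"])})
    return pd.merge_asof(left, right,
                         left_on="GT_DateTimeOrigUTC",
                         right_on="Activity Start Date and Time* (UTC)",
                         direction="forward",
                         tolerance=pd.Timedelta(str(tolerance_ms) + "ms"))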
def merge(gt_data, oba_data, tolerance):
"""
Merge gt_data dataframe and oba_data dataframe using the nearest value between columns 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'. Before merging, the data is grouped by 'GT_Collector' on gt_data and
each row on gt_data will be paired with one or none of the rows on oba_data grouped by userId.
:param tolerance: maximum allowed difference (seconds) between 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'.
:param gt_data: dataframe with preprocessed data from ground truth XLSX data file
:param oba_data: dataframe with preprocessed data from OBA firebase export CSV data file
:return: dataframe with the merged data and a dataframe with summary of matches by collector/oba_user(phone).
"""
list_collectors = gt_data['GT_Collector'].unique()
list_oba_users = oba_data['User ID'].unique()
merged_df = pd.DataFrame()
matches_df = pd.DataFrame(list_collectors, columns=['GT_Collector'])
list_total_trips = []
list_matches = []
matches_dict = defaultdict(list)
for collector in list_collectors:
print("Merging data for collector ", collector)
# Create dataframe for a collector on list_collectors
gt_data_collector = gt_data[gt_data["GT_Collector"] == collector]
# Make sure dataframe is sorted by 'ClosesTime'
gt_data_collector.sort_values('GT_DateTimeOrigUTC', inplace=True)
# Add total trips per collector
list_total_trips.append(len(gt_data_collector))
i = 0
list_matches_by_phone = []
for oba_user in list_oba_users:
# Create a dataframe with the oba_user activities only
oba_data_user = oba_data[oba_data["User ID"] == oba_user]
# Make sure dataframes is sorted by 'Activity Start Date and Time* (UTC)'
oba_data_user.sort_values('Activity Start Date and Time* (UTC)', inplace=True)
temp_merge = pd.merge_asof(gt_data_collector, oba_data_user, left_on="GT_DateTimeOrigUTC",
right_on="Activity Start Date and Time* (UTC)",
direction="forward",
tolerance=pd.Timedelta(str(tolerance) + "ms"), left_by='GT_Mode',
right_by='Google Activity')
merged_df = | pd.concat([merged_df, temp_merge], ignore_index=True) | pandas.concat |
from CommonServerPython import *
from FindEmailCampaign import *
import json
from datetime import datetime
import pandas as pd
import tldextract
from email.utils import parseaddr
import pytest
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
def extract_domain(address):
global no_fetch_extract
if address == '':
return ''
email_address = parseaddr(address)[1]
ext = no_fetch_extract(email_address)
return '{}.{}'.format(ext.domain, ext.suffix)
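# Illustrative behaviour of extract_domain (example inputs are assumptions): the display name is
# stripped via parseaddr and only the registered domain is kept, e.g.
#   extract_domain('John Doe <john@example.com>')  ->  'example.com'
#   extract_domain('')                             ->  ''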
EXISTING_INCIDENTS = []
RESULTS = None
EXISTING_INCIDENT_ID = DUP_INCIDENT_ID = None
IDS_COUNTER = 57878
text = "Imagine there's no countries It isn't hard to do Nothing to kill or die for And no religion too " \
"Imagine all the people Living life in peace"
text2 = "Love of my life, you've hurt me You've broken my heart and now you leave me Love of my life, can't you see?\
Bring it back, bring it back Don't take it away from me, because you don't know What it means to me"
INCIDENTS_CONTEXT_KEY = 'EmailCampaign.' + INCIDENTS_CONTEXT_TD
def create_incident(subject=None, body=None, html=None, emailfrom=None, created=None, id_=None,
similarity=0, sender='<EMAIL>', emailto='<EMAIL>', emailcc='',
emailbcc='', status=1, severity=1):
global IDS_COUNTER
dt_format = '%Y-%m-%d %H:%M:%S.%f %z'
incident = {
"id": id_ if id_ is not None else str(IDS_COUNTER),
"name": ' '.join(str(x) for x in [subject, body, html, emailfrom]),
'created': created.strftime(dt_format) if created is not None else datetime.now().strftime(dt_format),
'type': 'Phishing',
'similarity': similarity,
'emailfrom': sender,
PREPROCESSED_EMAIL_BODY: body,
'emailbodyhtml': html,
PREPROCESSED_EMAIL_SUBJECT: subject,
'fromdomain': extract_domain(sender),
'emailto': emailto,
'emailcc': emailcc,
'emailbcc': emailbcc,
'status': status,
'severity': severity
}
return incident
def set_existing_incidents_list(incidents_list):
global EXISTING_INCIDENTS
EXISTING_INCIDENTS = incidents_list
def executeCommand(command, args=None):
global EXISTING_INCIDENTS, EXISTING_INCIDENT_ID, DUP_INCIDENT_ID
if command == 'FindDuplicateEmailIncidents':
incidents_str = json.dumps(EXISTING_INCIDENTS)
return [{'Contents': incidents_str, 'Type': 'not error'}]
if command == 'CloseInvestigationAsDuplicate':
EXISTING_INCIDENT_ID = args['duplicateId']
def results(arg):
global RESULTS
RESULTS.append(arg)
def mock_summarize_email_body(body, subject, nb_sentences=3, subject_weight=1.5, keywords_weight=1.5):
return '{}\n{}'.format(subject, body)
def test_return_campaign_details_entry(mocker):
global RESULTS
RESULTS = []
mocker.patch.object(demisto, 'results', side_effect=results)
mocker.patch('FindEmailCampaign.summarize_email_body', mock_summarize_email_body)
inciddent1 = create_incident(subject='subject', body='email body')
incidents_list = [inciddent1]
data = pd.DataFrame(incidents_list)
return_campaign_details_entry(data, fields_to_display=[])
res = RESULTS[0]
context = res['EntryContext']
assert context['EmailCampaign.isCampaignFound']
assert context['EmailCampaign.involvedIncidentsCount'] == len(data)
for original_incident, context_incident in zip(incidents_list, context[INCIDENTS_CONTEXT_KEY]):
for k in ['id', 'similarity', 'emailfrom']:
assert original_incident[k] == context_incident[k]
assert original_incident['emailto'] in context_incident['recipients']
assert original_incident['fromdomain'] == context_incident['emailfromdomain']
assert extract_domain(original_incident['emailto']) in context_incident['recipientsdomain']
def test_return_campaign_details_entry_comma_seperated_recipients(mocker):
global RESULTS
RESULTS = []
mocker.patch.object(demisto, 'results', side_effect=results)
mocker.patch('FindEmailCampaign.summarize_email_body', mock_summarize_email_body)
inciddent1 = create_incident(subject='subject', body='email body', emailto='<EMAIL>, <EMAIL>')
incidents_list = [inciddent1]
data = pd.DataFrame(incidents_list)
return_campaign_details_entry(data, fields_to_display=[])
res = RESULTS[0]
context = res['EntryContext']
assert context['EmailCampaign.isCampaignFound']
assert context['EmailCampaign.involvedIncidentsCount'] == len(data)
for original_incident, context_incident in zip(incidents_list, context[INCIDENTS_CONTEXT_KEY]):
for k in ['id', 'similarity', 'emailfrom']:
assert original_incident[k] == context_incident[k]
for recipient in original_incident['emailto'].split(','):
assert recipient.strip() in context_incident['recipients']
assert extract_domain(recipient) in context_incident['recipientsdomain']
assert original_incident['fromdomain'] == context_incident['emailfromdomain']
def test_return_campaign_details_entry_list_dumped_recipients(mocker):
global RESULTS
RESULTS = []
mocker.patch.object(demisto, 'results', side_effect=results)
mocker.patch('FindEmailCampaign.summarize_email_body', mock_summarize_email_body)
inciddent1 = create_incident(subject='subject', body='email body', emailto='["<EMAIL>", "<EMAIL>"]')
incidents_list = [inciddent1]
data = pd.DataFrame(incidents_list)
return_campaign_details_entry(data, fields_to_display=[])
res = RESULTS[0]
context = res['EntryContext']
assert context['EmailCampaign.isCampaignFound']
assert context['EmailCampaign.involvedIncidentsCount'] == len(data)
for original_incident, context_incident in zip(incidents_list, context[INCIDENTS_CONTEXT_KEY]):
for k in ['id', 'similarity', 'emailfrom']:
assert original_incident[k] == context_incident[k]
for recipient in json.loads(original_incident['emailto']):
assert recipient.strip() in context_incident['recipients']
assert extract_domain(recipient) in context_incident['recipientsdomain']
assert original_incident['fromdomain'] == context_incident['emailfromdomain']
def test_return_campaign_details_entry_list_dumped_recipients_cc(mocker):
global RESULTS
RESULTS = []
mocker.patch.object(demisto, 'results', side_effect=results)
mocker.patch('FindEmailCampaign.summarize_email_body', mock_summarize_email_body)
inciddent1 = create_incident(subject='subject', body='email body', emailcc='["<EMAIL>", "<EMAIL>"]')
incidents_list = [inciddent1]
data = pd.DataFrame(incidents_list)
return_campaign_details_entry(data, fields_to_display=[])
res = RESULTS[0]
context = res['EntryContext']
assert context['EmailCampaign.isCampaignFound']
assert context['EmailCampaign.involvedIncidentsCount'] == len(data)
for original_incident, context_incident in zip(incidents_list, context[INCIDENTS_CONTEXT_KEY]):
for k in ['id', 'similarity', 'emailfrom']:
assert original_incident[k] == context_incident[k]
for recipient in json.loads(original_incident['emailcc']):
assert recipient.strip() in context_incident['recipients']
assert extract_domain(recipient) in context_incident['recipientsdomain']
assert original_incident['fromdomain'] == context_incident['emailfromdomain']
ADDITIONAL_CONTEXT_KEYS_PARAMETRIZE = [
(['name', 'emailfrom', 'emailto', 'severity', 'status', 'created']),
(['name', 'emailfrom', 'emailto'])
]
def prepare_additional_context_fields_test(mocker):
global RESULTS
RESULTS = []
# prepare
mocker.patch.object(demisto, 'results', side_effect=results)
mocker.patch('FindEmailCampaign.summarize_email_body', mock_summarize_email_body)
incident = create_incident(
subject='subject', body='email body',
emailfrom='<EMAIL>', emailto='<EMAIL>, <EMAIL>', emailcc='["<EMAIL>", "<EMAIL>"]')
incidents_list = [incident]
data = | pd.DataFrame(incidents_list) | pandas.DataFrame |
# import pandas and numpy, and load the covid data
import pandas as pd
import numpy as np
pd.set_option('display.width', 200)
pd.set_option('display.max_columns', 35)
| pd.set_option('display.max_rows', 200) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_pricing_zcb [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_pricing_zcb&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-pricing-normal-quad-approx).
# +
import numpy as np
import pandas as pd
from scipy.linalg import expm
import matplotlib.pyplot as plt
from datetime import timedelta
from arpym.pricing import zcb_value
from arpym.statistics import moments_mvou
from arpym.tools import histogram_sp, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_zcb-parameters)
tau_hor = 3 # time to horizon
j_ = 1000 # number of scenarios
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_zcb-implementation-step00): Upload data
# +
path = '../../../databases/temporary-databases'
df = pd.read_csv(path + '/db_proj_scenarios_yield.csv', header=0)
j_m_, _ = df.shape
df2 = pd.read_csv(path + '/db_proj_dates.csv', header=0, parse_dates=True)
t_m = np.array(pd.to_datetime(df2.values.reshape(-1)), dtype='datetime64[D]')
m_ = t_m.shape[0]-1
deltat_m = np.busday_count(t_m[0], t_m[1])
if tau_hor > m_:
print(" Projection doesn't have data until given horizon!!! Horizon lowered to ", m_)
tau_hor = m_
# number of monitoring times
m_ = tau_hor
t_m = t_m[:m_+1]
t_now = t_m[0]
t_hor = t_m[-1]
tau = np.array(list(map(int, df.columns))) # times to maturity
d_ = tau.shape[0]
x_tnow_thor = np.array(df).reshape(j_, int(j_m_/j_), d_)
x_tnow_thor = x_tnow_thor[:j_, :m_+1, :]
y_tnow = x_tnow_thor[0, 0, :]
y_thor = x_tnow_thor[:, -1, :]
df = | pd.read_csv(path + '/db_proj_scenarios_yield_par.csv', header=0) | pandas.read_csv |
"""
Organisation: ekholabs
Author: <EMAIL>
"""
import pandas as pd
'''
Let's now use Pandas' Series and DataFrames to create our own data structures.
'''
def create_series():
fruits = | pd.Series(['Banana', 'Kilo', .63]) | pandas.Series |
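
# Illustrative sketch (not from the original file): a companion helper that builds a
# DataFrame the same way create_series builds a Series, so both structures are shown.
def create_dataframe():
    data = {'fruit': ['Banana', 'Orange'], 'price': [0.63, 0.79]}
    frame = pd.DataFrame(data)
    print(frame)
    return frame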
"""
Coding: UTF-8
Author: Randal
Time: 2021/2/20
E-mail: <EMAIL>
Description: This is a simple toolkit for data extraction of text.
The most important function in the script is about word frequency statistics.
Using re, I generalized the process in words counting, regardless of any preset
word segmentation. Besides, many interesting functions, like getting top sentences are built here.
All rights reserved.
"""
import xlwings as xw
import pandas as pd
import numpy as np
import os
import re
from alive_progress import alive_bar
from alive_progress import show_bars, show_spinners
import jieba
import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import math
class jieba_vectorizer(CountVectorizer):
def __init__(self, tf, userdict, stopwords, orient=False):
"""
:param tf: 输入的样本框,{axis: 1, 0: id, 1: 标题, 2: 正文, 3: 来源, 4: freq}
:param stopwords: 停用词表的路径
:param user_dict_link: 关键词清单的路径
:param orient: {True: 返回的 DTM 只包括关键词清单中的词,False: 返回 DTM 中包含全部词语}
:return: 可以直接使用的词向量样本
"""
self.userdict = userdict
self.orient = orient
self.stopwords = stopwords
        jieba.load_userdict(self.userdict)  # load the keyword dictionary
        tf = tf.copy()  # avoid modifying the original sample frame outside this function
        print('Tokenizing, please wait...')
        rule = re.compile(u'[^\u4e00-\u9fa5]')  # clean every sample, keeping only Chinese characters
for i in range(0, tf.shape[0]):
try:
tf.iloc[i, 2] = rule.sub('', tf.iloc[i, 2])
except TypeError:
                print('Sample cleaning error: doc_id = ' + str(i))
continue
        if self.stopwords is not None:
            stopwords = txt_to_list(self.stopwords)  # load the stop-word list
        else:
            stopwords = []
        # start tokenizing
words = []
items = range(0, len(tf))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i, row in tf.iterrows():
item = row['正文']
result = jieba.cut(item)
                # filter out stop words at the same time
word = ''
for element in result:
if element not in stopwords:
if element != '\t':
word += element
word += " "
words.append(word)
bar()
        # CountVectorizer() handles the word-frequency counting automatically; fit_transform builds the text vectors and the vocabulary
        # to switch to TfidfVectorizer, only the three lines below need to change
vect = CountVectorizer()
X = vect.fit_transform(words)
self.vectorizer = vect
matrix = X
X = X.toarray()
        # a 2-D ndarray can be inspected in PyCharm, but it is fundamentally different from a DataFrame
        # an ndarray has no index and no columns
features = vect.get_feature_names()
XX = pd.DataFrame(X, index=tf['id'], columns=features)
self.DTM0 = matrix
self.DTM = XX
self.features = features
        # # the block below was an earlier dead end, kept only for reference
        # words_bag = vect.vocabulary_
        # # invert the dictionary (only valid when keys and values map one-to-one; for one value to many keys see setdefault)
        # bag_words = dict((v, k) for k, v in words_bag.items())
        #
        # # the order of dictionary elements is not the same as the order of their values
        # lst = []
        # for i in range(0, len(XX.columns)):
        #     lst.append(bag_words[i])
        # XX.columns = lst
if orient:
dict_filter = txt_to_list(self.userdict)
for word in features:
if word not in dict_filter:
XX.drop([word], axis=1, inplace=True)
self.DTM_key = XX
def get_feature_names(self):
return self.features
def strip_non_keywords(self, df):
ff = df.copy()
dict_filter = txt_to_list(self.userdict)
for word in self.features:
if word not in dict_filter:
ff.drop([word], axis=1, inplace=True)
return ff
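
# Hedged usage sketch for jieba_vectorizer: the file paths and the sample frame below
# are assumptions for illustration; a real frame must follow the documented layout
# {0: id, 1: 标题, 2: 正文, 3: 来源, 4: freq}.
def demo_jieba_vectorizer(userdict='keywords.txt', stopwords='stopwords.txt'):
    sample = pd.DataFrame({
        'id': [1, 2],
        '标题': ['标题一', '标题二'],
        '正文': ['银行发行理财产品', '保险公司销售保险产品'],
        '来源': ['来源A', '来源B'],
        'freq': [None, None],
    })
    vec = jieba_vectorizer(sample, userdict=userdict, stopwords=stopwords, orient=False)
    return vec.DTM  # document-term matrix indexed by id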
def make_doc_freq(word, doc):
"""
:param word: 指的是要对其进行词频统计的关键词
:param doc: 指的是要遍历的文本
:return: lst: 返回字典,记录关键词在文本当中出现的频次以及上下文
"""
# 使用正则表达式进行匹配, 拼接成pattern
# re.S表示会自动换行
# finditer是findall的迭代器版本,通过遍历可以依次打印出子串所在的位置
it = re.finditer(word, doc, re.S)
    # match.group() returns the matched substring, match.span() returns its indices
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
    # collect the context results into a dictionary as well
    context = dict()
    for i in range(0, len(lst)):
        # extend each span by at most 10 characters on both sides to obtain the context
        try:
            # to pick a suitable context window, the index has to be clamped
            # so compare span+10 with the length of doc, and span-10 with 0
            # take the smaller value for the upper bound and the larger value for the lower bound
            MAX = min(lst[i][1] + 10, len(doc))
            MIN = max(0, lst[i][0] - 10)
            # grab the context
context[str(i)] = doc[MIN: MAX]
except IndexError:
print('IndexError: ' + word)
freq['Context'] = context
return freq
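
# Hedged usage sketch: make_doc_freq only needs a keyword and a plain string, so it can
# be exercised on its own before the document-level wrappers are run.
def demo_make_doc_freq():
    doc = '银行理财产品监管加强,银行需要按照新规销售理财产品。'
    freq = make_doc_freq('理财产品', doc)
    print(freq['Frequency'])      # number of matches found in the string
    print(freq['Context']['0'])   # first match with up to 10 characters of context on each side
    return freq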
def make_info_freq(name, pattern, doc):
"""
:param name: 指的是对其进行词频统计的形式
:param pattern: 指的是对其进行词频统计的正则表达式
:param doc: 指的是要遍历的文本
:return: lst: 返回字典,记录关键词在文本当中出现的频次以及上下文
注:该函数返回字典中的context元素为元组:(关键词,上下文)
"""
# 使用正则表达式进行匹配, 拼接成pattern
# re.S表示会自动换行
# finditer是findall的迭代器版本,通过遍历可以依次打印出子串所在的位置
it = re.finditer(pattern[0], doc, re.S)
# match.group()可以返回子串,match.span()可以返回索引
cls = pattern[1]
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
freq['Name'] = name
    # collect the context results into a dictionary as well
    context = dict()
    for i in range(0, len(lst)):
        # extend each span by at most 10 characters on both sides to obtain the context
        try:
            # to pick a suitable context window, the index has to be clamped
            # so compare span+10 with the length of doc, and span-10 with 0
            # take the smaller value for the upper bound and the larger value for the lower bound
            MAX = min(lst[i][1] + 10, len(doc))
            MIN = max(0, lst[i][0] - 10)
            # grab the matched keyword and trim it head and tail
            word = match_cut(doc[lst[i][0]: lst[i][1]], cls)
            # bundle the keyword with its context and store it in the context entry
context[str(i)] = (word, doc[MIN: MAX])
except IndexError:
print('IndexError: ' + name)
freq['Context'] = context
return freq
def make_docs_freq(word, docs):
"""
:param word: 指的是要对其进行词频统计的关键词
:param docs: 是要遍历的文本的集合,必须是pandas DataFrame的形式,至少包含id列 (iloc: 0),正文列 (iloc: 2) 和预留出的频次列 (iloc: 4)
:return: 返回字典,其中包括“单关键词-单文本”的词频字典集合,以及计数结果汇总
"""
freq = dict()
    # the total frequency is accumulated with "+=" rather than assigned once, so it must start at 0
    freq['Total Frequency'] = 0
    docs = docs.copy()  # avoid modifying the original sample frame outside this function
    for i in range(0, len(docs)):
        # build a dict for every document, holding the keyword's frequency in that document and its context
        # the id must be in column 0 and the body in column 2
        freq['Doc' + str(docs.iloc[i, 0])] = make_doc_freq(word, docs.iloc[i, 2])
        # while building each document's dict, keep a running total of the overall count
        freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
        docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
    # next, build a DFC (doc-freq-context) panel aggregating the frequency and context of every document
    # first build an (id, freq) dictionary mapping
    xs = docs['id']
    ys = docs['freq']
    # zip is a very handy iterator-based tool, worth using often
    id_freq = {x: y for x, y in zip(xs, ys)}
    # create an empty DataFrame shell and paste the records in one by one
    data = pd.DataFrame(columns=['id', 'freq', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
context = doc['Context']
for i in range(0, num):
strip = {'id': item, 'freq': id_freq[item], 'word': word, 'num': i, 'context': context[str(i)]}
            # the default orient parameter is columns
            # if the dict values are scalars, an index must be passed - that is required by pandas
            strip = pd.DataFrame(strip, index=[None])
            # the DataFrame append method only takes effect by re-assigning the result
data = data.append(strip)
data.set_index(['id', 'freq', 'word'], drop=True, inplace=True)
freq['DFC'] = data
return freq
def make_infos_freq(name, pattern, docs):
"""
:param name: 指的是对其进行词频统计的形式
:param pattern: 指的是对其进行词频统计的(正则表达式, 裁剪方法)
:param docs: 是要遍历的文本的集合,必须是pandas DataFrame的形式,至少包含id列(iloc: 0)和正文列(iloc: 2)
:return: 返回字典,其中包括“单关键词-单文本”的词频字典集合,以及计数结果汇总
"""
freq = dict()
# 因为总频数是通过"+="的方式计算,不是简单赋值,所以要预设为0
freq['Total Frequency'] = 0
docs = docs.copy() # 防止对函数之外的原样本框造成改动
items = range(0, len(docs))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i in items:
# 对于每个文档,都形成一个字典,字典包括关键词在该文档出现的频数和上下文
# id需要在第0列,正文需要在第2列
# pattern 要全须全尾地传递进去,因为make_info_freq两个参数都要用
freq['Doc' + str(docs.iloc[i, 0])] = make_info_freq(name, pattern, docs.iloc[i, 2])
# 在给每个文档形成字典的同时,对于总概率进行滚动加总
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
bar()
# 接下来建立一个DFC(doc-freq-context)统计面板,汇总所有文档对应的词频数和上下文
# 首先构建(id, freq)的字典映射
xs = docs['id']
ys = docs['freq']
# zip(迭代器)是一个很好用的方法,建议多用
id_freq = {x: y for x, y in zip(xs, ys)}
# 新建一个空壳DataFrame,接下来把数据一条一条粘贴进去
data = pd.DataFrame(columns=['id', 'freq', 'form', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
# 从(关键词,上下文)中取出两个元素
context = doc['Context']
for i in range(0, num):
# context 中的关键词已经 match_cut 完毕,不需要重复处理
strip = {'id': item, 'form': name, 'freq': id_freq[item], 'word': context[str(i)][0],
'num': i, 'context': context[str(i)][1]}
# 默认orient参数等于columns
# 如果字典的值是标量,那就必须传递一个index,这是规定
strip = pd.DataFrame(strip, index=[None])
# df的append方法只能通过重新赋值来进行修改
data = data.append(strip)
data.set_index(['id', 'freq', 'form', 'word'], drop=True, inplace=True)
freq['DFC'] = data
print(name + ' Completed')
return freq
def words_docs_freq(words, docs):
"""
:param words: 表示要对其做词频统计的关键词清单
:param docs: 是要遍历的文本的集合,必须是pandas DataFrame的形式,至少包含id列、正文列、和频率列
:return: 返回字典,其中包括“单关键词-多文本”的词频字典集合,以及最终的DFC(doc-frequency-context)和DTM(doc-term matrix)
"""
freqs = dict()
# 与此同时新建一个空壳DataFrame,用于汇总DFC
data = pd.DataFrame()
# 新建一个空壳,用于汇总DTM(Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=words, index=docs['id'])
# 来吧,一个循环搞定所有
items = range(len(words))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for word in words:
freq = make_docs_freq(word, docs)
freqs[word] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, word] = freq['Doc' + str(item)]['Frequency']
bar()
    # remember to sort, otherwise the ordering is wrong (it should be ordered by doc id)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
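
# Hedged usage sketch for words_docs_freq: the docs frame below is invented for
# illustration and follows the layout assumed throughout this module
# (id, 标题, 正文, 来源, freq).
def demo_words_docs_freq():
    docs = pd.DataFrame({
        'id': [1, 2],
        '标题': ['标题一', '标题二'],
        '正文': ['银行发行理财产品', '信托公司发行信托计划'],
        '来源': ['来源A', '来源B'],
        'freq': [None, None],
    })
    freqs = words_docs_freq(['理财产品', '信托计划'], docs)
    return freqs['DFC'], freqs['DTM']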
def infos_docs_freq(infos, docs):
"""
:param docs: 是要遍历的文本的集合,必须是pandas DataFrame的形式,至少包含id列和正文列
:param infos: 指的是正则表达式的列表,格式为字典,key是示例,如“(1)”,value 是正则表达式,如“([0-9])”
:return: 返回字典,其中包括“单关键词-多文本”的词频字典集合,以及最终的DFC(doc-frequency-context)和DTM(doc-term matrix)
"""
freqs = dict()
# 与此同时新建一个空壳DataFrame,用于汇总DFC
data = pd.DataFrame()
# 新建一个空壳,用于汇总DTM(Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=list(infos.keys()), index=docs['id'])
# 来吧,一个循环搞定所有
items = range(len(infos))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for k, v in infos.items():
freq = make_infos_freq(k, v, docs)
freqs[k] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, k] = freq['Doc' + str(item)]['Frequency']
bar()
    # remember to sort, otherwise the ordering is wrong (it should be ordered by doc id)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
def massive_pop(infos, doc):
"""
:param infos: List,表示被删除内容对应的正则表达式
:param doc: 表示正文
:return: 返回一个完成删除的文本
"""
for info in infos:
doc = re.sub(info, '', doc)
return doc
def massive_sub(infos, doc):
"""
:param infos: Dict, 表示被替换内容对应的正则表达式及替换对象
:param doc: 表示正文
:return: 返回一个完成替换的文本
"""
for v, k in infos:
doc = re.sub(v, k, doc)
return doc
# next, take the first n sentences of each sample (or whatever is available if shorter) and run the comparison again
# the first-ten-sentences logic simply counts sentence-ending symbols such as !, ? and 。 until ten have been seen
def top_n_sent(n, doc, percentile=1):
"""
:param n: n指句子的数量,这个函数会返回一段文本中前n句话,若文本内容不多于n句,则全文输出
:param word: 指正文内容
:param percentile: 按照分位数来取句子时,要输入的分位,比如一共有十句话,取50%分位就是5句
如果有11句话,向下取整也是输出5句
:return: 返回字符串:前n句话
"""
info = '[。?!]'
# 在这个函数体内,函数主体语句的作用域大于循环体,因此循环内的变量相当于局部变量
# 因此想在循环外直接返回,就会出现没有定义的错误,因此可以做一个全局声明
# 但是不建议这样做,因为如果函数外有一个变量恰巧和局部变量重名,那函数外的变量也会被改变
# 因此还是推荐多使用迭代器,把循环包裹成迭代器,可以解决很多问题
# 而且已经封装好的迭代器,例如re.findall_iter,就不用另外再去写了,调用起来很方便
# 如下,第一行代码的作用是用列表包裹迭代器,形成一个生成器的列表
# 每个生成器都存在自己的 Attribute
re_iter = list(re.finditer(info, doc))
# max_iter 是 re 匹配到的最大次数
max_iter = len(re_iter)
# 这一句表示,正文过于简短,或者没有标点,此时直接输出全文
if max_iter == 0:
return doc
# 考虑 percentile 的情况,如果总共有11句,就舍弃掉原来的 n,直接改为总句数的 percentile 对应的句子数
# 注意是向下取整
if percentile != 1:
n = math.ceil(percentile * max_iter)
# 如果匹配到至少一句,循环自然结束,输出结果
if n > 0:
return doc[0: re_iter[n - 1].end()]
# 如果正文过于简短,或设定的百分比过低,一句话都凑不齐,此时直接输出第一句
elif n == 0:
return doc[0: re_iter[0].end()]
# 如果匹配到的句子数大于 n,此时只取前 n 句
if max_iter >= n:
return doc[0: re_iter[n - 1].end()]
# 如果匹配到的句子不足 n 句,直接输出全部内容
elif 0 < max_iter < n:
return doc[0: re_iter[-1].end()]
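
# Hedged usage sketch for top_n_sent, showing both the fixed-count mode and the
# percentile mode on a made-up passage.
def demo_top_n_sent():
    doc = '第一句。第二句!第三句?第四句。第五句。'
    head = top_n_sent(2, doc)                   # first two sentences
    half = top_n_sent(2, doc, percentile=0.5)   # n is recomputed from the percentile (3 of 5 here)
    return head, half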
# to reduce the chance of name clashes, keep the number of variables used inside function bodies to a minimum
def dtm_sort_filter(dtm, keymap, name=None):
"""
:param dtm: 前面生成的词频统计矩阵:Doc-Term-Matrix
:param keymap: 字典,标明了 类别-关键词列表 两者关系
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个字典,字典包含两个 pandas.DataFrame: 一个是表示各个种类是否存在的二进制表,另一个是最终的种类数
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
dtm_final = dtm_class.agg(np.sum, axis=1)
result = {'DTM_class': dtm_class, 'DTM_final': dtm_final}
return result
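
# Hedged usage sketch for dtm_sort_filter: the keyword-to-category map below is invented
# for illustration; the DTM argument would normally come from words_docs_freq(...)['DTM'].
def demo_dtm_sort_filter(dtm):
    keymap = {'理财': ['理财产品'], '信托': ['信托计划']}
    result = dtm_sort_filter(dtm, keymap)
    return result['DTM_class'], result['DTM_final']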
def dtm_point_giver(dtm, keymap, scoremap, name=None):
"""
:param dtm: 前面生成的词频统计矩阵:Doc-Term-Matrix
:param keymap: 字典,{TypeA: [word1, word2, word3, ……], TypeB: ……}
:param scoremap: 字典,标明了 类别-分值 两者关系
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本的分值(所有关键词的分值取最高)
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
# 非 keymap 中词会被过滤掉
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
# 找到 columns 对应的分值
keywords = list(dtm_class.columns)
multiplier = []
for keyword in keywords:
multiplier.append(scoremap[keyword])
# DataFrame 的乘法运算,不会改变其 index 和 columns
dtm_score = dtm_class.mul(multiplier, axis=1)
# 取一个最大值来赋分
dtm_score = dtm_score.agg(np.max, axis=1)
return dtm_score
def dfc_sort_filter(dfc, keymap, name=None):
"""
:param dfc: 前面生成的词频统计明细表:Doc-Frequency-Context
:param keymap: 字典,标明了 关键词-所属种类 两者关系
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本中所包含的业务种类数
"""
# 接下来把关键词从 dfc 的 Multi-index 中拿出来(这个index本质上就是一个ndarray)
# 拿出来关键词就可以用字典进行映射
# 先新建一列class-id,准备放置映射的结果
dfc.insert(0, 'cls-id', None)
# 开始遍历
for i in range(0, len(dfc.index)):
dfc.iloc[i, 0] = keymap[dfc.index[i][2]]
# 理论上就可以直接通过 excel 的分类计数功能来看业务种类数了
# 失败了,excel不能看种类数,只能给所有值做计数,因此还需要借助python的unique语句
# dfc.to_excel('被监管业务统计.xlsx')
# 可以对于每一种index做一个计数,使用loc索引到的对象是一个DataFrame
# 先拿到一个doc id的列表
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list(pd.Series(did).unique())
# 接下来获得每一类的结果,注:多重索引的取值值得关注
uni = {}
for item in did:
uni[item] = len(dfc.loc[item, :, :]['cls-id'].unique())
# 把生成的字典转换为以键值行索引的 DataFrame
uni = pd.DataFrame.from_dict(uni, orient='index')
uni.fillna(0, axis=1, inplace=True)
# uni.to_excel(name)
return uni
def dfc_point_giver(dfc, keymap, name=None):
"""
:param dfc: 前面生成的词频统计明细表:Doc-Frequency-Context
:param keymap: 字典,标明了 关键词-分值 两者关系
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本的分值(所有关键词的分值取最高)
"""
dfc.insert(0, 'point', None)
# 开始遍历
for i in range(0, len(dfc.index)):
dfc.iloc[i, 0] = keymap[dfc.index[i][2]]
# 可以对于每一种index做一个计数,使用loc索引到的对象是一个DataFrame
# 先拿到一个doc id的列表
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list(pd.Series(did).unique())
# 接下来获得每一类的结果,注:多重索引的取值值得关注
uni = {}
for item in did:
uni[item] = max(dfc.loc[item, :, :]['point'].unique())
# 把生成的字典转换为以键值行索引的 DataFrame
uni = pd.DataFrame.from_dict(uni, orient='index')
uni.fillna(0, axis=1, inplace=True)
# uni.to_excel(name)
return uni
def dfc_sort_counter(dfc, name=None):
"""
:param dfc: 前面生成的词频统计明细表:Doc-Frequency-Context
:param name: 最终生成 Excel 文件的名称(需要包括后缀)
:return: 返回一个 pandas.DataFrame,表格有两列,一列是文本id,一列是文本中所包含的业务种类数
"""
# 可以对于每一种index做一个计数,使用loc索引到的对象是一个DataFrame
dfc.insert(0, 'form', None)
for i in range(0, dfc.shape[0]):
dfc.iloc[i, 0] = dfc.index[i][2]
# 先拿到一个doc id的列表
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list( | pd.Series(did) | pandas.Series |
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.base import clone
from sklearn.metrics import recall_score, precision_score, accuracy_score, confusion_matrix
import pandas as pd
from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sb
import os
import joblib
sb.set(style="whitegrid")
def sub_sample(X_train, y_train, sub_sample_size):
# re-join X_train and y_train for more convenient sub-sampling per label
joined_train = | pd.DataFrame({"X_train": X_train, "y_train": y_train}) | pandas.DataFrame |
"""
This is the main script to be run from the directory root, it will start the Flask application running which one can
then connect to.
"""
# external packages
from astrodbkit2.astrodb import Database, REFERENCE_TABLES # used for pulling out database and querying
from astropy.coordinates import SkyCoord
from astropy.table import Table # tabulating
from bokeh.embed import json_item # bokeh embedding
from bokeh.layouts import row, column # bokeh displaying nicely
from bokeh.models import ColumnDataSource, Range1d, CustomJS,\
Select, Toggle, TapTool, OpenURL, HoverTool # bokeh models
from bokeh.plotting import figure, curdoc # bokeh plotting
from flask import Flask, render_template, request, redirect, url_for, jsonify # website functionality
from flask_cors import CORS # cross origin fix (aladin mostly)
from flask_wtf import FlaskForm # web forms
from markdown2 import markdown # using markdown formatting
import numpy as np # numerical python
import pandas as pd # running dataframes
from wtforms import StringField, SubmitField # web forms
from wtforms.validators import DataRequired, StopValidation # validating web forms
# internal packages
import argparse # system arguments
import os # operating system
from typing import Union, List # type hinting
from urllib.parse import quote # handling strings into url friendly form
# local packages
from simple_callbacks import JSCallbacks
# initialise
app_simple = Flask(__name__) # start flask app
app_simple.config['SECRET_KEY'] = os.urandom(32) # need to generate csrf token as basic security for Flask
CORS(app_simple) # makes CORS work (aladin notably)
def sysargs():
"""
These are the system arguments given after calling this python script
Returns
-------
_args
The different argument parameters, can be grabbed via their long names (e.g. _args.host)
"""
_args = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
_args.add_argument('-i', '--host', default='127.0.0.1',
help='Local IP Address to host server, default 127.0.0.1')
_args.add_argument('-p', '--port', default=8000,
help='Local port number to host server through, default 8000', type=int)
_args.add_argument('-d', '--debug', help='Run Flask in debug mode?', default=False, action='store_true')
_args.add_argument('-f', '--file', default='SIMPLE.db',
help='Database file path relative to current directory, default SIMPLE.db')
_args = _args.parse_args()
return _args
class SimpleDB(Database): # this keeps pycharm happy about unresolved references
"""
Wrapper class for astrodbkit2.Database specific to SIMPLE
"""
Sources = None # initialise class attribute
Photometry = None
Parallaxes = None
class Inventory:
"""
For use in the solo result page where the inventory of an object is queried, grabs also the RA & Dec
"""
ra: float = 0
dec: float = 0
def __init__(self, resultdict: dict):
"""
Constructor method for Inventory
Parameters
----------
resultdict: dict
The dictionary of all the key: values in a given object inventory
"""
self.results: dict = resultdict # given inventory for a target
for key in self.results: # over every key in inventory
if args.debug:
print(key)
if key in REFERENCE_TABLES: # ignore the reference table ones
continue
lowkey: str = key.lower() # lower case of the key
mkdown_output: str = self.listconcat(key) # get in markdown the dataframe value for given key
setattr(self, lowkey, mkdown_output) # set the key attribute with the dataframe for given key
try:
srcs: pd.DataFrame = self.listconcat('Sources', rtnmk=False) # open the Sources result
self.ra, self.dec = srcs.ra[0], srcs.dec[0]
except (KeyError, AttributeError):
pass
return
def listconcat(self, key: str, rtnmk: bool = True) -> Union[pd.DataFrame, str]:
"""
Concatenates the list for a given key
Parameters
----------
key: str
The key corresponding to the inventory
rtnmk: bool
Switch for whether to return either a markdown string or a dataframe
"""
obj: List[dict] = self.results[key] # the value for the given key
df: pd.DataFrame = pd.concat([ | pd.DataFrame(objrow, index=[i]) | pandas.DataFrame |
from bs4 import BeautifulSoup
import urllib.request
import os.path
import pandas as pd
import re
final = pd.DataFrame()
plays = ['antony-and-cleopatra', 'asyoulikeit', 'errors', 'coriolanus', 'hamlet', 'henry4pt1', 'henry4pt2','henryv',
'juliuscaesar', 'lear', 'macbeth', 'measure-for-measure', 'merchant', 'msnd',
'muchado', 'othello', 'richardii', 'richardiii', 'romeojuliet', 'shrew', 'tempest', 'twelfthnight',
'twogentlemen', 'winterstale']
#not included in nfs: All's Well That Ends Well, Cymbeline, Henry VI 1-3, Henry VIII,
# Love's Labours Lost, Merry Wives of Windsor,
# Pericles, Timon of Athens, Titus Andronicus, Troilus & Cressida
for play in plays:
print(play)
play_lines = pd.DataFrame()
for ii in range(0,500,2):
if os.path.isfile(play+str(ii)) :
fname = play+str(ii)
file = open(fname)
soup = BeautifulSoup(file.read(), 'html.parser')
lines = pd.DataFrame()
lines_m = pd.DataFrame()
lines_o = pd.DataFrame()
for translation in ['original', 'modern']:
if translation == 'original':
quotes = soup.find_all('td', attrs={'class':'noFear-left'})
else:
quotes = soup.find_all('td', attrs={'class':'noFear-right'})
for q in quotes:
if translation == 'original':
player = q.find('b').text if q.find('b') else None
originals = q.find_all('div', attrs={'class':translation+'-line'})
originals = [orig.text for orig in originals if orig]
original = ' '.join(originals)
lines_o = lines_o.append( | pd.DataFrame({'player':player, translation: original}, index=[0]) | pandas.DataFrame |
from __future__ import division
import os
import sys
from pdb import set_trace
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from Utils.FileUtil import list2dataframe
from smote import SMOTE
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
def getTunings(fname):
raw = pd.read_csv(root + '/old/tunings.csv').transpose().values.tolist()
formatd = | pd.DataFrame(raw[1:], columns=raw[0]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import sklearn.metrics
import ubelt as ub
from sklearn.preprocessing import LabelEncoder
def classification_report(y_true, y_pred, target_names=None,
sample_weight=None, verbose=False):
"""
Computes a classification report which is a collection of various metrics
commonly used to evaulate classification quality. This can handle binary
and multiclass settings.
Note that this function does not accept probabilities or scores and must
instead act on final decisions. See ovr_classification_report for a
probability based report function using a one-vs-rest strategy.
This emulates the bm(cm) Matlab script written by <NAME> that is used
for computing bookmaker, markedness, and various other scores.
References:
https://csem.flinders.edu.au/research/techreps/SIE07001.pdf
https://www.mathworks.com/matlabcentral/fileexchange/5648-bm-cm-?requestedDomain=www.mathworks.com
<NAME>, (2012). A Comparison of MCC and CEN
Error Measures in MultiClass Prediction
Example:
>>> # xdoctest: +IGNORE_WANT
>>> y_true = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3]
>>> y_pred = [1, 2, 1, 3, 1, 2, 2, 3, 2, 2, 3, 3, 2, 3, 3, 3, 1, 3]
>>> target_names = None
>>> sample_weight = None
>>> report = classification_report(y_true, y_pred, verbose=0)
>>> print(report['confusion'])
pred 1 2 3 Σr
real
1 3 1 1 5
2 0 4 1 5
3 1 1 6 8
Σp 4 6 8 18
>>> print(report['metrics'])
metric precision recall fpr markedness bookmaker mcc support
class
1 0.7500 0.6000 0.0769 0.6071 0.5231 0.5635 5
2 0.6667 0.8000 0.1538 0.5833 0.6462 0.6139 5
3 0.7500 0.7500 0.2000 0.5500 0.5500 0.5500 8
combined 0.7269 0.7222 0.1530 0.5751 0.5761 0.5758 18
Ignore:
>>> size = 100
>>> rng = np.random.RandomState(0)
>>> p_classes = np.array([.90, .05, .05][0:2])
>>> p_classes = p_classes / p_classes.sum()
>>> p_wrong = np.array([.03, .01, .02][0:2])
>>> y_true = testdata_ytrue(p_classes, p_wrong, size, rng)
>>> rs = []
>>> for x in range(17):
>>> p_wrong += .05
>>> y_pred = testdata_ypred(y_true, p_wrong, rng)
>>> report = classification_report(y_true, y_pred, verbose='hack')
>>> rs.append(report)
>>> import plottool as pt
>>> pt.qtensure()
>>> df = pd.DataFrame(rs).drop(['raw'], axis=1)
>>> delta = df.subtract(df['target'], axis=0)
>>> sqrd_error = np.sqrt((delta ** 2).sum(axis=0))
>>> print('Error')
>>> print(sqrd_error.sort_values())
>>> ys = df.to_dict(orient='list')
>>> pt.multi_plot(ydata_list=ys)
"""
if target_names is None:
unique_labels = np.unique(np.hstack([y_true, y_pred]))
if len(unique_labels) == 1 and (unique_labels[0] == 0 or unique_labels[0] == 1):
target_names = np.array([False, True])
y_true_ = y_true
y_pred_ = y_pred
else:
lb = LabelEncoder()
lb.fit(unique_labels)
y_true_ = lb.transform(y_true)
y_pred_ = lb.transform(y_pred)
target_names = lb.classes_
else:
y_true_ = y_true
y_pred_ = y_pred
# Real data is on the rows,
# Pred data is on the cols.
cm = sklearn.metrics.confusion_matrix(
y_true_, y_pred_, sample_weight=sample_weight)
confusion = cm # NOQA
k = len(cm) # number of classes
N = cm.sum() # number of examples
real_total = cm.sum(axis=1)
pred_total = cm.sum(axis=0)
# the number of "positive" cases **per class**
n_pos = real_total # NOQA
# the number of times a class was predicted.
n_neg = N - n_pos # NOQA
# number of true positives per class
n_tps = np.diag(cm)
# number of true negatives per class
n_fps = (cm - np.diagflat(np.diag(cm))).sum(axis=0)
tprs = n_tps / real_total # true pos rate (recall)
tpas = n_tps / pred_total # true pos accuracy (precision)
unused = (real_total + pred_total) == 0
fprs = n_fps / n_neg # false pose rate
fprs[unused] = np.nan
rprob = real_total / N
pprob = pred_total / N
if len(cm) == 2:
[[A, B],
[C, D]] = cm
(A * D - B * C) / np.sqrt((A + C) * (B + D) * (A + B) * (C + D))
# bookmaker is analogous to recall, but unbiased by class frequency
rprob_mat = np.tile(rprob, [k, 1]).T - (1 - np.eye(k))
bmcm = cm.T / rprob_mat
bms = np.sum(bmcm.T, axis=0) / N
# markedness is analogous to precision, but unbiased by class frequency
pprob_mat = np.tile(pprob, [k, 1]).T - (1 - np.eye(k))
mkcm = cm / pprob_mat
mks = np.sum(mkcm.T, axis=0) / N
mccs = np.sign(bms) * np.sqrt(np.abs(bms * mks))
perclass_data = ub.odict([
('precision', tpas),
('recall', tprs),
('fpr', fprs),
('markedness', mks),
('bookmaker', bms),
('mcc', mccs),
('support', real_total),
])
tpa = np.nansum(tpas * rprob)
tpr = np.nansum(tprs * rprob)
fpr = np.nansum(fprs * rprob)
mk = np.nansum(mks * rprob)
bm = np.nansum(bms * pprob)
# The simple mean seems to do the best
mccs_ = mccs[~np.isnan(mccs)]
if len(mccs_) == 0:
mcc_combo = np.nan
else:
mcc_combo = np.nanmean(mccs_)
combined_data = ub.odict([
('precision', tpa),
('recall', tpr),
('fpr', fpr),
('markedness', mk),
('bookmaker', bm),
# ('mcc', np.sign(bm) * np.sqrt(np.abs(bm * mk))),
('mcc', mcc_combo),
# np.sign(bm) * np.sqrt(np.abs(bm * mk))),
('support', real_total.sum())
])
# Not sure how to compute this. Should it agree with the sklearn impl?
if verbose == 'hack':
verbose = False
mcc_known = sklearn.metrics.matthews_corrcoef(
y_true, y_pred, sample_weight=sample_weight)
mcc_raw = np.sign(bm) * np.sqrt(np.abs(bm * mk))
import scipy as sp
def gmean(x, w=None):
if w is None:
return sp.stats.gmean(x)
return np.exp(np.nansum(w * np.log(x)) / np.nansum(w))
def hmean(x, w=None):
if w is None:
return sp.stats.hmean(x)
return 1 / (np.nansum(w * (1 / x)) / np.nansum(w))
def amean(x, w=None):
if w is None:
return np.mean(x)
return np.nansum(w * x) / np.nansum(w)
report = {
'target': mcc_known,
'raw': mcc_raw,
}
# print('%r <<<' % (mcc_known,))
means = {
'a': amean,
# 'h': hmean,
'g': gmean,
}
weights = {
'p': pprob,
'r': rprob,
'': None,
}
for mean_key, mean in means.items():
for w_key, w in weights.items():
# Hack of very wrong items
if mean_key == 'g':
if w_key in ['r', 'p', '']:
continue
if mean_key == 'g':
if w_key in ['r']:
continue
m = mean(mccs, w)
r_key = '{} {}'.format(mean_key, w_key)
report[r_key] = m
# print(r_key)
# print(np.abs(m - mcc_known))
# print(ut.repr4(report, precision=8))
return report
# print('mcc_known = %r' % (mcc_known,))
# print('mcc_combo1 = %r' % (mcc_combo1,))
# print('mcc_combo2 = %r' % (mcc_combo2,))
# print('mcc_combo3 = %r' % (mcc_combo3,))
index = | pd.Index(target_names, name='class') | pandas.Index |
"""Log Imputation Class
<NAME> 2021
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn import metrics
"""Log Imputation Class
<NAME> 2021
"""
from collections import defaultdict
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn import metrics
class LogImputer:
def __init__(
self,
train,
test,
imputer,
iterative=False,
log10logs=None,
imputer_kwargs=dict(),
):
self.train = train
self.test = test
self.imputer = imputer
self.imputer_init = imputer_kwargs
self.iterative = iterative
self.log10logs = log10logs
self.encoders = dict()
self.scalar = None
self.imputation_train_impts = dict()
self.imputation_train_dfs = dict()
def encode(self, logs):
"""Encode logs to int.
Args:
logs ([type]): [description]
"""
if isinstance(logs, str):
logs = [logs]
data = pd.concat([self.train, self.test])
for log in logs:
self.encoders[log] = LabelEncoder()
self.encoders[log].fit(data[log])
self.train[log] = self.encoders[log].transform(self.train[log])
self.test[log] = self.encoders[log].transform(self.test[log])
def decode(self):
"""Decode logs based unpon previous encodings."""
for log in self.encoders:
self.train[log] = self.encoders[log].inverse_transform(self.train[log])
self.test[log] = self.encoders[log].inverse_transform(self.test[log])
def scale(self):
"""Standard scaling and log10 transform"""
for log in self.log10logs:
self.train[log] = np.log10(self.train[log])
self.test[log] = np.log10(self.test[log])
data = pd.concat([self.train, self.test])
self.scalar = StandardScaler()
self.scalar.fit(data)
self.train.loc[:, :] = self.scalar.transform(self.train)
self.test.loc[:, :] = self.scalar.transform(self.test)
def _test_data_prep(self, data, test_target, set_to_nan=0.3):
"""This method sets set_to_nan fraction of the values to nan so we can measure the model accuracy."""
data = data.copy()
# just get non-nan values for test target
sub = data.dropna(subset=[test_target])
# introduce random missing
rand_set_mask = np.random.random(len(sub)) < set_to_nan
replace = sub.index[rand_set_mask]
# create flags, create and return new data
data.loc[replace, test_target] = np.nan
data["set_nan"] = False
data.loc[replace, "set_nan"] = True
data["was_nan"] = data[test_target].isna()
# print("Col, InputSize, Number of Nan, % NaN, Original Nan", "Training Size")
# print(
# f"{j:>3}",
# f"{data.shape[0]:>10}",
# f"{replace.size:>14}",
# f"{100*np.sum(data.set_nan)/sub.shape[0]:>6.2f}",
# f"{np.sum(data.was_nan):>13}",
# f"{sub.shape[0]-replace.size:>13}",
# )
return data
def fit(self, logs=None, target_logs=None, test_proportion=0.3, **kwargs):
"""Fit the imputer/s.
Args:
logs ([type], optional): [description]. Defaults to None.
"""
if logs is None:
logs = self.train.columns
self.fitted_logs = logs
if target_logs is None:
target_logs = self.train.columns
for key in target_logs:
self.imputation_train_dfs[key] = self._test_data_prep(
self.train, key, set_to_nan=test_proportion
)
# mice mode
if self.iterative:
for key in target_logs:
self.imputation_train_impts[key] = IterativeImputer(
self.imputer(**self.imputer_init), **kwargs
)
self.imputation_train_impts[key].fit(
self.imputation_train_dfs[key][logs].copy()
)
# direct prediction mode (won't work for reg. that don't handle nans)
else:
for key in target_logs:
self.imputation_train_impts[key] = self.imputer(**self.imputer_init)
self.imputation_train_impts[key].fit(
self.imputation_train_dfs[key]
.dropna(subset=[key])
.loc[:, set(logs).difference((key,))],
self.imputation_train_dfs[key][key].dropna(),
)
def predict(self, predict_for="train"):
"""Prediction Mode - Not available for iterative imputer."""
pass
def impute(self, impute_for="train"):
"""Imputation Mode"""
df = self.__getattribute__(impute_for)
if self.iterative:
imputed = {
key: self.imputation_train_impts[key].transform(df[self.fitted_logs])
for key in self.imputation_train_impts
}
output_df = imputed[tuple(imputed.keys())[0]]
for key in imputed.keys():
output_df[key] = imputed[key][key]
else:
predicted = {
key: self.imputation_train_impts[key].predict(
df.loc[:, set(self.fitted_logs).difference((key,))]
)
for key in self.imputation_train_impts
}
imputed = {
key: np.where(df[key].isna().values, ar, df[key].values)
for key, ar in predicted.items()
}
output_df = df.copy()
for key in imputed:
output_df[key] = imputed[key]
return output_df
def _impute_training(self):
if self.iterative:
imputed = {}
for key in self.imputation_train_impts:
imputed[key] = self.imputation_train_dfs[key].copy()
imputed[key].loc[:, self.fitted_logs] = self.imputation_train_impts[
key
].transform(self.imputation_train_dfs[key][self.fitted_logs])
else:
imputed = {}
for key in self.imputation_train_dfs:
fitted_logs = set(self.fitted_logs).difference((key,))
pred = self.imputation_train_impts[key].predict(
self.imputation_train_dfs[key][fitted_logs]
)
imputed[key] = self.imputation_train_dfs[key].copy()
mask = self.imputation_train_dfs[key][key].isna()
imputed[key].loc[mask, key] = pred[mask]
return imputed
def _impute_test(self):
if self.iterative:
imputed = {}
for key in self.imputation_train_impts:
imputed[key] = self.test.copy()
imputed[key][key] = np.nan
imputed[key].loc[:, self.fitted_logs] = self.imputation_train_impts[
key
].transform(imputed[key][self.fitted_logs])
else:
imputed = {}
for key in self.imputation_train_dfs:
fitted_logs = set(self.fitted_logs).difference((key,))
pred = self.imputation_train_impts[key].predict(self.test[fitted_logs])
imputed[key] = self.test.copy()
imputed[key].loc[:, key] = pred
return imputed
def score(self, score="train"):
"""Evaluate the models against the NANed data from the training set."""
scores = defaultdict(dict)
if score == "train":
imputed_dfs = self._impute_training()
df = self.train
elif score == "test":
imputed_dfs = self._impute_test()
df = self.test
else:
raise ValueError(f"unknown score type: {score}")
for key, d in imputed_dfs.items():
if score == "train":
mask = self.imputation_train_dfs[key].set_nan.values
elif score == "test":
mask = ~df[key].isna()
truth = df.loc[mask, key].values
test = d.loc[mask, key].values
se = np.power((truth - test) / truth, 2)
perc_error_score = np.nanmean(np.power(se, 0.5)) * 100.0
er = dict(
perc_error=perc_error_score,
explained_var=metrics.explained_variance_score(truth, test),
max_error=metrics.max_error(truth, test),
mae=metrics.mean_absolute_error(truth, test),
mse=metrics.mean_squared_error(truth, test),
r2=metrics.r2_score(truth, test),
)
scores[key] = er
return | pd.DataFrame(scores) | pandas.DataFrame |
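
# Hedged usage sketch for LogImputer (not from the original file): the log names, the
# random frames and the BayesianRidge estimator are stand-ins for real well-log data.
def demo_log_imputer():
    from sklearn.linear_model import BayesianRidge
    rng = np.random.default_rng(0)
    logs = ['GR', 'RHOB', 'NPHI']
    train = pd.DataFrame(rng.normal(size=(200, 3)), columns=logs)
    test = pd.DataFrame(rng.normal(size=(50, 3)), columns=logs)
    imputer = LogImputer(train, test, BayesianRidge, iterative=True, log10logs=[])
    imputer.scale()
    imputer.fit(logs=logs, target_logs=['RHOB'], test_proportion=0.3)
    return imputer.score('train')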
# -*- coding: utf-8 -*-
from datetime import datetime
from io import StringIO
import re
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
from pandas.util import testing as tm
import pandas.io.formats.format as fmt
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex"
" ea commodo consequat. Duis aute irure dolor in reprehenderit in"
" voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
" sint occaecat cupidatat non proident, sunt in culpa qui officia"
" deserunt mollit anim id est laborum.")
def expected_html(datapath, name):
"""
Read HTML file from formats data directory.
Parameters
----------
datapath : pytest fixture
The datapath fixture injected into a test by pytest.
name : str
The name of the HTML file without the suffix.
Returns
-------
str : contents of HTML file.
"""
filename = '.'.join([name, 'html'])
filepath = datapath('io', 'formats', 'data', 'html', filename)
with open(filepath, encoding='utf-8') as f:
html = f.read()
return html.rstrip()
@pytest.fixture(params=['mixed', 'empty'])
def biggie_df_fixture(request):
"""Fixture for a big mixed Dataframe and an empty Dataframe"""
if request.param == 'mixed':
df = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
df.loc[:20, 'A'] = np.nan
df.loc[:20, 'B'] = np.nan
return df
elif request.param == 'empty':
df = DataFrame(index=np.arange(200))
return df
@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
def justify(request):
return request.param
@pytest.mark.parametrize('col_space', [30, 50])
def test_to_html_with_col_space(col_space):
df = DataFrame(np.random.random(size=(1, 3)))
# check that col_space affects HTML generation
# and be very brittle about it.
result = df.to_html(col_space=col_space)
hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
assert len(hdrs) > 0
for h in hdrs:
assert "min-width" in h
assert str(col_space) in h
def test_to_html_with_empty_string_label():
# GH 3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
result = df.to_html()
assert "rowspan" not in result
@pytest.mark.parametrize('df,expected', [
(DataFrame({'\u03c3': np.arange(10.)}), 'unicode_1'),
(DataFrame({'A': ['\u03c3']}), 'unicode_2')
])
def test_to_html_unicode(df, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html()
assert result == expected
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({'A': [6.0, 3.1, 2.2]})
result = df.to_html(decimal=',')
expected = expected_html(datapath, 'gh12031_expected_output')
assert result == expected
@pytest.mark.parametrize('kwargs,string,expected', [
(dict(), "<type 'str'>", 'escaped'),
(dict(escape=False), "<b>bold</b>", 'escape_disabled')
])
def test_to_html_escaped(kwargs, string, expected, datapath):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: string,
b: string},
'co>l2': {a: string,
b: string}}
result = DataFrame(test_dict).to_html(**kwargs)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('index_is_named', [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
# GH 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)
})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
if index_is_named:
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh8452_expected_output')
assert result == expected
@pytest.mark.parametrize('multi_sparse,expected', [
(False, 'multiindex_sparsify_false_multi_sparse_1'),
(False, 'multiindex_sparsify_false_multi_sparse_2'),
(True, 'multiindex_sparsify_1'),
(True, 'multiindex_sparsify_2')
])
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
if expected.endswith('2'):
df.columns = index[::2]
with option_context('display.multi_sparse', multi_sparse):
result = df.to_html()
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('max_rows,expected', [
(60, 'gh14882_expected_output_1'),
# Test that ... appears in a middle level
(56, 'gh14882_expected_output_2')
])
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
# GH 14882 - Issue on truncation with odd length DataFrame
index = MultiIndex.from_product([[100, 200, 300],
[10, 20, 30],
[1, 2, 3, 4, 5, 6, 7]],
names=['a', 'b', 'c'])
df = DataFrame({'n': range(len(index))}, index=index)
result = df.to_html(max_rows=max_rows)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('df,formatters,expected', [
(DataFrame(
[[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4)),
{'__index__': lambda x: 'abcd' [x]},
'index_formatter'),
(DataFrame(
{'months': [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
{'months': lambda x: x.strftime('%Y-%m')},
'datetime64_monthformatter'),
(DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')}),
{'hod': lambda x: x.strftime('%H:%M')},
'datetime64_hourformatter')
])
def test_to_html_formatters(df, formatters, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html(formatters=formatters)
assert result == expected
def test_to_html_regression_GH6098():
df = DataFrame({
'clé1': ['a', 'a', 'b', 'b', 'a'],
'clé2': ['1er', '2ème', '1er', '2ème', '1er'],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=['clé1'], columns=['clé2'])._repr_html_()
def test_to_html_truncate(datapath):
index = pd.date_range(start='20010101', freq='D', periods=20)
df = DataFrame(index=index, columns=range(20))
result = df.to_html(max_rows=8, max_cols=4)
expected = expected_html(datapath, 'truncate')
assert result == expected
@pytest.mark.parametrize('sparsify,expected', [
(True, 'truncate_multi_index'),
(False, 'truncate_multi_index_sparse_off')
])
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('option,result,expected', [
(None, lambda df: df.to_html(), '1'),
(None, lambda df: df.to_html(border=0), '0'),
(0, lambda df: df.to_html(), '0'),
(0, lambda df: df._repr_html_(), '0'),
])
def test_to_html_border(option, result, expected):
df = DataFrame({'A': [1, 2]})
if option is None:
result = result(df)
else:
with option_context('display.html.border', option):
result = result(df)
expected = 'border="{}"'.format(expected)
assert expected in result
def test_display_option_warning():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.options.html.border
@pytest.mark.parametrize('biggie_df_fixture', ['mixed'], indirect=True)
def test_to_html(biggie_df_fixture):
# TODO: split this test
df = biggie_df_fixture
s = df.to_html()
buf = StringIO()
retval = df.to_html(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
df.to_html(columns=['B', 'A'], col_space=17)
df.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
df.to_html(columns=['B', 'A'], float_format=str)
df.to_html(columns=['B', 'A'], col_space=12, float_format=str)
@pytest.mark.parametrize('biggie_df_fixture', ['empty'], indirect=True)
def test_to_html_empty_dataframe(biggie_df_fixture):
df = biggie_df_fixture
df.to_html()
def test_to_html_filename(biggie_df_fixture, tmpdir):
df = biggie_df_fixture
expected = df.to_html()
path = tmpdir.join('test.html')
df.to_html(path)
result = path.read()
assert result == expected
def test_to_html_with_no_bold():
df = DataFrame({'x': np.random.randn(5)})
html = df.to_html(bold_rows=False)
result = html[html.find("</thead>")]
assert '<strong' not in result
def test_to_html_columns_arg():
df = DataFrame(tm.getSeriesData())
result = df.to_html(columns=['A'])
assert '<th>B</th>' not in result
@pytest.mark.parametrize('columns,justify,expected', [
(MultiIndex.from_tuples(
list(zip(np.arange(2).repeat(2), np.mod(lrange(4), 2))),
names=['CL0', 'CL1']),
'left',
'multiindex_1'),
(MultiIndex.from_tuples(
list(zip(range(4), np.mod(lrange(4), 2)))),
'right',
'multiindex_2')
])
def test_to_html_multiindex(columns, justify, expected, datapath):
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify=justify)
expected = expected_html(datapath, expected)
assert result == expected
def test_to_html_justify(justify, datapath):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify=justify)
expected = expected_html(datapath, 'justify').format(justify=justify)
assert result == expected
@pytest.mark.parametrize("justify", ["super-right", "small-left",
"noinherit", "tiny", "pandas"])
def test_to_html_invalid_justify(justify):
# GH 17527
df = DataFrame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
df.to_html(justify=justify)
def test_to_html_index(datapath):
# TODO: split this test
index = ['foo', 'bar', 'baz']
df = DataFrame({'A': [1, 2, 3],
'B': [1.2, 3.4, 5.6],
'C': ['one', 'two', np.nan]},
columns=['A', 'B', 'C'],
index=index)
expected_with_index = expected_html(datapath, 'index_1')
assert df.to_html() == expected_with_index
expected_without_index = expected_html(datapath, 'index_2')
result = df.to_html(index=False)
for i in index:
assert i not in result
assert result == expected_without_index
df.index = Index(['foo', 'bar', 'baz'], name='idx')
expected_with_index = expected_html(datapath, 'index_3')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = expected_html(datapath, 'index_4')
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
assert i not in result
# must be the same result as normal index
assert result == expected_without_index
df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
expected_with_index = expected_html(datapath, 'index_5')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
@pytest.mark.parametrize('classes', [
"sortable draggable",
["sortable", "draggable"]
])
def test_to_html_with_classes(classes, datapath):
df = DataFrame()
expected = expected_html(datapath, 'with_classes')
result = df.to_html(classes=classes)
assert result == expected
def test_to_html_no_index_max_rows(datapath):
# GH 14998
df = DataFrame({"A": [1, 2, 3, 4]})
result = df.to_html(index=False, max_rows=1)
expected = expected_html(datapath, 'gh14998_expected_output')
assert result == expected
def test_to_html_multiindex_max_cols(datapath):
# GH 6131
index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']],
codes=[[0, 1, 2], [0, 1, 2]],
names=['b', 'c'])
columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']],
codes=[[0, 0, 0], [0, 1, 2]],
names=[None, 'a'])
data = np.array(
[[1., np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]])
df = DataFrame(data, index, columns)
result = df.to_html(max_cols=2)
expected = expected_html(datapath, 'gh6131_expected_output')
assert result == expected
def test_to_html_multi_indexes_index_false(datapath):
# GH 22579
df = DataFrame({'a': range(10), 'b': range(10, 20), 'c': range(10, 20),
'd': range(10, 20)})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
df.index = MultiIndex.from_product([['a', 'b'],
['c', 'd', 'e', 'f', 'g']])
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh22579_expected_output')
assert result == expected
@pytest.mark.parametrize('index_names', [True, False])
@pytest.mark.parametrize('header', [True, False])
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('column_index, column_type', [
(Index([0, 1]), 'unnamed_standard'),
(Index([0, 1], name='columns.name'), 'named_standard'),
(MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'),
(MultiIndex.from_product(
[['a'], ['b', 'c']], names=['columns.name.0',
'columns.name.1']), 'named_multi')
])
@pytest.mark.parametrize('row_index, row_type', [
(Index([0, 1]), 'unnamed_standard'),
(Index([0, 1], name='index.name'), 'named_standard'),
(MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'),
(MultiIndex.from_product(
[['a'], ['b', 'c']], names=['index.name.0',
'index.name.1']), 'named_multi')
])
def test_to_html_basic_alignment(
datapath, row_index, row_type, column_index, column_type,
index, header, index_names):
# GH 22747, GH 22579
df = DataFrame(np.zeros((2, 2), dtype=int),
index=row_index, columns=column_index)
result = df.to_html(
index=index, header=header, index_names=index_names)
if not index:
row_type = 'none'
elif not index_names and row_type.startswith('named'):
row_type = 'un' + row_type
if not header:
column_type = 'none'
elif not index_names and column_type.startswith('named'):
column_type = 'un' + column_type
filename = 'index_' + row_type + '_columns_' + column_type
expected = expected_html(datapath, filename)
assert result == expected
@pytest.mark.parametrize('index_names', [True, False])
@pytest.mark.parametrize('header', [True, False])
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('column_index, column_type', [
(Index(np.arange(8)), 'unnamed_standard'),
(Index(np.arange(8), name='columns.name'), 'named_standard'),
(MultiIndex.from_product(
[['a', 'b'], ['c', 'd'], ['e', 'f']]), 'unnamed_multi'),
(MultiIndex.from_product(
[['a', 'b'], ['c', 'd'], ['e', 'f']], names=['foo', None, 'baz']),
'named_multi')
])
@pytest.mark.parametrize('row_index, row_type', [
(Index(np.arange(8)), 'unnamed_standard'),
(Index(np.arange(8), name='index.name'), 'named_standard'),
(MultiIndex.from_product(
[['a', 'b'], ['c', 'd'], ['e', 'f']]), 'unnamed_multi'),
(MultiIndex.from_product(
[['a', 'b'], ['c', 'd'], ['e', 'f']], names=['foo', None, 'baz']),
'named_multi')
])
def test_to_html_alignment_with_truncation(
datapath, row_index, row_type, column_index, column_type,
index, header, index_names):
# GH 22747, GH 22579
df = DataFrame(np.arange(64).reshape(8, 8),
index=row_index, columns=column_index)
result = df.to_html(
max_rows=4, max_cols=4,
index=index, header=header, index_names=index_names)
if not index:
row_type = 'none'
elif not index_names and row_type.startswith('named'):
row_type = 'un' + row_type
if not header:
column_type = 'none'
elif not index_names and column_type.startswith('named'):
column_type = 'un' + column_type
filename = 'trunc_df_index_' + row_type + '_columns_' + column_type
expected = expected_html(datapath, filename)
assert result == expected
@pytest.mark.parametrize('index', [False, 0])
def test_to_html_truncation_index_false_max_rows(datapath, index):
# GH 15019
data = [[1.764052, 0.400157],
[0.978738, 2.240893],
[1.867558, -0.977278],
[0.950088, -0.151357],
[-0.103219, 0.410599]]
df = DataFrame(data)
result = df.to_html(max_rows=4, index=index)
expected = expected_html(datapath, 'gh15019_expected_output')
assert result == expected
@pytest.mark.parametrize('index', [False, 0])
@pytest.mark.parametrize('col_index_named, expected_output', [
(False, 'gh22783_expected_output'),
(True, 'gh22783_named_columns_index')
])
def test_to_html_truncation_index_false_max_cols(
datapath, index, col_index_named, expected_output):
# GH 22783
data = [[1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
[-0.977278, 0.950088, -0.151357, -0.103219, 0.410599]]
df = | DataFrame(data) | pandas.DataFrame |
# coding:utf-8
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: TXVision
## Email: <EMAIL>
## Copyright (c) 2021
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import argparse
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import itertools
import time
import cv2
import random
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.model_selection import KFold
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import torch.nn.functional as F
import encoding
from encoding.utils import (accuracy, AverageMeter, MixUpWrapper, LR_Scheduler)
from decimal import Decimal
class Options():
def __init__(self):
# data settings
parser = argparse.ArgumentParser(description='Inference')
parser.add_argument('--dataset', type=str, default='imagenet',
help='training dataset (default: imagenet)')
parser.add_argument('--num_classes', type=int, metavar='N',
help='num_classes')
parser.add_argument('--fold', type=int, metavar='N',
help='the fold of K-Fold')
parser.add_argument('--csv-path', type=str,
help='the csv file contained all clinical info of pids')
parser.add_argument('--base-size', type=int, default=None,
help='base image size')
parser.add_argument('--crop-size', type=int, default=224,
help='crop image size')
# model params
parser.add_argument('--model', type=str, default='densenet',
help='network model type (default: densenet)')
parser.add_argument('--rectify', action='store_true',
default=False, help='rectify convolution')
parser.add_argument('--rectify-avg', action='store_true',
default=False, help='rectify convolution')
# training hyper params
parser.add_argument('--batch-size', type=int, default=8, metavar='N',
help='batch size for training (default: 128)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
# cuda, seed and logging
parser.add_argument('--no-cuda', action='store_true',
default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, # default=None,
help='put the path to resuming file if needed')
parser.add_argument('--verify', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--export', type=str, default=None,
help='put the path to resuming file if needed')
self.parser = parser
def parse(self):
args = self.parser.parse_args()
return args
def draw_roc(ensemble_gt, ensemble_pred, name='en'):
import matplotlib.pyplot as plt
    # compute the confidence interval
def ciauc(auc, pos_n, neg_n):
import math
q0 = auc * (1 - auc)
q1 = auc / (2 - auc) - auc ** 2
q2 = 2 * (auc ** 2) / (1 + auc) - auc ** 2
se = math.sqrt((q0 + (pos_n - 1) * q1 + (neg_n - 1) * q2) / (pos_n * neg_n))
z_crit = 1.959964
lower = auc - z_crit * se
upper = auc + z_crit * se
lower = max(lower, 0.)
upper = min(upper, 1.)
# print("[{:.3f}, {:.3f}]".format(lower, upper))
return lower, upper
plt.figure()
lw = 2
plt.figure(figsize=(10, 10))
num_pos, num_neg = np.sum(ensemble_gt), len(ensemble_gt) - np.sum(ensemble_gt)
    fpr, tpr, threshold = roc_curve(np.array(ensemble_gt), np.array(ensemble_pred))  # compute the true positive rate and false positive rate
    roc_auc = auc(fpr, tpr)  # compute the AUC value
    lower, upper = ciauc(roc_auc, num_pos, num_neg)
    plt.plot(fpr, tpr, color='darkorange', alpha=.8,
             lw=lw, label='Ensemble (AUC:{:0.3f} [95%CI, {:0.3f}-{:0.3f}])'.format(roc_auc, lower,
                                                                                   upper))  # false positive rate on the x-axis, true positive rate on the y-axis
plt.plot([0, 1], [0, 1], color='gray', lw=1, linestyle='--')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('1-Specificity', size=18, weight='bold')
plt.ylabel('Sensitivity', size=18, weight='bold')
plt.title('Testing ROC curves')
plt.legend(loc="lower right")
plt.savefig(name + '_roc.jpg')
plt.show()
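
# Hedged usage sketch for draw_roc on synthetic labels and scores; the arrays below are
# random stand-ins rather than real model outputs.
def demo_draw_roc():
    gt = np.random.randint(0, 2, size=200)
    pred = np.clip(gt * 0.6 + np.random.rand(200) * 0.5, 0, 1)
    draw_roc(gt, pred, name='demo')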
def get_2D_gaussian_map(im_h, im_w):
from numpy import matlib as mb
IMAGE_WIDTH = im_w
IMAGE_HEIGHT = im_h
center_x = IMAGE_WIDTH / 2
center_y = IMAGE_HEIGHT / 2
R = np.sqrt(center_x ** 2 + center_y ** 2)
# Gauss_map = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH))
    # implemented directly with matrix operations
mask_x = mb.repmat(center_x, IMAGE_HEIGHT, IMAGE_WIDTH)
mask_y = mb.repmat(center_y, IMAGE_HEIGHT, IMAGE_WIDTH)
x1 = np.arange(IMAGE_WIDTH)
x_map = mb.repmat(x1, IMAGE_HEIGHT, 1)
y1 = np.arange(IMAGE_HEIGHT)
y_map = mb.repmat(y1, IMAGE_WIDTH, 1)
y_map = np.transpose(y_map)
Gauss_map = np.sqrt((x_map - mask_x) ** 2 + (y_map - mask_y) ** 2)
Gauss_map = np.exp(-0.5 * Gauss_map / R)
return Gauss_map
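# --- Illustrative usage sketch ---
# get_2D_gaussian_map() returns an (im_h, im_w) float map that equals 1.0 at the
# image centre and decays smoothly towards the corners; it can act as a soft
# centre-weighted spatial prior on an input image.
def _example_gaussian_map():
    gmap = get_2D_gaussian_map(256, 256)
    print(gmap.shape, float(gmap.max()), float(gmap.min()))  # (256, 256) 1.0 ~0.61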
def multiScaleSharpen_v1(img, radius=5):
img = np.float32(img)
Dest_float_img = np.zeros(img.shape, dtype=np.float32) + 114
w1 = 0.5
w2 = 0.5
w3 = 0.25
GaussBlue1 = np.float32(cv2.GaussianBlur(img, (radius, radius), 1))
GaussBlue2 = np.float32(cv2.GaussianBlur(img, (radius * 2 - 1, radius * 2 - 1), 2))
GaussBlue3 = np.float32(cv2.GaussianBlur(img, (radius * 4 - 1, radius * 4 - 1), 4))
D1 = img - GaussBlue1
D2 = GaussBlue1 - GaussBlue2
D3 = GaussBlue2 - GaussBlue3
D1_mask = (D1 > 0) + (-1) * (D1 <= 0) + 0.0
Dest_float_img = (1 - w1 * D1_mask) * D1 + w2 * D2 + w3 * D3 + img
Dest_img = cv2.convertScaleAbs(Dest_float_img)
return Dest_img
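# --- Illustrative usage sketch (random dummy image, for demonstration only) ---
# multiScaleSharpen_v1() implements a simple multi-scale detail enhancement: it
# blends the differences of three Gaussian-blurred versions of the input back
# into the image and returns an 8-bit result of the same shape.
def _example_sharpen():
    dummy = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    sharpened = multiScaleSharpen_v1(dummy, radius=5)
    print(sharpened.shape, sharpened.dtype)  # (64, 64, 3) uint8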
def edge_demo(image):
blurred = cv2.GaussianBlur(image, (3, 3), 0)
edge_output = cv2.Canny(blurred, 50, 150)
return edge_output.copy()
class DRimgDataset(Dataset):
def __init__(self, index_list, data_shape=256, label_name='label', mode='train', csv_path='',
reverse=False, has_aug=False):
super(DRimgDataset, self).__init__()
self.mode = mode
self.data_shape = data_shape
self.reverse = reverse
self.label_name = label_name
self.index_list = index_list
self.add_gaussian_mask = True
self.add_edge = True
self.detail_enhancement = True
self.wavelet_trans = True
self.padding = True
self.resize = True
self.mosaic = True
self.has_aug = has_aug
self.random_rotate = True
self.random_lightness = True
self.random_transpose = True
self.random_mirror = True
self.random_brightness = False
self.random_gaussian_noise = False
self.random_rician_noise = False
self.len = len(index_list)
self.all_df = pd.read_csv(csv_path)
print('=== mode:' + self.mode)
print('=== num of samples: ', self.len)
print('=== num of l1 samples: ',
len([item for item in self.index_list if int(item.split('_')[-1].split('.')[0]) == 1]))
print('=== num of l2 samples: ',
len([item for item in self.index_list if int(item.split('_')[-1].split('.')[0]) == 2]))
def load_mosaic(self, index):
# loads images in a mosaic
s = self.data_shape
xc, yc = [int(random.uniform(s * 0.75, s * 1.25)) for _ in range(2)] # mosaic center x, y
ll_ = int(index.split('_')[-1].split('.')[0])
s_list = [item for item in self.index_list if int(item.split('_')[-1].split('.')[0]) == ll_]
# s_list = list(self.all_df[(self.all_df['dataset'] == 'develop') & (self.all_df['label'] == ll_)]['pid'])
np.random.shuffle(s_list)
indices = [index] + s_list[:3] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
return img4
def load_image_0(self, index):
sample = cv2.imread(index)
h0, w0 = sample.shape[:2]
if self.detail_enhancement:
sample = multiScaleSharpen_v1(sample, 5)
if self.padding:
height, width, num_channel = sample.shape
max_edge = max(height, width)
new_pix = np.zeros((max_edge, max_edge, num_channel), dtype=np.uint8) + 114
if self.add_edge:
edge_ = edge_demo(sample)
                sample[:, :, 2] = (edge_ > 0).astype(np.uint8) * 255  # binarise Canny output before scaling to avoid uint8 overflow
if self.mode == 'train':
if height > width:
random_bias_range = max_edge - width
else:
random_bias_range = max_edge - height
random_bias = np.random.randint(random_bias_range)
if height > width:
new_pix[0:height, random_bias:random_bias + width, :] = sample[0:height, 0:width, :]
else:
new_pix[random_bias:random_bias + height, 0:width, :] = sample[0:height, 0:width, :]
else:
new_pix[0:height, 0:width, :] = sample[0:height, 0:width, :]
else:
new_pix = sample
if self.resize:
new_pix = cv2.resize(new_pix, (self.data_shape, self.data_shape))
return new_pix, (h0, w0), new_pix.shape[:2]
def __getitem__(self, index):
file_path = self.index_list[index]
new_pix, _, _ = self.load_image_0(file_path)
new_pix = new_pix.transpose((2, 0, 1)).copy()
target = int(self.index_list[index].split('_')[-1].split('.')[0]) - 1
if self.mode == 'inference':
return torch.from_numpy(new_pix).type(torch.FloatTensor), target, self.index_list[index]
return torch.from_numpy(new_pix).type(torch.FloatTensor), target # torch.from_numpy(target).long()
def __len__(self):
"""
Total number of samples in the dataset
"""
return self.len
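# --- Illustrative usage sketch (file names and csv path below are hypothetical) ---
# DRimgDataset derives the class label from the trailing "_<label>" in each file
# name (1 or 2) and, in 'inference' mode, __getitem__ also returns the file path
# so predictions can be traced back to their source image.
def _example_build_loader():
    files = ['data/case001_1.png', 'data/case002_2.png']      # hypothetical images
    ds = DRimgDataset(index_list=files, data_shape=256, mode='inference',
                      csv_path='clinical_info.csv')            # hypothetical csv
    return torch.utils.data.DataLoader(ds, batch_size=2, shuffle=False)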
def get_split_deterministic(all_keys, fold=0, num_splits=5, random_state=12345):
"""
Splits a list of patient identifiers (or numbers) into num_splits folds and returns the split for fold fold.
:param all_keys:
:param fold:
:param num_splits:
:param random_state:
:return:
"""
all_keys_sorted = np.sort(list(all_keys))
splits = KFold(n_splits=num_splits, shuffle=True, random_state=random_state)
for i, (train_idx, test_idx) in enumerate(splits.split(all_keys_sorted)):
if i == fold:
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
break
return train_keys, test_keys
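# --- Illustrative usage sketch ---
# get_split_deterministic() always returns the same train/test partition for the
# same (fold, num_splits, random_state), so folds are reproducible across runs.
def _example_split():
    pids = ['p%03d' % i for i in range(10)]
    train_keys, test_keys = get_split_deterministic(pids, fold=0, num_splits=5,
                                                    random_state=12345)
    print(len(train_keys), len(test_keys))  # 8 2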
def main():
# init the args
args = Options().parse()
args.cuda = not args.no_cuda and torch.cuda.is_available()
print(args)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
csvPath = args.csv_path
train_fold_idx = args.fold
df_tmp = pd.read_csv(csvPath)
valid_idx = np.array(df_tmp[df_tmp['dataset'] == 'test']['pid'])
valset = DRimgDataset(index_list=valid_idx,
data_shape=256,
label_name='label',
mode='inference',
csv_path=csvPath,
reverse=False,
has_aug=False,
)
val_loader = torch.utils.data.DataLoader(
valset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True if args.cuda else False)
# init the model
model_kwargs = {'pretrained': True}
model_kwargs['num_classes'] = args.num_classes
if args.rectify:
model_kwargs['rectified_conv'] = True
model_kwargs['rectify_avg'] = args.rectify_avg
model = encoding.models.get_model(args.model, **model_kwargs)
print(model)
if args.cuda:
# torch.cuda.set_device(0)
model.cuda()
# Please use CUDA_VISIBLE_DEVICES to control the number of gpus
model = nn.DataParallel(model)
# checkpoint
if args.verify:
if os.path.isfile(args.verify):
print("=> loading checkpoint '{}'".format(args.verify))
model.module.load_state_dict(torch.load(args.verify))
else:
raise RuntimeError("=> no verify checkpoint found at '{}'". \
format(args.verify))
elif args.resume is not None:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.module.load_state_dict(checkpoint['state_dict'])
else:
raise RuntimeError("=> no resume checkpoint found at '{}'". \
format(args.resume))
model.eval()
top1 = AverageMeter()
top5 = AverageMeter()
res_dict = {}
is_best = False
tbar = tqdm(val_loader, desc='\r')
for batch_idx, (data, target, pid_) in enumerate(tbar):
if args.cuda:
data, target = data.cuda(), target.cuda()
with torch.no_grad():
output = model(data)
output = F.softmax(output, dim=1)
res_dict[batch_idx] = [target.cpu().numpy(), output.cpu().numpy(), pid_]
# for accuracy func, output must be one-hot style
acc1, acc5 = accuracy(output, target, topk=(1, 1))
top1.update(acc1[0], data.size(0))
top5.update(acc5[0], data.size(0))
tbar.set_description('Top1: %.3f | Top5: %.3f' % (top1.avg, top5.avg))
print('Top1 Acc: %.3f | Top5 Acc: %.3f ' % (top1.avg, top5.avg))
pid_list_tmp = []
y_true_tmp = []
y_pred_tmp = []
for k, v in res_dict.items():
b_pid_list = v[-1]
b_y_true = v[0]
b_y_pred = v[1]
for i in range(len(b_pid_list)):
pid_list_tmp.append(b_pid_list[i])
y_true_tmp.append(b_y_true[i])
y_pred_tmp.append(b_y_pred[i, 1])
res_name = args.resume.split('/')[-1] + '_' + str(args.fold).zfill(2) + '_res.csv'
    res_df = pd.DataFrame({'pid': pid_list_tmp, 'y_true': y_true_tmp, 'y_pred': y_pred_tmp})
    res_df.to_csv(res_name, index=False)  # save per-sample predictions to the csv named above
# flake8: noqa: F841
import tempfile
from pathlib import Path
from typing import List
from pandas._typing import Scalar, ArrayLike
import pandas as pd
import numpy as np
from pandas.core.window import ExponentialMovingWindow
def test_types_init() -> None:
pd.Series(1)
pd.Series((1, 2, 3))
pd.Series(np.array([1, 2, 3]))
pd.Series(data=[1, 2, 3, 4], name="series")
pd.Series(data=[1, 2, 3, 4], dtype=np.int8)
pd.Series(data={'row1': [1, 2], 'row2': [3, 4]})
pd.Series(data=[1, 2, 3, 4], index=[4, 3, 2, 1], copy=True)
def test_types_any() -> None:
res1: bool = pd.Series([False, False]).any()
res2: bool = pd.Series([False, False]).any(bool_only=False)
res3: bool = pd.Series([np.nan]).any(skipna=False)
def test_types_all() -> None:
res1: bool = pd.Series([False, False]).all()
res2: bool = pd.Series([False, False]).all(bool_only=False)
res3: bool = pd.Series([np.nan]).all(skipna=False)
def test_types_csv() -> None:
s = pd.Series(data=[1, 2, 3])
csv_df: str = s.to_csv()
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name)
s2: pd.DataFrame = pd.read_csv(file.name)
with tempfile.NamedTemporaryFile() as file:
s.to_csv(Path(file.name))
s3: pd.DataFrame = pd.read_csv(Path(file.name))
# This keyword was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name, errors='replace')
s4: pd.DataFrame = pd.read_csv(file.name)
def test_types_copy() -> None:
s = pd.Series(data=[1, 2, 3, 4])
s2: pd.Series = s.copy()
def test_types_select() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s[0]
s[1:]
def test_types_iloc_iat() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.iat[0]
s2.loc[0]
s2.iat[0]
def test_types_loc_at() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.at['row1']
s2.loc[1]
s2.at[1]
def test_types_boolean_indexing() -> None:
s = pd.Series([0, 1, 2])
s[s > 1]
s[s]
def test_types_df_to_df_comparison() -> None:
s = pd.Series(data={'col1': [1, 2]})
s2 = pd.Series(data={'col1': [3, 2]})
res_gt: pd.Series = s > s2
res_ge: pd.Series = s >= s2
res_lt: pd.Series = s < s2
res_le: pd.Series = s <= s2
res_e: pd.Series = s == s2
def test_types_head_tail() -> None:
s = pd.Series([0, 1, 2])
s.head(1)
s.tail(1)
def test_types_sample() -> None:
s = pd.Series([0, 1, 2])
s.sample(frac=0.5)
s.sample(n=1)
def test_types_nlargest_nsmallest() -> None:
s = pd.Series([0, 1, 2])
s.nlargest(1)
s.nlargest(1, 'first')
s.nsmallest(1, 'last')
s.nsmallest(1, 'all')
def test_types_filter() -> None:
s = pd.Series(data=[1, 2, 3, 4], index=['cow', 'coal', 'coalesce', ''])
s.filter(items=['cow'])
s.filter(regex='co.*')
s.filter(like='al')
def test_types_setting() -> None:
s = pd.Series([0, 1, 2])
s[3] = 4
s[s == 1] = 5
s[:] = 3
def test_types_drop() -> None:
s = pd.Series([0, 1, 2])
res: pd.Series = s.drop(0)
res2: pd.Series = s.drop([0, 1])
res3: pd.Series = s.drop(0, axis=0)
res4: None = s.drop([0, 1], inplace=True, errors='raise')
res5: None = s.drop([0, 1], inplace=True, errors='ignore')
def test_types_drop_multilevel() -> None:
index = pd.MultiIndex(levels=[['top', 'bottom'], ['first', 'second', 'third']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
s = pd.Series(data=[1, 2, 3, 4, 5, 6], index=index)
res: pd.Series = s.drop(labels='first', level=1)
def test_types_dropna() -> None:
s = pd.Series([1, np.nan, np.nan])
res: pd.Series = s.dropna()
res2: None = s.dropna(axis=0, inplace=True)
def test_types_fillna() -> None:
s = pd.Series([1, np.nan, np.nan, 3])
res: pd.Series = s.fillna(0)
res2: pd.Series = s.fillna(0, axis='index')
res3: pd.Series = s.fillna(method='backfill', axis=0)
res4: None = s.fillna(method='bfill', inplace=True)
res5: pd.Series = s.fillna(method='pad')
res6: pd.Series = s.fillna(method='ffill', limit=1)
def test_types_sort_index() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_index()
res2: None = s.sort_index(ascending=False, inplace=True)
res3: pd.Series = s.sort_index(kind="mergesort")
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_index_with_key() -> None:
s = pd.Series([1, 2, 3], index=['a', 'B', 'c'])
res: pd.Series = s.sort_index(key=lambda k: k.str.lower())
def test_types_sort_values() -> None:
s = pd.Series([4, 2, 1, 3])
res: pd.Series = s.sort_values(0)
res2: pd.Series = s.sort_values(ascending=False)
res3: None = s.sort_values(inplace=True, kind='quicksort')
res4: pd.Series = s.sort_values(na_position='last')
res5: pd.Series = s.sort_values(ignore_index=True)
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_values_with_key() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_values(key=lambda k: -k)
def test_types_shift() -> None:
s = pd.Series([1, 2, 3])
s.shift()
s.shift(axis=0, periods=1)
s.shift(-1, fill_value=0)
def test_types_rank() -> None:
s = pd.Series([1, 1, 2, 5, 6, np.nan, 'milion'])
s.rank()
s.rank(axis=0, na_option='bottom')
s.rank(method="min", pct=True)
s.rank(method="dense", ascending=True)
s.rank(method="first", numeric_only=True)
def test_types_mean() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.mean()
s1: pd.Series = s.mean(axis=0, level=0)
f2: float = s.mean(skipna=False)
f3: float = s.mean(numeric_only=False)
def test_types_median() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.median()
s1: pd.Series = s.median(axis=0, level=0)
f2: float = s.median(skipna=False)
f3: float = s.median(numeric_only=False)
def test_types_sum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.sum()
s.sum(axis=0, level=0)
s.sum(skipna=False)
s.sum(numeric_only=False)
s.sum(min_count=4)
def test_types_cumsum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.cumsum()
s.cumsum(axis=0)
s.cumsum(skipna=False)
def test_types_min() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.min()
s.min(axis=0)
s.min(level=0)
s.min(skipna=False)
def test_types_max() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.max()
s.max(axis=0)
s.max(level=0)
s.max(skipna=False)
def test_types_quantile() -> None:
s = pd.Series([1, 2, 3, 10])
s.quantile([0.25, 0.5])
s.quantile(0.75)
s.quantile()
s.quantile(interpolation='nearest')
def test_types_clip() -> None:
s = pd.Series([-10, 2, 3, 10])
s.clip(lower=0, upper=5)
s.clip(lower=0, upper=5, inplace=True)
def test_types_abs() -> None:
    s = pd.Series([-10, 2, 3, 10])
    s.abs()
## Generate twitter Pre-Trained Word2Vec and trained Word2Vec
## Word2Vec
import os
os.chdir("C:/Users/dordo/Dropbox/Capstone Project")
import pandas as pd
import pickle
from gensim import corpora
from gensim.models import Word2Vec
import gensim.downloader as api
##---------------------------------------------------------------------------##
## Define function to get embeddings from memory
def get_wv(model, dicts):
""" Get word embeddings in memory"""
w2v_embed = {}
missing = []
for val in dicts.values():
try:
it = model.wv[val]
except:
missing.append(val)
it = None
w2v_embed[val] = it
return w2v_embed, missing
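## --- Illustrative usage sketch (toy corpus, for demonstration only) ---
## get_wv() looks up the vector of every token kept in the gensim dictionary and
## collects the tokens missing from the model vocabulary.
def _example_get_wv():
    toy_corpus = [['economy', 'market'], ['market', 'policy']]
    toy_dict = corpora.Dictionary(toy_corpus)
    toy_model = Word2Vec(toy_corpus, size=10, min_count=1)
    embeddings, missing = get_wv(toy_model, toy_dict)
    print(len(embeddings), 'tokens,', len(missing), 'missing')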
##---------------------------------------------------------------------------##
## Reading in pre processed data
with open('Data/Twitter/ProcessedTwitter.pkl', 'rb') as input:
txt_end = pickle.load(input)
## Create dictionary
dicts = corpora.Dictionary(txt_end)
len(dicts)
## Filter by appeareance in documents
dicts.filter_extremes(no_below=40, no_above=0.5, keep_n=None, keep_tokens=None)
len(dicts)
##--------------------------------------------------------------------------##
## PreTrained Word2vec
path = "C:/Users/dordo/Documents/Daniel/LSE/Capstone/Modelo/GoogleNews-vectors-negative300.bin"
model = Word2Vec(txt_end, size = 300, min_count = 40)
model.intersect_word2vec_format(path,
lockf=1.0,
binary=True)
model.train(txt_end, total_examples=model.corpus_count, epochs=25)
embeds_1 = get_wv(model, dicts)
## How many words of our corpus appear in the pre-trained embeddings?
##---------------------------------------------------------------------------##
## Self Trained Word2Vec
model_t = Word2Vec(txt_end, window=5, min_count=40, workers=4, size = 50)
model_t.train(txt_end, epochs=50, total_words = model_t.corpus_total_words,
total_examples = model_t.corpus_count)
embeds_2 = get_wv(model_t, dicts)
##---------------------------------------------------------------------------##
## Pre Trained GLOVE
model_g = api.load("glove-twitter-50")
embeds_3 = get_wv(model_g, dicts)
embeds_3df = pd.DataFrame(embeds_3[0])
import tide_constituents as tc
from py_noaa import coops
import pandas as pd
import numpy as np
import tappy
start = '20180201'
end = '20180228'
interval = 1
start = pd.to_datetime(start)
import numpy as np
import xarray as xr
import pandas as pd
import os
from collections import OrderedDict
# from astropy.time import Time
import logging
import copy
from typing import List, Dict, Union, Tuple
import pysagereader
class SAGEIILoaderV700(object):
"""
Class designed to load the v7.00 SAGE II spec and index files provided by NASA ADSC into python
Data files must be accessible by the users machine, and can be downloaded from:
https://eosweb.larc.nasa.gov/project/sage2/sage2_v7_table
Parameters
----------
data_folder
location of sage ii index and spec files.
output_format
format for the output data. If ``'xarray'`` the output is returned as an ``xarray.Dataset``.
If None the output is returned as a dictionary of numpy arrays.
**NOTE: the following options only apply to xarray output types**
species
Species to be returned in the output data. If None all species are returned. Options are
``aerosol``, ``ozone``, ``h2o``, and ``no2``. If more than one species is returned fields will be NaN-padded
where data is not available. ``species`` is only used if ``'xarray'`` is set as the ``output_data`` format,
otherwise it has no effect.
cf_names
If True then CF-1.7 naming conventions are used for the output_data when ``xarray`` is selected.
filter_aerosol
filter the aerosol using the cloud flag
filter_ozone
filter the ozone using the criteria recommended in the release notes
* Exclusion of all data points with an uncertainty estimate of 300% or greater
* Exclusion of all profiles with an uncertainty greater than 10% between 30 and 50 km
* Exclusion of all data points at altitude and below the occurrence of an aerosol extinction value of
greater than 0.006 km^-1
* Exclusion of all data points at altitude and below the occurrence of both the 525nm aerosol extinction
value exceeding 0.001 km^-1 and the 525/1020 extinction ratio falling below 1.4
* Exclusion of all data points below 35km an 200% or larger uncertainty estimate
enumerate_flags
expand the index and species flags to their boolean values.
normalize_percent_error
give the species error as percent rather than percent * 100
return_separate_flags
return the enumerated flags as a separate data array
Example
-------
>>> sage = SAGEIILoaderV700()
>>> sage.data_folder = 'path/to/data'
>>> data = sage.load_data('2004-1-1','2004-5-1')
In addition to the sage ii fields reported in the files, two additional time fields are provided
to allow for easier subsetting of the data.
``data['mjd']`` is a numpy array containing the modified julian dates of each scan
``date['time']`` is an pandas time series object containing the times of each scan
"""
def __init__(self, data_folder: str=None, output_format: str='xarray', species: List[str]=('aerosol', 'h2o', 'no2', 'ozone', 'background'),
cf_names: bool=False, filter_aerosol: bool=False, filter_ozone: bool=False,
enumerate_flags: bool=False, normalize_percent_error: bool=False, return_separate_flags: bool=False):
if type(species) == str:
species = [species]
self.data_folder = data_folder # Type: str
self.version = '7.00'
self.index_file = 'SAGE_II_INDEX_'
self.spec_file = 'SAGE_II_SPEC_'
self.fill_value = np.nan
self.spec_format = self.get_spec_format()
self.index_format = self.get_index_format()
self.output_format = output_format
self.species = [s.lower() for s in species]
self.cf_names = cf_names
self.filter_aerosol = filter_aerosol
self.filter_ozone = filter_ozone
self.normalize_percent_error = normalize_percent_error
self.enumerate_flags = enumerate_flags
self.return_separate_flags = return_separate_flags
@staticmethod
def get_spec_format() -> Dict[str, Tuple[str, int]]:
"""
spec format taken from sg2_specinfo.pro provided in the v7.00 download
used for reading the binary data format
Returns
-------
Dict
Ordered dictionary of variables provided in the spec file. Each dictionary field contains a
tuple with the information (data type, number of data points). Ordering is important as the
sage ii binary files are read sequentially.
"""
spec = OrderedDict()
spec['Tan_Alt'] = ('float32', 8) # Subtangent Altitudes(km)
spec['Tan_Lat'] = ('float32', 8) # Subtangent Latitudes @ Tan_Alt(deg)
spec['Tan_Lon'] = ('float32', 8) # Subtangent Longitudes @ Tan_Alt(deg)
spec['NMC_Pres'] = ('float32', 140) # Gridded Pressure profile(mb)
spec['NMC_Temp'] = ('float32', 140) # Gridded Temperature profile(K)
spec['NMC_Dens'] = ('float32', 140) # Gridded Density profile(cm ^ (-3))
spec['NMC_Dens_Err'] = ('int16', 140) # Error in NMC_Dens( % * 1000)
spec['Trop_Height'] = ('float32', 1) # NMC Tropopause Height(km)
spec['Wavelength'] = ('float32', 7) # Wavelength of each channel(nm)
spec['O3'] = ('float32', 140) # O3 Density profile 0 - 70 Km(cm ^ (-3))
spec['NO2'] = ('float32', 100) # NO2 Density profile 0 - 50 Km(cm ^ (-3))
spec['H2O'] = ('float32', 100) # H2O Volume Mixing Ratio 0 - 50 Km(ppp)
spec['Ext386'] = ('float32', 80) # 386 nm Extinction 0 - 40 Km(1 / km)
spec['Ext452'] = ('float32', 80) # 452 nm Extinction 0 - 40 Km(1 / km)
spec['Ext525'] = ('float32', 80) # 525 nm Extinction 0 - 40 Km(1 / km)
spec['Ext1020'] = ('float32', 80) # 1020 nm Extinction 0 - 40 Km(1 / km)
spec['Density'] = ('float32', 140) # Calculated Density 0 - 70 Km(cm ^ (-3))
spec['SurfDen'] = ('float32', 80) # Aerosol surface area dens 0 - 40 km(um ^ 2 / cm ^ 3)
spec['Radius'] = ('float32', 80) # Aerosol effective radius 0 - 40 km(um)
spec['Dens_Mid_Atm'] = ('float32', 70) # Middle Atmosphere Density(cm ^ (-3))
spec['O3_Err'] = ('int16', 140) # Error in O3 density profile( % * 100)
spec['NO2_Err'] = ('int16', 100) # Error in NO2 density profile( % * 100)
spec['H2O_Err'] = ('int16', 100) # Error in H2O mixing ratio( % * 100)
spec['Ext386_Err'] = ('int16', 80) # Error in 386 nm Extinction( % * 100)
spec['Ext452_Err'] = ('int16', 80) # Error in 452 nm Extinction( % * 100)
spec['Ext525_Err'] = ('int16', 80) # Error in 525 nm Extinction( % * 100)
spec['Ext1020_Err'] = ('int16', 80) # Error in 1019 nm Extinction( % * 100)
spec['Density_Err'] = ('int16', 140) # Error in Density( % * 100)
spec['SurfDen_Err'] = ('int16', 80) # Error in surface area dens( % * 100)
spec['Radius_Err'] = ('int16', 80) # Error in aerosol radius( % * 100)
spec['Dens_Mid_Atm_Err'] = ('int16', 70) # Error in Middle Atm.Density( % * 100)
spec['InfVec'] = ('uint16', 140) # Informational Bit flags
return spec
@staticmethod
def get_index_format() -> Dict[str, Tuple[str, int]]:
"""
index format taken from sg2_indexinfo.pro provided in the v7.00 download
used for reading the binary data format
Returns
-------
Dict
an ordered dictionary of variables provided in the index file. Each dictionary
field contains a tuple with the information (data type, length). Ordering is
important as the sage ii binary files are read sequentially.
"""
info = OrderedDict()
info['num_prof'] = ('uint32', 1) # Number of profiles in these files
info['Met_Rev_Date'] = ('uint32', 1) # LaRC Met Model Revision Date(YYYYMMDD)
info['Driver_Rev'] = ('S1', 8) # LaRC Driver Version(e.g. 6.20)
info['Trans_Rev'] = ('S1', 8) # LaRC Transmission Version
info['Inv_Rev'] = ('S1', 8) # LaRC Inversion Version
info['Spec_Rev'] = ('S1', 8) # LaRC Inversion Version
info['Eph_File_Name'] = ('S1', 32) # Ephemeris data file name
info['Met_File_Name'] = ('S1', 32) # Meteorological data file name
info['Ref_File_Name'] = ('S1', 32) # Refraction data file name
info['Tran_File_Name'] = ('S1', 32) # Transmission data file name
info['Spec_File_Name'] = ('S1', 32) # Species profile file name
info['FillVal'] = ('float32', 1) # Fill value
# Altitude grid and range info
info['Grid_Size'] = ('float32', 1) # Altitude grid spacing(0.5 km)
info['Alt_Grid'] = ('float32', 200) # Geometric altitudes(0.5, 1.0, ..., 100.0 km)
info['Alt_Mid_Atm'] = ('float32', 70) # Middle atmosphere geometric altitudes
info['Range_Trans'] = ('float32', 2) # Transmission min & max altitudes[0.5, 100.]
info['Range_O3'] = ('float32', 2) # Ozone min & max altitudes[0.5, 70.0]
info['Range_NO2'] = ('float32', 2) # NO2 min & max altitudes[0.5, 50.0]
info['Range_H2O'] = ('float32', 2) # Water vapor min & max altitudes[0.5, 50.0]
info['Range_Ext'] = ('float32', 2) # Aerosol extinction min & max altitudes[0.5, 40.0]
info['Range_Dens'] = ('float32', 2) # Density min & max altitudes[0.5, 70.0]
info['Spare'] = ('float32', 2) #
# Event specific info useful for data subsetting
info['YYYYMMDD'] = ('int32', 930) # Event date at 20km subtangent point
info['Event_Num'] = ('int32', 930) # Event number
info['HHMMSS'] = ('int32', 930) # Event time at 20km
info['Day_Frac'] = ('float32', 930) # Time of year(DDD.frac) at 20 km
info['Lat'] = ('float32', 930) # Subtangent latitude at 20 km(-90, +90)
info['Lon'] = ('float32', 930) # Subtangent longitude at 20 km(-180, +180)
info['Beta'] = ('float32', 930) # Spacecraft beta angle(deg)
info['Duration'] = ('float32', 930) # Duration of event(sec)
info['Type_Sat'] = ('int16', 930) # Event Type Instrument(0 = SR, 1 = SS)
info['Type_Tan'] = ('int16', 930) # Event Type Local(0 = SR, 1 = SS)
# Process tracking and flag info
info['Dropped'] = ('int32', 930) # Dropped event flag
info['InfVec'] = ('uint32', 930) # Bit flags relating to processing (
# NOTE: readme_sage2_v6.20.txt says InfVec is 16 bit but appears to actually be 32 (also in IDL software)
# Record creation dates and times
info['Eph_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Eph_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Met_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Met_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Ref_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Ref_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Tran_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Tran_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
info['Spec_Cre_Date'] = ('int32', 930) # Record creation date(YYYYMMDD format)
info['Spec_Cre_Time'] = ('int32', 930) # Record creation time(HHMMSS format)
return info
def get_spec_filename(self, year: int, month: int) -> str:
"""
Returns the spec filename given a year and month
Parameters
----------
year
year of the data that will be loaded
month
month of the data that will be loaded
Returns
-------
filename of the spec file where the data is stored
"""
file = os.path.join(self.data_folder,
self.spec_file + str(int(year)) + str(int(month)).zfill(2) + '.' + self.version)
if not os.path.isfile(file):
file = None
return file
def get_index_filename(self, year: int, month: int) -> str:
"""
Returns the index filename given a year and month
Parameters
----------
year
year of the data that will be loaded
month
month of the data that will be loaded
Returns
-------
filename of the index file where the data is stored
"""
file = os.path.join(self.data_folder,
self.index_file + str(int(year)) + str(int(month)).zfill(2) + '.' + self.version)
if not os.path.isfile(file):
file = None
return file
def read_spec_file(self, file: str, num_profiles: int) -> List[Dict]:
"""
Parameters
----------
file
name of the spec file to be read
num_profiles
number of profiles to read from the spec file (usually determined from the index file)
Returns
-------
list of dictionaries containing the spec data. Each list is one event
"""
# load the file into the buffer
file_format = self.spec_format
with open(file, "rb") as f:
buffer = f.read()
# initialize the list of dictionaries
data = [None] * num_profiles
for p in range(num_profiles):
data[p] = dict()
# load the data from the buffer
bidx = 0
for p in range(num_profiles):
for key in file_format.keys():
nbytes = np.dtype(file_format[key][0]).itemsize * file_format[key][1]
data[p][key] = copy.copy(np.frombuffer(buffer[bidx:bidx+nbytes],
dtype=file_format[key][0]))
bidx += nbytes
return data
def read_index_file(self, file: str) -> Dict:
"""
Read the binary file into a python data structure
Parameters
----------
file
filename to be read
Returns
-------
data from the file
"""
file_format = self.index_format
with open(file, "rb") as f:
buffer = f.read()
data = dict()
# load the data from file into a list
bidx = 0
for key in file_format.keys():
nbytes = np.dtype(file_format[key][0]).itemsize * file_format[key][1]
if file_format[key][0] == 'S1':
data[key] = copy.copy(buffer[bidx:bidx + nbytes].decode('utf-8'))
else:
data[key] = copy.copy(np.frombuffer(buffer[bidx:bidx + nbytes], dtype=file_format[key][0]))
if len(data[key]) == 1:
data[key] = data[key][0]
bidx += nbytes
# make a more useable time field
date_str = []
# If the time overflows by less than the scan time just set it to midnight
data['HHMMSS'][(data['HHMMSS'] >= 240000) & (data['HHMMSS'] < (240000 + data['Duration']))] = 235959
# otherwise, set it as invalid
data['HHMMSS'][data['HHMMSS'] >= 240000] = -999
for idx, (ymd, hms) in enumerate(zip(data['YYYYMMDD'], data['HHMMSS'])):
if (ymd < 0) | (hms < 0):
date_str.append('1970-1-1 00:00:00') # invalid sage ii date
else:
hours = int(hms/10000)
mins = int((hms % 10000)/100)
secs = hms % 100
date_str.append(str(ymd)[0:4] + '-' + str(ymd)[4:6].zfill(2) + '-' +
str(ymd)[6::].zfill(2) + ' ' + str(hours).zfill(2) + ':' +
str(mins).zfill(2) + ':' + str(secs).zfill(2))
# data['time'] = Time(date_str, format='iso')
data['time'] = pd.to_datetime(date_str)
data['mjd'] = np.array((data['time'] - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D'))
data['mjd'][data['mjd'] < 40588] = -999 # get rid of invalid dates
return data
def load_data(self, min_date: str, max_date: str,
min_lat: float=-90, max_lat: float=90,
min_lon: float=-180, max_lon: float=360) -> Union[Dict, xr.Dataset]:
"""
Load the SAGE II data for the specified dates and locations.
Parameters
----------
min_date
start date where data will be loaded in iso format, eg: '2004-1-1'
max_date
end date where data will be loaded in iso format, eg: '2004-1-1'
min_lat
minimum latitude (optional)
max_lat
maximum latitude (optional)
min_lon
minimum longitude (optional)
max_lon
maximum longitude (optional)
Returns
-------
Variables are returned as numpy arrays (1 or 2 dimensional depending on the variable)
"""
min_time = pd.Timestamp(min_date)
max_time = pd.Timestamp(max_date)
data = dict()
init = False
# create a list of unique year/month combinations between the start/end dates
uniq = OrderedDict()
for year in [(t.date().year, t.date().month) for t in
pd.date_range(min_time, max_time+pd.Timedelta(27, 'D'), freq='27D')]:
uniq[year] = year
# load in the data from the desired months
for (year, month) in list(uniq.values()):
logging.info('loading data for : ' + str(year) + '/' + str(month))
indx_file = self.get_index_filename(year, month)
# if the file does not exist move on to the next month
if indx_file is None:
continue
indx_data = self.read_index_file(indx_file)
numprof = indx_data['num_prof']
spec_data = self.read_spec_file(self.get_spec_filename(year, month), numprof)
# get rid of the duplicate names for InfVec
for sp in spec_data:
sp['ProfileInfVec'] = copy.copy(sp['InfVec'])
del sp['InfVec']
for key in indx_data.keys():
# get rid of extraneous profiles in the index so index and spec are the same lengths
if hasattr(indx_data[key], '__len__'):
indx_data[key] = np.delete(indx_data[key], np.arange(numprof, 930))
# add the index values to the data set
if key in data.keys():
# we dont want to replicate certain fields
if (key[0:3] != 'Alt') & (key[0:5] != 'Range') & (key[0:7] != 'FillVal'):
data[key] = np.append(data[key], indx_data[key])
else:
if key == 'FillVal':
data[key] = indx_data[key]
else:
data[key] = [indx_data[key]]
# initialize the data dictionaries as lists
if init is False:
for key in spec_data[0].keys():
data[key] = []
init = True
# add the spec values to the data set
for key in spec_data[0].keys():
data[key].append(np.asarray([sp[key] for sp in spec_data]))
# join all of our lists into an array - this could be done more elegantly with vstack to avoid
# the temporary lists, but this is much faster
for key in data.keys():
if key == 'FillVal':
data[key] = float(data[key]) # make this a simple float rather than zero dimensional array
elif len(data[key][0].shape) > 0:
data[key] = np.concatenate(data[key], axis=0)
else:
data[key] = np.asarray(data[key])
data = self.subset_data(data, min_date, max_date, min_lat, max_lat, min_lon, max_lon)
if not data:
return None
if self.output_format == 'xarray':
data = self.convert_to_xarray(data)
return data
@staticmethod
def subset_data(data: Dict, min_date: str, max_date: str,
min_lat: float, max_lat: float,
min_lon: float, max_lon: float) -> Dict:
"""
Removes any data from the dictionary that does not meet the specified time, latitude and longitude requirements.
Parameters
----------
data
dictionary of sage ii data. Must have the fields 'mjd', 'Lat' and 'Lon'. All others are optional
min_date
start date where data will be loaded in iso format, eg: '2004-1-1'
max_date
end date where data will be loaded in iso format, eg: '2004-1-1'
min_lat
minimum latitude (optional)
max_lat
maximum latitude (optional)
min_lon
minimum longitude (optional)
max_lon
maximum longitude (optional)
Returns
-------
returns the dictionary with only data in the valid latitude, longitude and time range
"""
min_mjd = (pd.Timestamp(min_date) - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D')
max_mjd = (pd.Timestamp(max_date) - pd.Timestamp('1858-11-17')) / pd.Timedelta(1, 'D')
good = (data['mjd'] > min_mjd) & (data['mjd'] < max_mjd) & \
(data['Lat'] > min_lat) & (data['Lat'] < max_lat) & \
(data['Lon'] > min_lon) & (data['Lon'] < max_lon)
if np.any(good):
for key in data.keys():
if hasattr(data[key], '__len__'):
if data[key].shape[0] == len(good):
data[key] = data[key][good]
else:
print('no data satisfies the criteria')
data = {}
return data
def convert_to_xarray(self, data: Dict) -> Union[xr.Dataset, Tuple[xr.Dataset, xr.Dataset]]:
"""
Parameters
----------
data
Data from the ``load_data`` function
Returns
-------
data formatted to an xarray Dataset
"""
# split up the fields into one of different sizes and optional returns
fields = dict()
# not currently returned
fields['geometry'] = ['Tan_Alt', 'Tan_Lat', 'Tan_Lon']
fields['flags'] = ['InfVec', 'Dropped']
fields['profile_flags'] = ['ProfileInfVec']
# always returned - 1 per profile
fields['general'] = ['Event_Num', 'Lat', 'Lon', 'Beta', 'Duration', 'Type_Sat', 'Type_Tan', 'Trop_Height']
# optional return parameters
fields['background'] = ['NMC_Pres', 'NMC_Temp', 'NMC_Dens', 'NMC_Dens_Err', 'Density', 'Density_Err']
fields['ozone'] = ['O3', 'O3_Err']
fields['no2'] = ['NO2', 'NO2_Err']
fields['h2o'] = ['H2O', 'H2O_Err']
fields['aerosol'] = ['Ext386', 'Ext452', 'Ext525', 'Ext1020', 'Ext386_Err', 'Ext452_Err', 'Ext525_Err',
'Ext1020_Err']
fields['particle_size'] = ['SurfDen', 'Radius', 'SurfDen_Err', 'Radius_Err']
xr_data = []
index_flags = self.convert_index_bit_flags(data)
species_flags = self.convert_species_bit_flags(data)
time = pd.to_timedelta(data['mjd'], 'D') + pd.Timestamp('1858-11-17')
data['Trop_Height'] = data['Trop_Height'].flatten()
for key in fields['general']:
xr_data.append(xr.DataArray(data[key], coords=[time], dims=['time'], name=key))
if 'aerosol' in self.species or self.filter_ozone: # we need aerosol to filter ozone
altitude = data['Alt_Grid'][0:80]
wavel = np.array([386.0, 452.0, 525.0, 1020.0])
ext = np.array([data['Ext386'], data['Ext452'], data['Ext525'], data['Ext1020']])
xr_data.append(xr.DataArray(ext, coords=[wavel, time, altitude],
dims=['wavelength', 'time', 'Alt_Grid'], name='Ext'))
ext = np.array([data['Ext386_Err'], data['Ext452_Err'], data['Ext525_Err'], data['Ext1020_Err']])
xr_data.append(xr.DataArray(ext, coords=[wavel, time, altitude],
dims=['wavelength', 'time', 'Alt_Grid'], name='Ext_Err'))
for key in fields['particle_size']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if 'no2' in self.species:
altitude = data['Alt_Grid'][0:100]
for key in fields['no2']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if 'h2o' in self.species:
altitude = data['Alt_Grid'][0:100]
for key in fields['h2o']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if any(i in ['ozone', 'o3'] for i in self.species):
altitude = data['Alt_Grid'][0:140]
for key in fields['ozone']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
if 'background' in self.species:
altitude = data['Alt_Grid'][0:140]
for key in fields['background']:
xr_data.append(xr.DataArray(data[key], coords=[time, altitude],
dims=['time', 'Alt_Grid'], name=key))
xr_data = xr.merge(xr_data)
if self.enumerate_flags:
xr_data = xr.merge([xr_data, index_flags, species_flags])
for var in xr_data.variables.keys():
if xr_data[var].dtype == 'float32' or 'Err' in var:
xr_data[var] = xr_data[var].where(xr_data[var] != data['FillVal'])
# determine cloud filter for aerosol data
cloud_filter = xr.full_like(species_flags.Cloud_Bit_1, fill_value=True, dtype=bool)
min_alt = (xr_data.Alt_Grid * (species_flags.Cloud_Bit_1 & species_flags.Cloud_Bit_2)).max(dim='Alt_Grid')
cloud_filter = cloud_filter.where(cloud_filter.Alt_Grid > min_alt)
xr_data['cloud_filter'] = np.isnan(cloud_filter)
# determine valid ozone altitudes
if any(i in ['ozone', 'o3'] for i in self.species):
# add an ozone filter field for convenience
ozone_good = xr.full_like(species_flags.Cloud_Bit_1, fill_value=True, dtype=bool)
# Exclusion of all data points with an uncertainty estimate of 300% or greater
ozone_good = ozone_good.where(xr_data.O3_Err < 30000)
# Exclusion of all profiles with an uncertainty greater than 10% between 30 and 50 km
no_good = (xr_data.O3_Err > 1000) & (xr_data.Alt_Grid > 30) & (xr_data.Alt_Grid < 50)
ozone_good = ozone_good.where(~no_good)
# Exclusion of all data points at altitude and below the occurrence of an aerosol extinction value of
# greater than 0.006 km^-1
# NOTE: the wavelength to use as the filter is not specified in the documentation, so I have chosen the
# wavelength with the smallest extinction and therefore the strictest filtering
min_alt = (xr_data.Alt_Grid * (xr_data.Ext.sel(wavelength=1020) > 0.006)).max(dim='Alt_Grid')
ozone_good = ozone_good.where(xr_data.Alt_Grid > min_alt)
# Exclusion of all data points at altitude and below the occurrence of both the 525nm aerosol extinction
# value exceeding 0.001 km^-1 and the 525/1020 extinction ratio falling below 1.4
min_alt = (xr_data.Alt_Grid * ((xr_data.Ext.sel(wavelength=525) > 0.001) &
((xr_data.Ext.sel(wavelength=525) / xr_data.Ext.sel(
wavelength=1020)) < 1.4))).max(dim='Alt_Grid')
ozone_good = ozone_good.where(xr_data.Alt_Grid > min_alt)
# Exclusion of all data points below 35km an 200% or larger uncertainty estimate
no_good = (xr_data.O3_Err > 20000) & (xr_data.Alt_Grid < 35)
ozone_good = ~np.isnan(ozone_good.where(~no_good))
xr_data['ozone_filter'] = ozone_good
if self.filter_aerosol:
xr_data['Ext'] = xr_data.Ext.where(~xr_data.cloud_filter)
if self.filter_ozone:
xr_data['O3'] = xr_data.O3.where(ozone_good)
# drop aerosol if not requested
if self.filter_ozone and not ('aerosol' in self.species):
            xr_data = xr_data.drop(['Ext', 'Ext_Err', 'wavelength'])
if self.normalize_percent_error:
for var in xr_data.variables.keys():
if 'Err' in var: # put error units back into percent
xr_data[var] = (xr_data[var] / 100).astype('float32')
xr_data = xr_data.transpose('time', 'Alt_Grid', 'wavelength')
xr_data = self.apply_cf_conventions(xr_data)
if self.return_separate_flags:
return xr_data, xr.merge([index_flags, species_flags])
else:
return xr_data
def apply_cf_conventions(self, data):
attrs = {'time': {'standard_name': 'time'},
'Lat': {'standard_name': 'latitude',
'units': 'degrees_north'},
'Lon': {'standard_name': 'longitude',
'units': 'degrees_east'},
'Alt_Grid': {'units': 'km'},
'wavelength': {'units': 'nm',
'description': 'wavelength at which aerosol extinction is retrieved'},
'O3': {'standard_name': 'number_concentration_of_ozone_molecules_in_air',
'units': 'cm-3'},
'NO2': {'standard_name': 'number_concentration_of_nitrogen_dioxide_molecules_in_air',
'units': 'cm-3'},
'H2O': {'standard_name': 'number_concentration_of_water_vapor_in_air',
'units': 'cm-3'},
'Ext': {'standard_name': 'volume_extinction_coefficient_in_air_due_to_ambient_aerosol_particles',
'units': 'km-1'},
'O3_Err': {'standard_name': 'number_concentration_of_ozone_molecules_in_air_error',
'units': 'percent'},
'NO2_Err': {'standard_name': 'number_concentration_of_nitrogen_dioxide_molecules_in_air_error',
'units': 'percent'},
'H2O_Err': {'standard_name': 'number_concentration_of_water_vapor_in_air_error',
'units': 'percent'},
'Ext_Err': {'standard_name': 'volume_extinction_coefficient_in_air_due_to_ambient_aerosol_'
'particles_error',
'units': 'percent'},
'Duration': {'units': 'seconds',
'description': 'duration of the sunrise/sunset event'},
'Beta': {'units': 'degrees',
'description': 'angle between the satellite orbit plane and the sun'},
'Trop_Height': {'units': 'km'},
'Radius': {'units': 'microns'},
'SurfDen': {'units': 'microns2 cm-3'}}
for key in attrs.keys():
data[key].attrs = attrs[key]
data.attrs = {'description': 'Retrieved vertical profiles of aerosol extinction, ozone, '
'nitrogen dioxide, water vapor, and meteorological profiles from SAGE II '
'version 7.00',
'publication reference': '<NAME>., <NAME>., <NAME>., & <NAME>. (2013). '
'SAGE version 7.0 algorithm: application to SAGE II. Atmospheric '
'Measurement Techniques, 6(12), 3539-3561.',
'title': 'SAGE II version 7.00',
                      'date_created': pd.Timestamp.now()}
        return data
import argparse
import pickle
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from copy import deepcopy
from torch.utils.data import Dataset, DataLoader
from transformers import BertConfig, BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup
from sklearn.model_selection import train_test_split
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional.classification import auroc
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.metrics import accuracy_score, multilabel_confusion_matrix, f1_score, precision_score, recall_score, precision_recall_fscore_support
from itertools import compress
from pathlib import Path
# For group analysis:
# This assumes that there are sub-groups within the gold data that are found in the folder ./groups/
# Each file represents a sub-group and has and id per line that corresponds to the sample_id in the gold/test data
group_path = './groups/'
group_files = [n.name for n in Path(group_path).glob('*.txt')]
mapped_groups = dict()
groups = dict()
for group in group_files:
groups[group] = [int(l) for l in open(group_path+group, 'r').readlines() if l.strip() != '']
for i in groups[group]:
if i not in mapped_groups:
mapped_groups.update({i: list()})
mapped_groups[i].append(group)
#For the different runs, use a random seed
RANDOM_SEED = np.random.choice(range(100))
#RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
errors = list()
BERT_MODEL_NAME = 'bert-base-cased'
tokeniser = BertTokenizer.from_pretrained(BERT_MODEL_NAME)
#sample_row = df.iloc[16]
#sample_text = sample_row.text
#sample_labels = sample_row[LABEL_COLUMNS]
# encoding = tokeniser.encode_plus(
# sample,
# truncation=True,
# add_special_tokens=True,
# max_length=128,
# return_token_type_ids=False,
# padding="max_length",
# return_attention_mask=True,
# return_tensors="pt"
# )
#print(encoding.keys())
#print(encoding['input_ids'].shape)
#print(encoding['attention_mask'].shape)
#print(encoding['input_ids'].squeeze())
#print(encoding['attention_mask'].squeeze())
#print(tokeniser.convert_ids_to_tokens(encoding['input_ids'].squeeze())[:20])
def classify(sample, model, tokeniser, label_names, thresholds):
encoding = tokeniser.encode_plus(
sample,
truncation=True,
add_special_tokens=True,
max_length=128,
return_token_type_ids=False,
padding="max_length",
return_attention_mask=True,
return_tensors="pt"
)
_, prediction = model(encoding['input_ids'], encoding['attention_mask'])
prediction = prediction.flatten().numpy()
# print(prediction)
predicted_labels = []
binarised_labels = []
scores = []
for i, label_name in enumerate(label_names):
label_probability = prediction[i]
scores.append(label_probability)
if label_probability > thresholds[i]:
# print('LABEL PROB', label_probability)
predicted_labels.append(label_name)
binarised_labels.append(1)
else:
binarised_labels.append(0)
return predicted_labels, binarised_labels, scores
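# --- Illustrative usage sketch (the trained model object is assumed, not built here) ---
# classify() tokenises one raw string, runs the fine-tuned multilabel model
# (whose forward() returns (loss, probabilities)) and thresholds each label
# independently, returning the label names, a 0/1 vector and the raw scores.
def _example_classify(trained_model, label_names, thresholds):
    preds, binarised, scores = classify("some example sentence", trained_model,
                                        tokeniser, label_names, thresholds)
    return preds, binarised, scores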
#train_dataset = TextDataset(train_df, tokeniser)
#sample_item = train_dataset[0]
#print('SAMPLE ITEM KEYS', sample_item.keys())
#print(sample_item['text'])
#print(sample_item['input_ids'].shape, sample_item['attention_mask'].shape, sample_item['labels'].shape)
#sample_item['labels']
# bert_model = BertModel.from_pretrained(BERT_MODEL_NAME, return_dict=True)
# prediction = bert_model(sample_item['input_ids'].unsqueeze(dim=0), sample_item['attention_mask'].unsqueeze(dim=0))
parser = argparse.ArgumentParser(description='args for the multilabel text classifier')
parser.add_argument('-dropout', type=float, default=0.1)
parser.add_argument('-early', action='store_true')
parser.add_argument('-folds', type=int, default=10)
parser.add_argument('-epochs', type=int, default=20)
parser.add_argument('-batch_size', type=int, default=32)
parser.add_argument('-csv', type=str, default='test.csv')
parser.add_argument('-var_thresh', action='store_true', default=False)
parser.add_argument('-threshold', type=float, default=0.5)
parser.add_argument('-soft', action='store_true', default=False)
parser.add_argument('-ratings', action='store_true', default=False)
parser.add_argument('-test', type=str)
parser.add_argument('-theme', action='store_true', default=False)
parser.add_argument('-gold_val', action='store_true', default=False)
parser.add_argument('-output', type=str, default='./output/')
parser.add_argument('-cycles', type=int)
parser.add_argument('-print_sents', action='store_true')
parser.add_argument('-print_output', action='store_true')
parser.add_argument('-final_model', action='store_true')
parser.add_argument('-errors', action='store_true')
args = parser.parse_args()
OUTPUT = args.output
NUM_EPOCHS = args.epochs # 10
BATCH_SIZE = args.batch_size
THRESHOLD = args.threshold
FOLDS = args.folds
df = pd.read_csv(args.csv)
print('### NUM ENTRIES AFTER READING CSV', len(df))
if args.test:
    df_test = pd.read_csv(args.test)
import os
import uuid
import pandas as pd
from grdb.database.v1_1_0 import (
sample,
preparation_step,
recipe,
properties,
raman_set,
raman_file,
raman_spectrum,
sem_file,
)
from sqlalchemy import String, Integer, Float
sql_validator = {
"int": lambda x: isinstance(x.property.columns[0].type, Integer),
"float": lambda x: isinstance(x.property.columns[0].type, Float),
"str": lambda x: isinstance(x.property.columns[0].type, String),
}
def convert(value, field):
if sql_validator["int"](field):
return int(value)
elif sql_validator["float"](field):
return float(value)
else:
return str(value)
def upload_file(file_path, folder_name=None):
box_adaptor = BoxAdaptor(box_config_path)
upload_folder = box_adaptor.create_upload_folder(folder_name=folder_name)
box_file = box_adaptor.upload_file(upload_folder, file_path, str(uuid.uuid4()))
return box_file.get_shared_link_download_url(access="open")
def get_filepaths(reference_id, folder_path="./"):
contents = os.listdir(os.path.join(folder_path, reference_id))
raman = []
sem = []
for f in contents:
if f.split(".")[-1] == "txt":
raman.append(f)
elif f.split(".")[-1] == "tif":
sem.append(f)
return raman, sem
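# --- Illustrative usage sketch (directory layout is a placeholder) ---
# get_filepaths() lists the Raman spectra (.txt) and SEM images (.tif) stored
# under <folder_path>/<reference_id>/ so they can be attached to a sample record.
def _example_get_filepaths():
    raman_files, sem_files = get_filepaths('sample_001', folder_path='./data')
    print(len(raman_files), 'raman files,', len(sem_files), 'sem images')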
sample_fields = ["material_name", "experiment_date"]
preparation_fields = [
"name",
"duration",
"furnace_temperature",
"furnace_pressure",
"sample_location",
"helium_flow_rate",
"hydrogen_flow_rate",
"argon_flow_rate",
"carbon_source",
"carbon_source_flow_rate",
"cooling_rate",
]
recipe_fields = [
"catalyst",
"tube_diameter",
"cross_sectional_area",
"tube_length",
"base_pressure",
"thickness",
"diameter",
"length",
]
properties_fields = [
"average_thickness_of_growth",
"standard_deviation_of_growth",
"number_of_layers",
"growth_coverage",
"domain_size",
"shape",
]
all_fields = sample_fields + preparation_fields + recipe_fields + properties_fields
def build_db(session, filepath, sem_raman_path=None):
var_map = pd.read_csv(os.path.join(filepath, "varmap2.csv")).to_dict()
data = pd.read_csv(os.path.join(filepath, "recipe_2018_11_08.csv")).iloc[:-1, :]
col_names = data.columns
for i in range(data.shape[0]):
s = sample()
s.material_name = "Graphene"
s.validated = True
session.add(s)
session.commit()
pr = properties()
pr.sample_id = s.id
r = recipe()
r.sample_id = s.id
for j in range(30):
value = data.iloc[i, j]
if pd.isnull(value) == False:
dbkey = var_map[col_names[j]][0]
if dbkey == "identifier":
identifier = str(data.iloc[i, j])
if dbkey in properties_fields:
value = convert(data.iloc[i, j], getattr(properties, dbkey))
setattr(pr, dbkey, value)
elif dbkey in recipe_fields:
value = convert(data.iloc[i, j], getattr(recipe, dbkey))
if "mTorr" in col_names[j]:
                        setattr(r, dbkey, value / 1000)  # recipe-level field; set on the recipe object `r`, not `prep`
                    else:
                        setattr(r, dbkey, value)
session.add(pr)
session.add(r)
session.commit()
total_steps = 0
# Annealing
for step, j in enumerate(range(31, 109, 13)):
prep = preparation_step()
prep.name = "Annealing"
prep.recipe_id = r.id
for p in range(13):
dbkey = var_map[col_names[j + p]][0]
value = data.iloc[i, j + p]
if pd.isnull(value) == False and dbkey in preparation_fields:
value = convert(
data.iloc[i, j + p], getattr(preparation_step, dbkey)
)
# print(prep.name,col_names[j+p],dbkey,value,type(value))
if "flow_rate" in dbkey:
if "sccm" in col_names[j + p]:
setattr(prep, dbkey, value)
else:
setattr(prep, dbkey, value / 0.01270903)
elif "furnace_pressure" in dbkey:
if "mTorr" in col_names[j + p]:
setattr(prep, dbkey, value / 1000)
else:
setattr(prep, dbkey, value)
else:
setattr(prep, dbkey, value)
if prep.duration != None:
prep.step = total_steps
total_steps += 1
# print('Added Annealing')
# print(vars(prep))
session.add(prep)
session.commit()
# Growing
for step, j in enumerate(range(110, 188, 13)):
prep = preparation_step()
prep.name = "Growing"
prep.recipe_id = r.id
for p in range(13):
dbkey = var_map[col_names[j + p]][0]
value = data.iloc[i, j + p]
if pd.isnull(value) == False and dbkey in preparation_fields:
value = convert(
data.iloc[i, j + p], getattr(preparation_step, dbkey)
)
# print(prep.name,col_names[j+p],dbkey,value,type(value))
if "flow_rate" in dbkey:
if "sccm" in col_names[j + p]:
setattr(prep, dbkey, value)
else:
setattr(prep, dbkey, value / 0.01270903)
elif "furnace_pressure" in dbkey:
if "mTorr" in col_names[j + p]:
setattr(prep, dbkey, value / 1000)
else:
setattr(prep, dbkey, value)
else:
setattr(prep, dbkey, value)
if prep.duration != None:
prep.step = total_steps
total_steps += 1
# print('Added Growing')
# print(vars(prep))
session.add(prep)
session.commit()
# Cooling
for step, j in enumerate(range(191, 268, 13)):
prep = preparation_step()
prep.name = "Cooling"
prep.cooling_rate = convert(
data.iloc[i, 190], getattr(preparation_step, "cooling_rate")
)
prep.recipe_id = r.id
for p in range(13):
dbkey = var_map[col_names[j + p]][0]
value = data.iloc[i, j + p]
                if pd.isnull(value) == False and dbkey in preparation_fields:
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 19:53:06 2020
@author: abhi0
"""
#Categorical encoding features II
#Kaggle challenge
#Open competition
import pandas as pd
df=pd.read_csv("C:/Users/abhi0/Downloads/train.csv/train.csv")
dfTest=pd.read_csv("C:/Users/abhi0/Downloads/test_Kaggle.csv")
#Initial approach: Drop NaN's
df=df.dropna()
################ Feature pre-processing: #####################
#dropping the ID column
df.drop(['id'],axis=1,inplace=True)
#dropping some of the nominal variables:
#nom_5,nom_6,nom_7,nom_8,nom_9
df.drop(['nom_5','nom_6','nom_7','nom_8','nom_9'],axis=1,inplace=True)
########## Label encoding the binary variables ###############
# For 'bin_3' variable
from sklearn.preprocessing import LabelEncoder
labelencoder_bin_3 = LabelEncoder()
df['bin_3'] = labelencoder_bin_3.fit_transform(df['bin_3'])
# For 'bin_4' variable
from sklearn.preprocessing import LabelEncoder
labelencoder_bin_4 = LabelEncoder()
df['bin_4'] = labelencoder_bin_4.fit_transform(df['bin_4'])
########## Label encoding the ordinal variables ###############
for i in range(1,6):
labelencoder_ord_i = LabelEncoder()
df['ord_'+str(i)] = labelencoder_ord_i.fit_transform(df['ord_'+str(i)])
######### One-hot encoding for nominal variables ###########
for i in range(0,5):
df_dummies=pd.get_dummies(df['nom_'+str(i)],prefix='nom_'+str(i)+'_category:')
df=pd.concat([df,df_dummies],axis=1)
df=df.drop(['nom_'+str(i)],axis=1)
#For 'day' variable:
df_dummies=pd.get_dummies(df['day'],prefix='day_category:')
df=pd.concat([df,df_dummies],axis=1)
df=df.drop(['day'],axis=1)
#For 'month' variable:
df_dummies=pd.get_dummies(df['month'],prefix='month_category:')
df=pd.concat([df,df_dummies],axis=1)
# coding: utf-8
# # Interrogating building age distributions
#
# This notebook is to explore the distribution of building ages in
# communities in Western Australia.
from os.path import join as pjoin
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import re
import seaborn as sns
sns.set_context("poster")
sns.set_style('darkgrid')
# Apply GA colour palette
palette = sns.blend_palette(["#5E6A71", "#006983", "#72C7E7",
"#A33F1F", "#CA7700", "#A5D867",
"#6E7645"], 7)
# The source file `WA_Residential_Wind_Exposure_2018_TCRM.CSV` can be
# found in HPRM D2018-6256. Download a local version (by using the
# 'Supercopy' option when right-clicking on the record), and change
# the path to the appropriate folder.
inputFile = "C:/WorkSpace/data/derived/exposure/WA/WA_TILES_Residential_Wind_Exposure.csv"
df = pd.read_csv(inputFile)
output_path = "C:/Workspace/data/derived/exposure/WA/"
SA2_names = sorted(list(pd.unique(df['SA2_NAME'])))
ages = sorted(list(pd.unique(df['YEAR_BUILT'])))
print(ages)
def plotAgeDist(df, locality):
fig = plt.figure()
ax = fig.add_subplot(111)
locdf = df[df['SA2_NAME'] == locality]
sns.countplot(x="YEAR_BUILT", data=locdf, order=ages, ax=ax,
palette=palette)
ax.set_xlabel("Year built")
ax.set_ylabel("Number")
plt.setp(ax.get_xticklabels(), rotation=90)
ax.set_title("{0} - {1:,} residential buildings".format(locality, len(locdf.index)))
fig.tight_layout()
fig.savefig(pjoin(output_path, "AgeProfile", "SA2",
"{0}.png".format(locality)))
plt.clf()
plt.close('all')
# There's two aspects to the age distribution - communities where
# there has been substantial growth since the last significant
# cyclone, and communities with a large proportion of older (pre-1980)
# era construction.
# TODO:
# 1. Add a chart that ranks the localities by proportion of a
# selected age group. The list of age groups is already compiled
# (`ages`), just need to do the calculations to get proportions for
# the specified age group.
# 2. Add another figure that plots the
# predominant age group for each suburb in the locality. If there's a
# spatial layer of the boundaries for `SUBURB_2015`, then one could
# plot up a categorised map of the suburbs based on predominant age
# group.
# In[26]:
def plotBySuburb(df, locality):
fig = plt.figure()
ax = fig.add_subplot(111)
locdf = df[df['SA2_NAME'] == locality]
suburblist = locdf[locdf['SUBURB'].notnull()]['SUBURB']
suburbs = sorted(list(pd.unique(suburblist)))
sns.countplot(x='SUBURB', hue='YEAR_BUILT', data=locdf,
order=suburbs, hue_order=ages,
palette=palette,ax=ax)
ax.set_xlabel("Suburb")
ax.set_ylabel("Number")
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
l = ax.legend(title="Year built", ncol=2)
ax.set_title("{0} - {1:,} residential buildings".
format(locality, len(locdf.index)))
fig.tight_layout()
plt.savefig(pjoin(output_path, "AgeProfile", "SA2",
"BySuburb", "{0}.png".format(locality)))
plt.close('all')
for SA2 in SA2_names:
print(SA2)
plotAgeDist(df, SA2)
plotBySuburb(df, SA2)
# For the Perth region, we perform the analysis at a larger
# aggregation, due to the number of suburbs that make up the Greater
# Perth area.
# In[28]:
urbanareas = sorted(list( | pd.unique(df['UCL_NAME']) | pandas.unique |
"""
This module contains functions for handling UI callbacks on the webpage, one of which is the upload of data.
"""
# Built-in imports
import io
import base64
from datetime import datetime
from collections import namedtuple
from hashlib import md5
import logging
# Third-party module imports.
import dash
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pandas as pd
import numpy as np
# Own module imports.
from .exceptions import UploadError, ModelCreationError
from . import analysis
from . import dbactions
from . import layout
from . import plotting
# Describe time format that we get, e.g. in file names, so we can convert it to datetime.
time_fmt = '%Y_%m_%d_%H_%M_%S'
# Set pandas plotting backend to ploty. Requires plotly >= 4.8.0.
pd.options.plotting.backend = 'plotly'
############################
# Map files to table types #
############################
def get_table_type(filename):
""" Accepted filenames:
device-<device_id>.csv,
user.csv,
session-<time_iso>.csv,
trials-<time_iso>-Block_<n>.csv
:param filename: name of uploaded file.
:type filename: str
:return: Name of table type.
:rtype: str|None
"""
basename, ext = filename.split('.')
parts = basename.split('-')
first = parts[0]
if ext == 'csv' and first in ['device', 'user', 'session', 'trials']:
return first
else:
return None
def get_file_indices(filenames, table_type):
""" Get indices of given table type from a list of file names.
:param filenames: Received file names.
:type filenames: list
:param table_type: Which tables to look for.
:type table_type: str
:return: Indices of found files.
:rtype: list
"""
indices = [i for i, f in enumerate(filenames) if get_table_type(f) == table_type]
return indices
def get_table_idx(filenames, table_type):
""" Supposed to be used against table types 'device', 'user' and 'session' which should yield a single file.
:param filenames:
:type filenames: list
:param table_type:
:type table_type: str
:return: Index of single table.
:rtype: int
"""
try:
file_idx = get_file_indices(filenames, table_type)[0]
except IndexError:
raise UploadError(f"ERROR: {table_type.title()} file is missing.")
return file_idx
#########################
# Extract file contents #
#########################
def decode_contents(list_of_contents):
""" Decode list of base64 encoded uploaded data and convert it to decoded list of data.
:param list_of_contents: List of encoded content from dash upload component.
:type list_of_contents: list
:return: List of decoded contents (str)
:rtype: list
"""
decoded = list()
for contents in list_of_contents:
content_type, content_string = contents.split(',')
decoded.append(base64.b64decode(content_string).decode('utf-8'))
return decoded
####################
# Integrity Checks #
####################
def check_circletask_hash(df, sent_hash):
""" Check if hashes match.
:param df: Dataframe to check.
:type df: pandas.DataFrame
:param sent_hash: The received hash to compare df to.
:type sent_hash: str
:return: Do hashes match?
:rtype: bool
"""
df_hash = md5(df[['df1', 'df2',
'df1_grab', 'df1_release',
'df2_grab', 'df2_release']].round(5).values.copy(order='C')).hexdigest()
if df_hash != sent_hash:
raise UploadError("ERROR: Data corrupted.")
else:
return True
def check_circletask_touched(df):
""" Check if all sliders were used.
:param df: Dataframe to check.
:type df: pandas.DataFrame
:return: Whether all sliders where used.
:rtype: bool
"""
#
untouched = df.isna().all().any()
if untouched:
raise UploadError("ERROR: Task not properly executed.")
return True
def check_circletask_integrity(df, sent_hash):
""" Evaluate if this data was tampered with and the controls actually touched.
:param df: Dataframe to check.
:type df: pandas.DataFrame
:param sent_hash: The received hash to compare df to.
:type sent_hash: str
:return: Status of integrity.
:rtype: bool
"""
try:
check = check_circletask_touched(df) and check_circletask_hash(df, sent_hash)
except UploadError:
raise UploadError("ERROR: Data corrupted.")
return check
##################
# Parsing upload #
##################
def get_device_properties(csv_file):
""" Get device properties as dictionary from a CSV file.
:param csv_file: 1 row table with device properties.
:type csv_file: str|io.StringIO
:return: Properties of the device.
:rtype: dict
"""
try:
# For device and user we expect them to contain only 1 entry. Prevent id from being converted to number.
props = pd.read_csv(csv_file, dtype={'id': object}).iloc[0].to_dict() # df->Series->dict
except Exception:
raise UploadError("ERROR: Failed to read file contents for device.")
return props
def get_user_properties(csv_file):
""" Get user data as dictionary from a CSV file.
:param csv_file: 1 row table with user data.
:type csv_file: str|io.StringIO
:return: Properties of the user.
:rtype: dict
"""
try:
# We expect the user data to contain only 1 entry. Prevent IDs from being converted to numbers.
df = pd.read_csv(csv_file, dtype={'id': object, 'device_id': object})
# We need to convert NaN to None for SQL to work.
df = df.where(pd.notnull(df), None)
props = df.iloc[0].to_dict() # df->Series->dict
except IOError:
raise UploadError("ERROR: Failed to read file contents for user.")
except IndexError:
raise UploadError("ERROR: No data in User table.")
return props
def get_blocks_df(csv_file, session_uid, user_id):
""" Return a DataFrame from CSV file or buffer and add user_id column.
:param csv_file: Table with session data for blocks.
:type csv_file: str|io.StringIO
:param session_uid: Identifier to group blocks as belonging to the same session.
:type session_uid: str
:param user_id: ID of the user who performed the session.
:type user_id: str
:return: Properties of the blocks.
:rtype: pandas.DataFrame
"""
try:
# Read data and keep empty string in treatment as empty string, not NaN.
blocks_df = pd.read_csv(csv_file, keep_default_na=False)
except Exception:
raise UploadError("ERROR: Failed to read file contents for session.")
try:
# Convert time_iso string to datetime.
blocks_df['time_iso'] = blocks_df['time_iso'].transform(lambda t: datetime.strptime(t, time_fmt))
blocks_df['user_id'] = user_id
# Rename 'block' column to CircleTaskBlock model compatible 'nth_block'
blocks_df.rename(columns={'block': 'nth_block'}, inplace=True)
except KeyError:
raise UploadError("ERROR: Missing columns in session data.")
# Unique identifier for session so we can associate the blocks with this particular session.
blocks_df['session_uid'] = session_uid
return blocks_df
def get_trials_meta(filename):
""" Take a filename and extract time_iso and block information from it.
:param filename: file name of trials table of form trials-[time_iso]-Block_[n].csv
:type filename: str
:return: time_iso and block as namedtuple
:rtype: tuple
"""
basename, ext = filename.split('.')
parts = basename.split('-')
try:
time_iso = datetime.strptime(parts[1], time_fmt) # Convert string to datetime.
block = int(parts[2].split('_')[1])
except (IndexError, ValueError):
raise UploadError("ERROR: Trial table file name has to be of form: trials-<time_iso>-Block_<n>.csv")
meta = namedtuple('trialMeta', ['time_iso', 'block'])
return meta(time_iso, block)
def get_trials_properties(filenames, contents, times, blocks, hashes, user_id):
""" Read all uploaded blocks with trials, check integrity, concatenate.
:param filenames: All uploaded filenames.
:type filenames: list
:param contents: Decoded file contents.
:type contents: list
:param times: 'time_iso' column of session.
:type times: pandas.Series
:param blocks: 'block' column of session.
:type blocks: pandas.Series
:param hashes: 'hash' column of session.
:type hashes: pandas.Series
:param user_id: ID of user who provided the data of trials.
:type user_id: str
:return: Properties of all the trials as generator.
:rtype: generator
"""
# Look for files containing trial data.
file_indices = get_file_indices(filenames, 'trials')
if not file_indices:
raise UploadError("ERROR: Trial files are missing.")
# Collect data from each file as separate DataFrames.
trials_dfs = list()
for idx in file_indices:
# Get information from filename.
try:
trials_meta = get_trials_meta(filenames[idx])
except UploadError:
raise
# Get data from content.
try:
df = pd.read_csv(io.StringIO(contents[idx]))
except Exception:
raise UploadError("ERROR: Failed to read file contents for trials.")
# Determine to which block in the session this file belongs to.
mask = (times == trials_meta.time_iso) & (blocks == trials_meta.block)
try:
block_idx = mask[mask].index[0] # The accompanying row in session file.
except IndexError:
raise UploadError("ERROR: Mismatch between data in session file and trials file.")
# Check data integrity by comparing hash values of this file and what was sent with the session file.
sent_hash = hashes.iloc[block_idx]
try:
check_passed = check_circletask_integrity(df, sent_hash)
except UploadError:
raise
# Add block index to relate trials to a CircleTaskBlock object when adding them to the database later on.
df['block_idx'] = block_idx
trials_dfs.append(df)
# Concatenate the different trials DataFrames. Rows are augmented by block & time_iso for differentiation later on.
df = pd.concat(trials_dfs)
df['user_id'] = user_id
df['trial'] = df.index
# We may get a lot of trials, put them in a generator to not hold a large list of dictionaries in memory.
props = (row._asdict() for row in df.itertuples(index=False))
return props
def parse_uploaded_files(list_of_filenames, list_of_contents):
""" Reads files and returns dictionary with keyword arguments for each table type.
These are:
'device': dict
'user': dict
'blocks': list
'trials': generator
:param list_of_filenames: list of received file names.
:type list_of_filenames: list
:param list_of_contents: List of encoded contents.
:type list_of_contents: list
:return: Dictionary with keyword arguments for database models.
:rtype: dict
"""
# If there was an error in mapping the table types to files, return that error.
try:
# If files are missing get_table_idx raises UploadError.
device_file_idx = get_table_idx(list_of_filenames, 'device')
user_file_idx = get_table_idx(list_of_filenames, 'user')
blocks_file_idx = get_table_idx(list_of_filenames, 'session')
except UploadError:
raise
# Decode the content.
try:
decoded_list = decode_contents(list_of_contents)
except Exception:
raise UploadError("ERROR: Failed to decode file contents.")
# Extract data from content for the database models.
kw = dict() # Keyword arguments for each table.
try:
kw['device'] = get_device_properties(io.StringIO(decoded_list[device_file_idx]))
kw['user'] = get_user_properties(io.StringIO(decoded_list[user_file_idx]))
except UploadError:
raise
# Use the time of the session to create a uid from it, so we can group the blocks together later on.
basename, ext = list_of_filenames[blocks_file_idx].split('.')
try:
# Unique id based on this user at that time.
session_uid = md5((kw['user']['id'] + basename.split('-')[1]).encode()).hexdigest()
except KeyError:
raise UploadError("ERROR: User ID is missing.")
except IndexError:
raise UploadError("ERROR: Session file name is missing datetime.")
try:
blocks_df = get_blocks_df(io.StringIO(decoded_list[blocks_file_idx]), session_uid, kw['user']['id'])
kw['blocks'] = [data._asdict() for data in blocks_df.itertuples(index=False)] # Convert namedtuple to dict.
kw['trials'] = get_trials_properties(list_of_filenames,
decoded_list,
blocks_df['time_iso'],
blocks_df['nth_block'],
blocks_df['hash'],
kw['user']['id'])
except (UploadError, KeyError):
raise UploadError("ERROR: Failed to parse data.")
return kw
def process_upload(filenames, contents):
""" First parse the uploaded data and then add it to the database.
:param filenames: list of received file names.
:type filenames: list
:param contents: List of encoded contents.
:type contents: list
"""
# Get keyword arguments for creation of each model.
try:
kw = parse_uploaded_files(filenames, contents)
except UploadError:
raise
try:
dbactions.add_to_db(kw['device'], kw['user'], kw['blocks'], kw['trials'])
except ModelCreationError:
raise
def records_to_df(store, columns=None):
""" Convert records-style data to pandas DataFrame.
:param store: Data from a dash_core_components.store component.
:type store: list[dict]
:param columns: Columns to use for empty DataFrame in case there are no records.
:type columns: list[dict]
:return: Stored data as a DataFrame.
:rtype: pandas.DataFrame
"""
df = pd.DataFrame(store)
# If the DataFrame is practically empty, delete everything except for the columns.
if df.isna().all().all():
df = df[0:0]
if df.columns.empty and columns:
df = pd.DataFrame(None, columns=[c['id'] for c in columns])
return df
def df_to_records(df):
""" Convert pandas DataFrame to table compatible data aka records. If DataFrame is empty keep the columns.
:type df: pandas.DataFrame
:rtype: list[dict]
"""
if df.empty:
# Return column names in 'records' style.
return [{c: None for c in df.columns}]
return df.to_dict('records')
################
# UI Callbacks #
################
def register_callbacks(dashapp):
""" Defines all callbacks to UI events in the dashboard. """
# Upload
@dashapp.callback(Output('output-data-upload', 'children'),
[Input('upload-data', 'contents')],
[State('upload-data', 'filename'),
State('upload-data', 'last_modified')])
def on_upload(list_of_contents, list_of_names, list_of_dates):
""" Upload data to SQL DB. """
if list_of_contents is not None:
try:
# Insert data into database.
process_upload(list_of_names, list_of_contents)
except (UploadError, ModelCreationError) as e:
# Display the error message.
return [html.Div(str(e))]
# Display success message.
return [html.Div("Upload successful.")]
# Data stores
@dashapp.callback([Output('datastore', 'data'),
Output('user-IDs', 'options'),
Output('removal-hint', 'children')],
[Input('output-data-upload', 'children'),
Input('refresh-btn', 'n_clicks'),
Input('date-picker-range', 'start_date'),
Input('date-picker-range', 'end_date'),
])
def set_datastore(upload_msg, refresh_clicks, start_date, end_date):
""" Get data from SQL DB and store in memory.
Update dropdown options.
"""
ctx = dash.callback_context
if not ctx.triggered:
comp_id = None
else:
# Which component triggered the callback?
comp_id = ctx.triggered[0]['prop_id'].split('.')[0]
if comp_id == 'output-data-upload': # When uploading manually on the website.
try:
# Query db on initial call when upload_msg is None or on successful upload.
if upload_msg is None or "Upload successful." in upload_msg[0].children:
users, blocks, trials = dbactions.get_data(start_date, end_date)
df, n_errors, n_invalid_sessions, n_trials_removed = analysis.preprocess_data(users, blocks, trials)
else:
return (dash.no_update,) * 3
except (TypeError, AttributeError, IndexError):
return (dash.no_update,) * 3
else:
users, blocks, trials = dbactions.get_data(start_date, end_date)
df, n_errors, n_invalid_sessions, n_trials_removed = analysis.preprocess_data(users, blocks, trials)
removal_msg = f"{n_errors} blocks with erroneous data were found. As a consequence {n_invalid_sessions} " \
f"sessions were excluded. " \
f"{n_trials_removed} trials have been excluded from the selected time period due to incorrect" \
f" execution." + " Sliders were either not used concurrently or not used at all." \
* bool(n_trials_removed)
users = [{'label': p, 'value': p} for p in df['user'].unique()]
return df_to_records(df), users, removal_msg
# Trials
@dashapp.callback([Output('trials-table', 'data'),
Output('trials-table', 'columns'),
Output('contour-store', 'data')],
[Input('datastore', 'data'),
Input('user-IDs', 'value'),
Input('contamination', 'value')])
def set_trials_table(stored_data, users_selected, contamination):
""" Prepares stored data for display in the main table. Assesses outliers as well. """
df = records_to_df(stored_data)
# Get outlier data.
if not contamination:
contamination = 0.1
try:
outliers, z = analysis.get_outlyingness(df[['df1', 'df2']].values, contamination=contamination)
except (KeyError, ValueError):
logging.log(logging.ERROR, "Could not compute outliers. Missing columns in DataFrame.")
# Create data with no outliers.
outliers = np.array(False).repeat(df.shape[0])
z = np.ones((101, 101)).astype(int)
df['outlier'] = outliers.astype(int)
# Format table columns.
columns = layout.get_columns_settings(df, order=[0, 1, 2, 3, 4, 6, 7, 8, 15, 9, 12, 16, 10, 13, 11, 14, 17, 18])
if not users_selected:
# Return all the rows on initial load/no user selected.
return df_to_records(df), columns, z.tolist()
try:
df[['user', 'block', 'condition', 'task', 'outlier']] = df[['user', 'block', 'condition', 'task',
'outlier']].astype('category')
except KeyError:
pass
filtered = df.query('`user` in @users_selected')
return df_to_records(filtered), columns, z.tolist()
@dashapp.callback(Output('filtered-hint', 'children'),
[Input('trials-table', 'derived_virtual_data')],
[State('trials-table', 'data'),
State('trials-table', 'filter_query')])
def on_table_filter(table_data_filtered, table_data, query):
""" Update message about removed trials by filtering. """
# Check if there even is data. The empty dataset has 1 row with all None.
try:
if len(table_data) == 1 and np.isnan(np.array(tuple(table_data[0].values()), dtype=np.float)).all():
n_filtered = 0
else:
n_filtered = len(table_data) - len(table_data_filtered)
except (TypeError, AttributeError):
n_filtered = 0
try:
filter = query.replace('{', '').replace('}', '')
except AttributeError:
filter = ""
filtered_msg = bool(n_filtered) * f" {n_filtered} trials were excluded by filters set in the table ({filter})."
return filtered_msg
@dashapp.callback(Output('scatterplot-trials', 'figure'),
[Input('pca-store', 'data'), # Delay update until PCA is through.
Input('pca-checkbox', 'value'),
Input('ellipses-checkbox', 'value')],
[State('trials-table', 'derived_virtual_data'),
State('contour-store', 'data')])
def set_trials_plot(pca_data, show_pca, show_ellipses, table_data, contour):
""" Update the graph for displaying trial data as scatter plot. """
df = records_to_df(table_data)
try:
df[['user', 'condition', 'block', 'task']] = df[['user', 'condition', 'block', 'task']].astype('category')
except KeyError:
pass
z = np.array(contour)
fig = plotting.generate_trials_figure(df, contour_data=z)
# PCA visualisation.
pca_df = records_to_df(pca_data)
if 'Show' in show_pca:
arrows = plotting.get_pca_annotations(pca_df)
fig.layout.update(annotations=arrows)
if 'Show' in show_ellipses:
plotting.add_pca_ellipses(fig, pca_df)
return fig
@dashapp.callback([Output('histogram-dfs', 'figure'),
Output('histogram-sum', 'figure')],
[Input('trials-table', 'derived_virtual_data')])
def set_histograms(table_data):
""" Update histograms when data in trials table changes. """
df = records_to_df(table_data)
try:
fig_dfs = plotting.generate_histograms(df[['df1', 'df2']], legend_title="DOF")
fig_sum = plotting.generate_histograms(df[['task', 'sum']], by='task', legend_title="Block Type")
except KeyError:
fig = plotting.generate_histograms(pd.DataFrame())
return fig, fig
return fig_dfs, fig_sum
@dashapp.callback([Output('qq-plot-dfs', 'figure'),
Output('qq-plot-sum', 'figure')],
[Input('trials-table', 'derived_virtual_data')])
def set_qqplots(table_data):
""" QQ-plots of degrees of freedom and of sum. """
df = records_to_df(table_data)
try:
fig_df = plotting.generate_qq_plot(df, vars_=['df1', 'df2'])
fig_sum = plotting.generate_qq_plot(df, vars_=['sum'])
except KeyError:
fig = plotting.generate_qq_plot(pd.DataFrame(columns=['task']), vars_=[])
return fig, fig
return fig_df, fig_sum
@dashapp.callback([Output('corr-table', 'data'),
Output('corr-table', 'columns')],
[Input('trials-table', 'derived_virtual_data')])
def set_corr_table(table_data):
""" Update table showing Pearson correlations between degrees of freedom and their sum. """
df = records_to_df(table_data)
correlates = ['df1', 'df2', 'sum']
try:
# We suspect the data to not be normally distributed or have outliers.
# A Spearman correlation is then more appropriate.
corr = df[correlates].corr(menthod='spearman')
except KeyError:
corr = pd.DataFrame(columns=correlates, index=correlates)
if df.empty:
corr = pd.DataFrame(columns=correlates, index=correlates)
corr.index.name = ''
corr.reset_index(inplace=True)
columns = layout.get_columns_settings(corr)
return df_to_records(corr), columns
# Reaction times
@dashapp.callback([Output('onset-dfs', 'figure'),
Output('duration-dfs', 'figure')],
[Input('trials-table', 'derived_virtual_data')],
[State('trials-table', 'columns')])
def set_grab_plots(table_data, header):
""" Update histograms when data in trials table changes. """
df = records_to_df(table_data)
try:
onset_df = df[['user', 'condition', 'block', 'task', 'df1_grab', 'df2_grab']]
duration_df = df[['user', 'condition', 'block', 'task', 'df1_duration', 'df2_duration']]
except KeyError:
col_names = [c['id'] for c in header]
onset_df = pd.DataFrame(columns=col_names)
duration_df = pd.DataFrame(columns=col_names)
fig_onset = plotting.generate_violin_figure(onset_df.rename(columns={'df1_grab': 'df1', 'df2_grab': 'df2'}),
['df1', 'df2'], ytitle="Grab Onset (s)", legend_title="DOF")
fig_duration = plotting.generate_violin_figure(duration_df.rename(columns={'df1_duration': 'df1',
'df2_duration': 'df2'}),
['df1', 'df2'], ytitle='Grab Duration (s)',
legend_title="DOF")
return fig_onset, fig_duration
@dashapp.callback(Output('barplot-variance', 'figure'),
[Input('desc-table', 'derived_virtual_data')])
def set_variance_graph(table_data):
""" Update graph showing variances of dependent and in independent variables. """
df = records_to_df(table_data)
df.dropna(inplace=True)
return plotting.generate_means_figure(df)
# PCA
@dashapp.callback(Output('pca-store', 'data'),
[Input('trials-table', 'derived_virtual_data')],
[State('trials-table', 'columns')])
def set_pca_store(table_data, table_columns):
""" Save results of PCA into a store. """
df = records_to_df(table_data, columns=table_columns)
try:
df[['user', 'condition', 'block', 'task']] = df[['user', 'condition', 'block', 'task']].astype('category')
pca_df = df.groupby('task').apply(analysis.get_pca_data)
except KeyError:
pca_df = pd.DataFrame()
else:
pca_df.reset_index(inplace=True)
if pca_df.empty:
pca_df = pd.DataFrame(None, columns=['task', 'PC', 'var_expl', 'var_expl_ratio',
'x', 'y', 'meanx', 'meany'])
return df_to_records(pca_df)
@dashapp.callback(Output('barplot-pca', 'figure'),
[Input('pca-store', 'data')])
def set_pca_plot(pca_data):
""" Update bar-plot showing explained variance per principal component. """
df = records_to_df(pca_data)
try:
df[['task', 'PC']] = df[['task', 'PC']].astype('category')
except KeyError:
pass
fig = plotting.generate_pca_figure(df)
return fig
@dashapp.callback([Output('pca-table', 'data'),
Output('pca-table', 'columns')],
[Input('pca-store', 'data')])
def set_pca_angle_table(pca_data):
""" Update table for showing divergence between principal components and UCM vectors. """
pca_df = records_to_df(pca_data)
if pca_df.empty:
angle_df = | pd.DataFrame(None, columns=['task', 'PC', 'parallel', 'orthogonal']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import mock
import pandas as pd
import numpy as np
import requests
import os
from bho_scraper import bho_scraper
from flask import Flask, request
from tests.conftest import WebServer
class Store:
# ======================== test_change_href =======================================
mock_href = r'/test/href'
correct_changed_href = r'/test--href'
# ======================================================================================
# ======================== test_standardize_query =======================================
mock_query = r'test QUery here ##::;___'
correct_standardized_query = r'testqueryhere'
# ======================================================================================
# ======================== test_search_for_series ======================================
mock_series_query = r'test series query'
mock_catalogue = {r'testseriesquery' : r'http://mock_base_url.co.uk/abc/example/href?'}
correct_search_return = (r'http://mock_base_url.co.uk/abc/example/href?', 'example-href')
# ======================================================================================
# ======================== test_scrape_catalogue =======================================
mock_text = '''
<html><body><table><tbody><tr><td><a>First row not taken</a></td><td>
First row not taken</td></tr><tr><td><a href="/yes/series/test">Yes
Series Test</a></td><td>Single volume</td></tr><tr><td>
<a href="/no-series/no_series_test">No Series Test</a> </td>
<td>Single volume</td></tr></tbody></table></body></html>
'''
correct_scraped_catalogue = {
'yesseriestest' : 'https://www.british-history.ac.uk/search/series/yes--series--test?query={}&page={}'
}
# ======================================================================================
# ======================== test_scrape_results =======================================
mock_results_html1 = '''
<html><body><div class="region region-content"><div class="view-content">
<div>
<h4 class="title"><a>Test Title 1</a></h4>
<p class="publication">Test Publication 1</p>
<p class="excerpt">Test Excerpt 1</p>
</div>
<div>
<h4 class="title"><a>Test Title 2</a></h4>
<p class="publication">Test Publication 2</p>
</div>
<div>
<h4 class="title"><a>Test Title 3</a></h4>
<p class="excerpt">Test Excerpt 3</p>
</div>
<div>
<h4 class="title"></h4>
<p class="publication">Test Publication 4</p>
<p class="excerpt">Test Excerpt 4</p>
</div>
<a title="Go to last page" href="/query=?&page=1">last</a>
</div></div></body></html>
'''
correct_dict = {
'title' : ['Test Title 1', 'Test Title 2', 'Test Title 3', np.nan],
'publication' : ['Test Publication 1', 'Test Publication 2', np.nan, 'Test Publication 4'],
'excerpt' : ['Test Excerpt 1', np.nan, 'Test Excerpt 3', 'Test Excerpt 4']
}
correct_df = pd.DataFrame(correct_dict)
# ======================== test_scrape_series =======================================
mock_results_html2 = '''
<html><body><div class="region region-content"><div class="view-content">
<div>
<h4 class="title"><a>Hello World</a></h4>
<p class="publication">abc 123</p>
<p class="excerpt">e = mc ** 2</p>
</div></div></body></html>
'''
mock_scraped_catalogue = {'http://example.com' : 'testseriesname'}
correct_scraped_df = pd.DataFrame(
{
'query' : ['test_query']*5,
'title' : ['Test Title 1', 'Test Title 2', 'Test Title 3', np.nan, 'Hello World'],
'publication' : ['Test Publication 1', 'Test Publication 2', np.nan, 'Test Publication 4', 'abc 123'],
'excerpt' : ['Test Excerpt 1', np.nan, 'Test Excerpt 3', 'Test Excerpt 4', 'e = mc ** 2']
}
)
correct_scraped_series = {'test_series_name' : correct_scraped_df}
# ============================================================================================
store = Store()
def test_change_href():
mock_href = store.mock_href
expected_result = store.correct_changed_href
actual_result = bho_scraper.change_href(mock_href)
assert expected_result == actual_result
def test_save_item_to_path():
pass
class MockRequest:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
def mocked_request_get(*args, **kwargs):
return MockRequest(text=store.mock_text, status_code=200)
@mock.patch('requests.get', side_effect=mocked_request_get)
def test_scrape_catalogue(mock_):
scraper = bho_scraper.BHOScraper()
scraper.scrape_catalogue()
actual_result = scraper.catalogue
expected_result = store.correct_scraped_catalogue
assert expected_result == actual_result
def test_reset_catalogue():
scraper = bho_scraper.BHOScraper()
scraper.catalogue = {'test' : 'catalogue'}
scraper.reset_catalogue()
assert not scraper.catalogue
def test_standardize_query():
expected_result = store.correct_standardized_query
actual_result = bho_scraper.standardize_query(store.mock_query)
assert expected_result == actual_result
def test_search_for_series():
scraper = bho_scraper.BHOScraper()
scraper.catalogue = store.mock_catalogue
actual_result = scraper.search_for_series(store.mock_series_query)
expected_result = store.correct_search_return
assert expected_result == actual_result
def mocked_request_results_get(*args, **kwargs):
return MockRequest(text=store.mock_results_html1, status_code=200)
@mock.patch('requests.get', side_effect=mocked_request_results_get)
def test_scrape_results(mock_):
scraper = bho_scraper.BHOScraper()
actual_result = scraper.scrape_results('https://hello-world.com/')
print(actual_result)
expected_result = store.correct_df
print('=========================================')
print(expected_result)
assert expected_result.equals(actual_result)
@pytest.fixture(scope="module")
def scraper_server():
app = Flask("scraper_server")
server = WebServer(app)
@server.app.route('/', methods=['GET', 'POST'])
def display_page():
page = request.args.get('page')
query = request.args.get('query')
if page == '0':
html = store.mock_results_html1.replace('\n', '')
elif page == '1':
html = store.mock_results_html2.replace('\n', '')
return html
with server.run():
yield server
class MockScraper(bho_scraper.BHOScraper):
def __init__(self, scraper_server):
super().__init__()
self.url = scraper_server.url + r'/?query={}&page={}'
self.catalogue = store.mock_scraped_catalogue
def search_for_series(self, *args):
return self.url, 'test_series_name'
def test_scrape_series(scraper_server):
def mock_scraper(*args, **kwargs):
return MockScraper(scraper_server=scraper_server)
@mock.patch('bho_scraper.bho_scraper.BHOScraper', side_effect=mock_scraper)
def get_scraper(mock_):
scraper = bho_scraper.BHOScraper()
scraper.scrape_series(['test_series_name'], ['test_query'])
return scraper
scraper = get_scraper()
for key, correct_key in zip(scraper.scraped_series.keys(), store.correct_scraped_series.keys()):
assert key == correct_key
actual_df = scraper.scraped_series[key].fillna('NaN substitute')
correct_df = store.correct_scraped_series[key].fillna('NaN substitute')
assert actual_df.equals(correct_df)
def test_scrape_series_download(scraper_server):
def mock_scraper(*args, **kwargs):
return MockScraper(scraper_server=scraper_server)
@mock.patch('bho_scraper.bho_scraper.BHOScraper', side_effect=mock_scraper)
def get_scraper(mock_):
scraper = bho_scraper.BHOScraper()
scraper.scrape_series(['test_series_name'], ['test_query'], path=os.path.join('.','temp'))
return scraper
scraper = get_scraper()
try:
temp_path = os.path.join('.','temp')
assert os.path.exists(temp_path)
downloads = os.listdir(temp_path)
assert len(downloads) == 1
csv_path = os.path.join(temp_path, downloads[-1])
actual_df = | pd.read_csv(csv_path) | pandas.read_csv |
import logging
import pathlib
import numpy as np
import pandas as pd
import coloredlogs
from pathlib import Path
from typing import Union,Dict,List
from .utils import sel_column_label, train_val_test_split, save_csv, flair_tags, flair_tags_as_string
from flair.datasets import CSVClassificationCorpus
# logger = logging.getLogger("nlp_dataset")
# logger.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
# stream_handler = logging.StreamHandler()
# stream_handler.setFormatter(formatter)
# logger.addHandler(stream_handler)
# coloredlogs.install(fmt='%(asctime)s %(name)s %(levelname)s %(message)s',level='DEBUG',logger = logger)
logger = logging.getLogger("entiretydotai")
class FlairDataset():
"""[summary]
Raises:
FileNotFoundError: [description]
Returns:
[type]: [description]
"""
def __init__(self,
data_folder: Union[str, Path],
column_name_map: Dict[int, str],
train_file=None,
test_file=None,
dev_file=None,
file_format=None,
delimiter = None,
encoding: str = "utf-8",
train_data: pd.DataFrame = None,
val_data: pd.DataFrame = None,
test_data : pd.DataFrame = None):
super().__init__()
self.data_folder = data_folder
self.column_name_map = column_name_map
self.train_file = train_file
self.test_file = test_file
self.dev_file = dev_file
self.file_format = file_format
self.delimiter = delimiter
self.processed_file = None
if self.file_format == '.csv':
logger.debug(f'Loading data in Flair CSVClassificationCorpus from path :{self.data_folder}')
self.corpus = CSVClassificationCorpus(
data_folder=self.data_folder,
train_file=self.train_file,
dev_file=self.dev_file,
test_file=self.test_file,
column_name_map=self.column_name_map,
delimiter=self.delimiter)
logger.debug(f'Number of Sentences loaded[Train]:{self.corpus.train.total_sentence_count}')
logger.debug(f'Type of tokenizer:{self.corpus.train.tokenizer.__name__}')
logger.debug(f'Sample sentence and Label from [Train]:{self.corpus.train.__getitem__(1)}\n')
logger.debug(f'Number of Sentences loaded[Valid]:{self.corpus.dev.total_sentence_count}')
logger.debug(f'Type of tokenizer:{self.corpus.dev.tokenizer.__name__}')
            logger.debug(f'Sample sentence and Label from [Valid]:{self.corpus.dev.__getitem__(1)}\n')
logger.debug(f'Number of Sentences loaded[Test]:{self.corpus.test.total_sentence_count}')
logger.debug(f'Type of tokenizer:{self.corpus.test.tokenizer.__name__}')
            logger.debug(f'Sample sentence and Label from [Test]:{self.corpus.test.__getitem__(1)}\n')
self.train_data = train_data
self.valid_data = val_data
self.test_data = test_data
@classmethod
def csv_classification(cls,
data_folder=Union[str, Path],
file_format: str = 'csv',
filename: str = 'data',
train_val_test_split_flag: str = True,
column_mapping: List = None,
val_split_size: List = [0.1, 0.1]):
p = Path(data_folder).resolve()
if p.is_dir():
logger.debug(f'Found directory : {p}')
files = list(p.rglob('*.'+file_format))
logger.debug(f'Number of files found {len(files)}')
if len(files) < 2:
logger.debug(f'Found 1 file : {files[0].name}')
train_val_test_split_flag = True
logger.debug("Setting train_val_test_split_flag to True")
if train_val_test_split_flag:
if files[0].stem.lower() == filename:
train_file = files[0].name
flair_mapping = ['text','label']
df, column_name_map = sel_column_label(files[0],
column_mapping,
flair_mapping)
logger.debug(f'[column_name_map] {column_name_map}')
train, valid, test = train_val_test_split(df, val_split_size)
path_to_save = Path(p.parent.parent/'interim')
save_csv(train, path_to_save, 'train')
save_csv(valid, path_to_save, 'valid')
save_csv(test, path_to_save, 'test')
return FlairDataset(data_folder=path_to_save,
column_name_map=column_name_map,
train_file='train.csv',
test_file='test.csv',
dev_file='valid.csv',
file_format='.csv',
delimiter=",",
train_data=train,
val_data=valid,
test_data=test)
else:
raise FileNotFoundError
else:
raise NotImplementedError
else:
pass
class FlairTagging():
def __init__(self, dataset: CSVClassificationCorpus = None):
super().__init__()
self.dataset = dataset
@property
    def list_ner_tags(self):
        '''
        List all NER / POS models available in the flair package.'''
raise NotImplementedError
def __repr__(self, tokenizer=None):
if tokenizer is None:
            text = self.dataset.train_data.text[0]
tokens = str(text).split(" ")
return f'Text: {text} Tokens: {tokens}'
def add_tags(self, model: Union[str, Path] = 'ner-fast',
tag_type: str = 'ner', col_name: str = 'text',
extract_tags: bool = False, return_score: float = False,
replace_missing_tags: bool =True, missing_tags_value: str = "NA",
replace_missing_score: bool = True,
missing_score_value: np.float = np.NaN):
test = self.dataset.train_data.reset_index(drop=True).loc[:10,:].copy()
logger.debug(f'Shape of the dataframe:{test.shape}')
text = test[col_name].values
if extract_tags:
if return_score:
corpus_text, corpus_cleaned_ner_tag, corpus_score = flair_tags(
text,
model,
tag_type,
extract_tags,
return_score)
df = pd.concat([test.reset_index(drop=True),
pd.Series(corpus_text, name='tokenize_text'),
| pd.Series(corpus_cleaned_ner_tag, name='tags') | pandas.Series |
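# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of how the FlairDataset / FlairTagging classes above might be driven.
# The folder 'data/raw', the column_mapping value and the model name are assumptions
# made purely for illustration.
if __name__ == "__main__":
    dataset = FlairDataset.csv_classification(
        data_folder="data/raw",            # assumed location of data.csv
        file_format="csv",
        filename="data",
        column_mapping=["text", "label"],  # assumed mapping of raw columns onto Flair's text/label
        val_split_size=[0.1, 0.1],         # 10% validation, 10% test
    )
    tagger = FlairTagging(dataset=dataset)
    tagger.add_tags(model="ner-fast", tag_type="ner", col_name="text", extract_tags=True)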
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 18:19:29 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn')
#con = pdblp.BCon(debug=True, port=8194, timeout=5000)
con = pdblp.BCon(debug=False, port=8194, timeout=6000)
con.start()
index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index','NDX Index','CDAX Index' ,'DAX Index',
'ASX Index','UKX Index', 'TPX Index','NKY Index', 'SHCOMP Index' ,
'SZCOMP Index','XUTUM Index','XU100 Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
from datetime import date
start = '20040101'
today = date.today().strftime('%Y%m%d')
firstday = '19991230'
prices_open = con.bdh(index_tickers, 'PX_OPEN', firstday, today)
prices_open.columns = [i[0] for i in prices_open.columns]
prices_open = prices_open[index_tickers]
prices_open_int = prices_open.interpolate(method='linear')
prices_open_w = prices_open_int.groupby(pd.Grouper(freq='W')).first()
prices_high = con.bdh(index_tickers, 'PX_HIGH', firstday, today)
prices_high.columns = [i[0] for i in prices_high.columns]
prices_high = prices_high[index_tickers]
prices_high_int = prices_high.interpolate(method='linear')
prices_high_w = prices_high_int.groupby(pd.Grouper(freq='W')).max()
prices_low = con.bdh(index_tickers, 'PX_LOW', firstday, today)
prices_low.columns = [i[0] for i in prices_low.columns]
prices_low = prices_low[index_tickers]
prices_low_int = prices_low.interpolate(method='linear')
prices_low_w = prices_low_int.groupby(pd.Grouper(freq='W')).min()
prices_close = con.bdh(index_tickers, 'PX_LAST', firstday, today)
prices_close.columns = [i[0] for i in prices_close.columns]
prices_close = prices_close[index_tickers]
prices_close_int = prices_close.interpolate(method='linear')
prices_close_w = prices_close_int.groupby(pd.Grouper(freq='W')).last()
var_no1 = '21-1'
returns_open = prices_open_w / prices_close_w.shift(1) - 1
returns_open.columns = [var_no1+'_'+i+'_OPEN' for i in returns_open.columns]
returns_high = prices_high_w / prices_close_w.shift(1) - 1
returns_high.columns = [var_no1+'_'+i+'_HIGH' for i in returns_high.columns]
returns_low = prices_low_w / prices_close_w.shift(1) - 1
returns_low.columns = [var_no1+'_'+i+'_LOW' for i in returns_low.columns]
returns_close = prices_close_w / prices_close_w.shift(1) - 1
returns_close.columns = [var_no1+'_'+i+'_LAST' for i in returns_close.columns]
returns_fromClose_ohlc = | pd.concat([returns_open, returns_high, returns_low, returns_close],axis=1) | pandas.concat |
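# --- Side note (illustrative; not part of the original script) ---
# The groupby(pd.Grouper(freq='W')) pattern used above is equivalent to resample('W').
# A tiny self-contained sketch with synthetic prices, only to document the weekly
# aggregation and the return calculation; the numbers are made up.
_demo_idx = pd.date_range("2004-01-05", periods=20, freq="B")          # synthetic business days
_demo_px = pd.DataFrame({"close": np.linspace(100.0, 110.0, 20)}, index=_demo_idx)
_demo_weekly_close = _demo_px.resample("W").last()                     # same as groupby(pd.Grouper(freq='W')).last()
_demo_weekly_return = _demo_weekly_close / _demo_weekly_close.shift(1) - 1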
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import warnings
def _ensure_axes(ax, enforce):
if ax is None:
# Get axes without creating new one.
fig = plt.gcf()
if fig.axes:
ax = plt.gca()
if isinstance(ax, mpl.axes.Axes) and ax.name == "polar":
return ax
else:
if enforce:
ax = plt.gca(polar=True)
return ax
else:
msg = ("Axes must use polar projection. Use one of the following "
"statements to ensure polar projection:\n"
" plt.gca(polar=True)\n"
" ax = plt.subplot(..., polar=True)\n"
" fig, ax = plt.subplots(subplot_kw={'polar': True})\n"
)
raise ValueError(msg)
def _format_input_data(x, y, hue, style, data):
fmt = "invalid"
if (y is None and data is None):
raise ValueError("Arguments y and data cannot both be None.")
if data is None:
# Array mode: only x (optionally) and y are provided.
if y is None:
msg = "In array mode (data=None), argument y must be set."
raise ValueError(msg)
if x is None:
if isinstance(y, (pd.Series, pd.DataFrame)):
x = y.index.copy()
else:
x = pd.RangeIndex(len(y))
else:
x = pd.Index(x)
data = | pd.DataFrame(y) | pandas.DataFrame |
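# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# How _ensure_axes above is meant to be used: the caller prepares polar axes first,
# and the helper then validates (or, with enforce=True, creates) them. The figure
# setup below is an assumption for illustration only.
if __name__ == "__main__":
    fig, ax = plt.subplots(subplot_kw={"polar": True})  # polar projection, as the helper expects
    checked_ax = _ensure_axes(ax=None, enforce=False)   # resolves to the current polar axes
    assert checked_ax.name == "polar"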
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = pd.Series([], dtype="float")
self.aw_fnectar_pollen = pd.Series([], dtype="float")
self.aw_winter_nectar = pd.Series([], dtype="float")
self.aw_winter_pollen = pd.Series([], dtype="float")
self.ad_nectar = pd.Series([], dtype="float")
self.ad_pollen = pd.Series([], dtype="float")
self.aq_jelly = pd.Series([], dtype="float")
class BeerexOutputs(object):
"""
Output class for Beerex
"""
def __init__(self):
"""Class representing the outputs for Beerex"""
super(BeerexOutputs, self).__init__()
self.out_eec_spray = pd.Series(name="out_eec_spray", dtype="float")
self.out_eec_soil = pd.Series(name="out_eec_soil", dtype="float")
self.out_eec_seed = pd.Series(name="out_eec_seed", dtype="float")
self.out_eec_tree = pd.Series(name="out_eec_tree", dtype="float")
self.out_eec = pd.Series(name="out_eec", dtype="float")
self.out_lw1_total_dose = pd.Series(name="out_lw1_total_dose", dtype="float")
self.out_lw2_total_dose = pd.Series(name="out_lw2_total_dose", dtype="float")
self.out_lw3_total_dose = pd.Series(name="out_lw3_total_dose", dtype="float")
self.out_lw4_total_dose = pd.Series(name="out_lw4_total_dose", dtype="float")
self.out_lw5_total_dose = pd.Series(name="out_lw5_total_dose", dtype="float")
self.out_ld6_total_dose = pd.Series(name="out_ld6_total_dose", dtype="float")
self.out_lq1_total_dose = pd.Series(name="out_lq1_total_dose", dtype="float")
self.out_lq2_total_dose = pd.Series(name="out_lq2_total_dose", dtype="float")
self.out_lq3_total_dose = pd.Series(name="out_lq3_total_dose", dtype="float")
self.out_lq4_total_dose = pd.Series(name="out_lq4_total_dose", dtype="float")
self.out_aw_cell_total_dose = pd.Series(name="out_aw_cell_total_dose", dtype="float")
self.out_aw_brood_total_dose = pd.Series(name="out_aw_brood_total_dose", dtype="float")
self.out_aw_comb_total_dose = pd.Series(name="out_aw_comb_total_dose", dtype="float")
self.out_aw_pollen_total_dose = pd.Series(name="out_aw_pollen_total_dose", dtype="float")
self.out_aw_nectar_total_dose = pd.Series(name="out_aw_nectar_total_dose", dtype="float")
self.out_aw_winter_total_dose = pd.Series(name="out_aw_winter_total_dose", dtype="float")
self.out_ad_total_dose = pd.Series(name="out_ad_total_dose", dtype="float")
self.out_aq_total_dose = pd.Series(name="out_aq_total_dose", dtype="float")
self.out_lw1_acute_rq = pd.Series(name="out_lw1_acute_rq", dtype="float")
self.out_lw2_acute_rq = pd.Series(name="out_lw2_acute_rq", dtype="float")
self.out_lw3_acute_rq = | pd.Series(name="out_lw3_acute_rq", dtype="float") | pandas.Series |
#!/usr/bin/env python
import argparse
import pandas as pd
import numpy as np
from math import floor
import tqdm
def main():
parser = argparse.ArgumentParser(description='Pairwise distances between MLST alleles')
parser.add_argument('infile', type=str, help="Tab separated file containing alleles")
parser.add_argument('outfile', type=str, help="Name for output file")
args = parser.parse_args()
alleles_in = | pd.read_csv(args.infile, sep="\t", header=0, index_col=0, dtype=str) | pandas.read_csv |
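# --- Illustrative continuation (assumption; the original distance computation is not shown) ---
# One plausible way to obtain pairwise distances between allele profiles is a
# Hamming-style count of differing loci for every pair of samples. This is a sketch
# under that assumption, not necessarily the script's actual method.
def _pairwise_allele_distances(profiles: pd.DataFrame) -> pd.DataFrame:
    samples = profiles.index
    dist = pd.DataFrame(0, index=samples, columns=samples, dtype=int)
    for i, a in enumerate(samples):
        for b in samples[i + 1:]:
            d = int((profiles.loc[a] != profiles.loc[b]).sum())  # number of loci that differ
            dist.loc[a, b] = dist.loc[b, a] = d
    return dist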
from datetime import datetime
import numpy as np
import pandas as pd
from evidently import ColumnMapping
from evidently.analyzers.data_quality_analyzer import DataQualityAnalyzer
from evidently.analyzers.data_quality_analyzer import FeatureQualityStats
from evidently.analyzers.utils import process_columns
import pytest
@pytest.mark.parametrize(
"dataset, expected_metrics",
[
(
pd.DataFrame({"numerical_feature": []}),
FeatureQualityStats(
feature_type="num",
count=0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
infinite_count=None,
infinite_percentage=None,
max=None,
min=None,
mean=None,
missing_count=None,
missing_percentage=None,
most_common_value=None,
most_common_value_percentage=None,
std=None,
unique_count=None,
unique_percentage=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"numerical_feature": [np.nan, np.nan, np.nan, np.nan]}),
FeatureQualityStats(
feature_type="num",
count=0,
percentile_25=np.nan,
percentile_50=np.nan,
percentile_75=np.nan,
infinite_count=0,
infinite_percentage=0,
max=np.nan,
min=np.nan,
mean=np.nan,
missing_count=4,
missing_percentage=100,
most_common_value=np.nan,
most_common_value_percentage=100,
std=np.nan,
unique_count=0,
unique_percentage=0,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"numerical_feature": [np.nan, 2, 2, 432]}),
FeatureQualityStats(
feature_type="num",
count=3,
infinite_count=0,
infinite_percentage=0.0,
missing_count=1,
missing_percentage=25,
unique_count=2,
unique_percentage=50,
percentile_25=2.0,
percentile_50=2.0,
percentile_75=217.0,
max=432.0,
min=2.0,
mean=145.33,
most_common_value=2,
most_common_value_percentage=50,
std=248.26,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
],
)
def test_data_profile_analyzer_num_features(dataset: pd.DataFrame, expected_metrics: FeatureQualityStats) -> None:
data_profile_analyzer = DataQualityAnalyzer()
data_mapping = ColumnMapping(
numerical_features=["numerical_feature"],
)
result = data_profile_analyzer.calculate(dataset, None, data_mapping)
assert result.reference_features_stats is not None
assert result.reference_features_stats.num_features_stats is not None
assert "numerical_feature" in result.reference_features_stats.num_features_stats
metrics = result.reference_features_stats.num_features_stats["numerical_feature"]
assert metrics == expected_metrics
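# Note (added for clarity): the expected mean/std/percentiles above are simply the
# sample statistics of the non-missing values [2, 2, 432], rounded to two decimals, e.g.:
#   round(np.mean([2, 2, 432]), 2)          -> 145.33
#   round(np.std([2, 2, 432], ddof=1), 2)   -> 248.26
#   np.percentile([2, 2, 432], 75)          -> 217.0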
@pytest.mark.parametrize(
"dataset, expected_metrics",
[
(
pd.DataFrame({"category_feature": []}),
FeatureQualityStats(
feature_type="cat",
count=0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
infinite_count=None,
infinite_percentage=None,
max=None,
min=None,
mean=None,
missing_count=None,
missing_percentage=None,
most_common_value=None,
most_common_value_percentage=None,
std=None,
unique_count=None,
unique_percentage=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"category_feature": [None, None, None, None]}),
FeatureQualityStats(
feature_type="cat",
count=0,
infinite_count=None,
infinite_percentage=None,
missing_count=4,
missing_percentage=100.0,
unique_count=0,
unique_percentage=0.0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value=np.nan,
most_common_value_percentage=100.0,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
new_in_current_values_count=None,
unused_in_current_values_count=None,
),
),
(
pd.DataFrame({"category_feature": [np.nan, 2, 2, 1]}),
FeatureQualityStats(
feature_type="cat",
count=3,
infinite_count=None,
infinite_percentage=None,
missing_count=1,
missing_percentage=25,
unique_count=2,
unique_percentage=50,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value=2,
most_common_value_percentage=50,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"category_feature": ["y", "n", "n/a", "n"]}),
FeatureQualityStats(
feature_type="cat",
count=4,
infinite_count=None,
infinite_percentage=None,
missing_count=0,
missing_percentage=0,
unique_count=3,
unique_percentage=75,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="n",
most_common_value_percentage=50,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"category_feature": ["n", "d", "p", "n"]}),
FeatureQualityStats(
feature_type="cat",
count=4,
infinite_count=None,
infinite_percentage=None,
missing_count=0,
missing_percentage=0,
unique_count=3,
unique_percentage=75,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="n",
most_common_value_percentage=50,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
],
)
def test_data_profile_analyzer_cat_features(dataset: pd.DataFrame, expected_metrics: FeatureQualityStats) -> None:
data_profile_analyzer = DataQualityAnalyzer()
for task_type in (None, "regression", "classification"):
result = data_profile_analyzer.calculate(
dataset, None, ColumnMapping(categorical_features=["category_feature"], task=task_type)
)
assert result.reference_features_stats is not None
assert result.reference_features_stats.cat_features_stats is not None
assert "category_feature" in result.reference_features_stats.cat_features_stats
metrics = result.reference_features_stats.cat_features_stats["category_feature"]
assert metrics == expected_metrics
def test_data_profile_analyzer_classification_with_target() -> None:
reference_data = pd.DataFrame(
{
"target": ["cat_1", "cat_1", "cat_2", "cat_3", "cat_1"],
"prediction": ["cat_2", "cat_1", "cat_1", "cat_3", "cat_1"],
}
)
current_data = pd.DataFrame(
{
"target": ["cat_1", "cat_6", "cat_2", None, "cat_1"],
"prediction": ["cat_5", "cat_1", "cat_1", "cat_3", np.nan],
}
)
data_profile_analyzer = DataQualityAnalyzer()
data_mapping = ColumnMapping(task="classification")
result = data_profile_analyzer.calculate(reference_data, current_data, data_mapping)
assert result.reference_features_stats is not None
assert result.reference_features_stats.target_stats is not None
assert result.reference_features_stats.target_stats["target"] == FeatureQualityStats(
feature_type="cat",
count=5,
infinite_count=None,
infinite_percentage=None,
missing_count=0,
missing_percentage=0.0,
unique_count=3,
unique_percentage=60.0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="cat_1",
most_common_value_percentage=60.0,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
new_in_current_values_count=None,
unused_in_current_values_count=None,
)
assert result.current_features_stats is not None
assert result.current_features_stats.target_stats is not None
assert result.current_features_stats.target_stats["target"] == FeatureQualityStats(
feature_type="cat",
count=4,
infinite_count=None,
infinite_percentage=None,
missing_count=1,
missing_percentage=20.0,
unique_count=3,
unique_percentage=60.0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="cat_1",
most_common_value_percentage=40.0,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
new_in_current_values_count=2,
unused_in_current_values_count=1,
)
@pytest.mark.parametrize(
"reference_dataset, current_dataset, expected_new, expected_unused",
[
(
pd.DataFrame({"category_feature": ["", "a", "b"]}),
pd.DataFrame({"category_feature": ["a", "b"]}),
0,
1,
),
(
pd.DataFrame({"category_feature": [np.nan, 2, 2, 43]}),
pd.DataFrame({"category_feature": [6, 2, 5, np.nan]}),
2,
1,
),
(
pd.DataFrame({"category_feature": [1, 2, 3, 4]}),
pd.DataFrame({"category_feature": [6, 2, 5, np.nan]}),
3,
3,
),
(
pd.DataFrame({"category_feature": ["a", "b", "c", "d"]}),
pd.DataFrame({"category_feature": ["a", "a", "a"]}),
0,
3,
),
(
pd.DataFrame({"category_feature": [np.nan, np.nan, np.nan, np.nan]}),
pd.DataFrame({"category_feature": ["a", "a", "a"]}),
1,
1,
),
(
pd.DataFrame({"category_feature": [1, 2, 3, np.nan]}),
| pd.DataFrame({"category_feature": [np.nan, np.nan, np.nan]}) | pandas.DataFrame |
# python 2/3 compatibility
from __future__ import division, print_function
import sys
import os.path
import numpy
import pandas
import copy
import difflib
import scipy
import collections
import json
# package imports
import rba
from .rba import RbaModel, ConstraintMatrix, Solver
from .rba_SimulationData import RBA_SimulationData
from .rba_SimulationParameters import RBA_SimulationParameters
from .rba_ModelStructure import RBA_ModelStructure
from .rba_Problem import RBA_Problem
from .rba_Matrix import RBA_Matrix
from .rba_LP import RBA_LP
from .rba_FBA import RBA_FBA
from .rba_LogBook import RBA_LogBook
class RBA_Session(object):
"""
Top level of the RBA API.
Attributes
----------
    xml_dir : str
        Path to the directory from which the rba-model files were loaded.
    model : rba.RbaModel
        The currently loaded RBA model.
    matrices : rba.ConstraintMatrix
        Computational (matrix) representation of the model.
    solver : rba.Solver
        Solver operating on the constraint matrices.
    Problem : rbatools.RBA_Problem
        LP-problem representation of the model.
    Medium : dict
        Current medium composition (substrate IDs and concentrations).
    ModelStructure : rbatools.RBA_ModelStructure
        Structural information on the model.
    Results : dict
        Recorded simulation results (pandas.DataFrames, one column per run).
    Parameters : dict
        Recorded simulation parameters (pandas.DataFrames, one column per run).
    SimulationData : rbatools.RBA_SimulationData
        Simulation-data object created by writeResults().
    SimulationParameters : rbatools.RBA_SimulationParameters
        Simulation-parameter object created by writeResults().
Methods
----------
__init__(xml_dir)
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
rebuild_from_model()
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
reloadModel()
Reloads model from xml-files and then rebuild computational model-representation (matrix).
recordResults(runName)
        Records simulation output for further use
        and stores it in the 'Results'-attribute as pandas.DataFrames in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
recordParameters(runName)
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters'-attribute as pandas.DataFrames in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
clearResults()
        Removes all previously recorded results and deletes the 'Results'-attribute.
clearParameters()
        Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
writeResults(session_name='', digits=10)
        Creates SimulationData and SimulationParameters objects from the recordings ('Results', 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
Default: 5
session_name : str
Name of Simulation session.
Default: ''
returnExchangeFluxes()
Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
Dictionary with exchange-keys and respective -rates.
ConstraintSaturation(constraints=None)
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
setMedium(changes)
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
Values : New concentration(s)
setMu(Mu)
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
doSolve(runName='DontSave')
Solves problem to find solution.
Does the same as rbatools.RBA_Problem.solveLP().
Just has some automatic option for results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
findMaxGrowthRate(precision=0.0005, max=4, start_value=None, recording=False)
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
Numeric precision with which the maximum is approximated.
Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A starting-value close to the optimum reduces the number of iterations required for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
knockOut(gene)
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be the gene identifier, represented as ID or ProtoID of proteins in the rbatools.protein_bloc.ProteinBlock.Elements class (depending on whether protein-isoforms are considered).
FeasibleRange(variables=None)
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
ParetoFront(variable_X, variable_Y, N=10, sign_VY='max')
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
"""
def __init__(self, xml_dir):
"""
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
"""
self.xml_dir = xml_dir
self.LogBook = RBA_LogBook('Controler')
if not hasattr(self, 'ModelStructure'):
if os.path.isfile(str(self.xml_dir+'/ModelStructure.json')):
self.ModelStructure = RBA_ModelStructure()
with open(str(self.xml_dir+'/ModelStructure.json'), 'r') as myfile:
data = myfile.read()
self.ModelStructure.fromJSON(inputString=data)
else:
self.build_ModelStructure()
self.model = RbaModel.from_xml(input_dir=xml_dir)
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.LogBook.addEntry('Model loaded from {}.'.format(self.xml_dir))
self.Problem = RBA_Problem(solver=self.solver)
medium = pandas.read_csv(xml_dir+'/medium.tsv', sep='\t')
self.Medium = dict(zip(list(medium.iloc[:, 0]), [float(i)
for i in list(medium.iloc[:, 1])]))
self.Mu = self.Problem.Mu
self.ExchangeMap = buildExchangeMap(self)
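# Illustrative usage sketch (comments only, not executed): assumes a model directory
# './model_xml' containing the rba xml-files, a medium.tsv and (optionally) ModelStructure.json.
#
#   Simulation = RBA_Session('./model_xml')
#   Simulation.setMu(0.25)
#   Simulation.Problem.solveLP()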
def build_ModelStructure(self):
self.ModelStructure = RBA_ModelStructure()
self.ModelStructure.fromFiles(xml_dir=self.xml_dir)
self.ModelStructure.exportJSON(path=self.xml_dir)
def addExchangeReactions(self):
"""
Adds explicit exchange-reactions of boundary-metabolites to RBA-problem, named R_EX_ followed by metabolite name (without M_ prefix).
"""
Mets_external = [m.id for m in self.model.metabolism.species if m.boundary_condition]
Mets_internal = [m.id for m in self.model.metabolism.species if not m.boundary_condition]
Reactions = [r.id for r in self.model.metabolism.reactions]
full_S = rba.core.metabolism.build_S(
Mets_external+Mets_internal, self.model.metabolism.reactions)
S_M_ext = full_S[:len(Mets_external), ].toarray()
col_indices_toremove = []
for i in range(S_M_ext.shape[1]):
s_col_uniques = list(set(list(S_M_ext[:, i])))
if len(s_col_uniques) == 1:
if s_col_uniques[0] == 0:
col_indices_toremove.append(i)
RemainingReactions = [i for i in Reactions if Reactions.index(
i) not in col_indices_toremove]
S_ext = numpy.delete(S_M_ext, col_indices_toremove, axis=1)
A = numpy.concatenate((S_ext, numpy.eye(len(Mets_external))), axis=1, out=None)
ColNames = RemainingReactions+[str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]
# print(str('R_EX_'+i.split('M_')[-1]))
LBs = list([self.Problem.LP.LB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[-10000]*len(Mets_external))
UBs = list([self.Problem.LP.UB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[10000]*len(Mets_external))
b = [0]*len(Mets_external)
f = list([self.Problem.LP.f[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[0]*len(Mets_external))
ExchangeMatrix = RBA_Matrix()
ExchangeMatrix.A = scipy.sparse.coo_matrix(A)
ExchangeMatrix.b = numpy.array([0]*len(Mets_external))
ExchangeMatrix.f = numpy.array(f)
ExchangeMatrix.LB = numpy.array(LBs)
ExchangeMatrix.UB = numpy.array(UBs)
ExchangeMatrix.row_signs = ['E']*len(Mets_external)
ExchangeMatrix.row_names = Mets_external
ExchangeMatrix.col_names = ColNames
ExchangeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=ExchangeMatrix)
self.ExchangeReactionMap = dict(
zip(Mets_external, [str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]))
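# Sketch of the exchange-reaction convenience (continuing the sketch above; the metabolite
# ID 'M_glc__D_e' is a hypothetical example, actual IDs depend on the model):
#
#   Simulation.addExchangeReactions()
#   Simulation.ExchangeReactionMap['M_glc__D_e']   # --> 'R_EX_glc__D_e'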
def rebuild_from_model(self):
"""
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
"""
self.LogBook.addEntry('Model rebuilt.')
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.Problem = RBA_Problem(solver=self.solver)
self.setMedium(changes=self.Medium)
def reloadModel(self):
"""
Reloads model from xml-files and then rebuild computational model-representation (matrix).
"""
self.LogBook.addEntry('Model reloaded from {}.'.format(self.xml_dir))
self.model = RbaModel.from_xml(input_dir=self.xml_dir)
self.rebuild_from_model()
def recordResults(self, runName):
"""
Records Simulation output for further use.
and stores them in the 'Results'-attribute as pandas.DataFrames in a dictionary, with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Solution recorded under {}.'.format(runName))
if not hasattr(self, 'Results'):
self.Results = {'Reactions': pandas.DataFrame(index=list(self.ModelStructure.ReactionInfo.Elements.keys())),
'Enzymes': pandas.DataFrame(index=list(self.ModelStructure.EnzymeInfo.Elements.keys())),
'Processes': pandas.DataFrame(index=[self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery' for i in self.ModelStructure.ProcessInfo.Elements.keys()]),
'Proteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinMatrix['Proteins'])),
'ProtoProteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinGeneMatrix['ProtoProteins'])),
'Constraints': pandas.DataFrame(index=self.Problem.LP.row_names),
'SolutionType': pandas.DataFrame(index=['SolutionType']),
'Mu': pandas.DataFrame(index=['Mu']),
'ObjectiveFunction': pandas.DataFrame(index=self.Problem.LP.col_names),
'ObjectiveValue': pandas.DataFrame(index=['ObjectiveValue']),
'ExchangeFluxes': pandas.DataFrame(index=list(self.ExchangeMap.keys()))}
Exchanges = self.returnExchangeFluxes()
for i in Exchanges.keys():
self.Results['ExchangeFluxes'].loc[i, runName] = Exchanges[i]
self.Results['Reactions'][runName] = self.Results['Reactions'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Reactions'].index)})
self.Results['Enzymes'][runName] = self.Results['Enzymes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Enzymes'].index)})
self.Results['Processes'][runName] = self.Results['Processes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Processes'].index)})
self.Results['Constraints'][runName] = self.Results['Constraints'].index.map(
{i: self.Problem.DualValues[i] for i in self.Problem.LP.row_names})
self.Results['Proteins'][runName] = self.Results['Proteins'].index.map(
ProteomeRecording(self, runName))
self.Results['ProtoProteins'][runName] = self.Results['ProtoProteins'].index.map(
ProtoProteomeRecording(self, runName, self.Results['Proteins']))
self.Results['SolutionType'][runName] = self.Problem.SolutionType
self.Results['Mu'][runName] = self.Problem.Mu
self.Results['ObjectiveFunction'][runName] = list(self.Problem.getObjective().values())
self.Results['ObjectiveValue'][runName] = self.Problem.ObjectiveValue
def recordParameters(self, runName):
"""
Records Simulation parameters (LP-coefficients etc.) for further use.
and stores them in the 'Parameters'-attribute as pandas.DataFrames in a dictionary, with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Coefficients recorded under {}.'.format(runName))
EnzymeCapacities = self.get_parameter_values(
parameter_type='enzyme_efficiencies', species=None, output_format='dict')
ProcessCapacities = self.get_parameter_values(
parameter_type='machine_efficiencies', species=None, output_format='dict')
CompartmentCapacities = self.get_parameter_values(
parameter_type='maximal_densities', species=None, output_format='dict')
TargetValues = self.get_parameter_values(
parameter_type='target_values', species=None, output_format='dict')
if not hasattr(self, 'Parameters'):
self.Parameters = {'EnzymeEfficiencies_FW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'EnzymeEfficiencies_BW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'ProcessEfficiencies': pandas.DataFrame(index=list(ProcessCapacities.keys())),
'CompartmentCapacities': pandas.DataFrame(index=list(CompartmentCapacities.keys())),
'Medium': pandas.DataFrame(index=self.Medium.keys()),
'TargetValues': pandas.DataFrame(index=[TargetValues[i]['Target_id'] for i in list(TargetValues.keys())])}
self.Parameters['EnzymeEfficiencies_FW'][runName] = self.Parameters['EnzymeEfficiencies_FW'].index.map({i: list(
EnzymeCapacities[i]['Forward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['EnzymeEfficiencies_BW'][runName] = self.Parameters['EnzymeEfficiencies_BW'].index.map({i: list(
EnzymeCapacities[i]['Backward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['ProcessEfficiencies'][runName] = self.Parameters['ProcessEfficiencies'].index.map(
{i: list(ProcessCapacities[i].values())[0] for i in list(ProcessCapacities.keys()) if len(list(ProcessCapacities[i].values())) > 0})
self.Parameters['CompartmentCapacities'][runName] = self.Parameters['CompartmentCapacities'].index.map(
{i: list(CompartmentCapacities[i].values())[0] for i in list(CompartmentCapacities.keys()) if len(list(CompartmentCapacities[i].values())) > 0})
self.Parameters['Medium'][runName] = self.Parameters['Medium'].index.map(self.Medium)
self.Parameters['TargetValues'][runName] = self.Parameters['TargetValues'].index.map(
{TargetValues[i]['Target_id']: list(TargetValues[i]['Target_value'].values())[0] for i in list(TargetValues.keys()) if len(list(TargetValues[i]['Target_value'].values())) > 0})
def clearResults(self):
"""
Removes all previously recorded results and deletes the 'Results'-attribute.
"""
self.LogBook.addEntry('Results cleared.')
delattr(self, 'Results')
def clearParameters(self):
"""
Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
"""
self.LogBook.addEntry('Parameters cleared.')
delattr(self, 'Parameters')
def writeResults(self, session_name='', digits=5, loggingIntermediateSteps=False):
"""
Creates SimulationData and SimulationParameters objects from recordings ('Results'.'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
Default: 5
session_name : str
Name of Simulation session.
Default: ''
"""
self.LogBook.addEntry('Data written under {}.'.format(session_name))
if hasattr(self, 'Results'):
self.Results['uniqueReactions'] = mapIsoReactions(Controller=self)
self.Results['SolutionType'] = self.Results['SolutionType']
self.Results['Mu'] = self.Results['Mu'].round(digits)
self.Results['ObjectiveFunction'] = self.Results['ObjectiveFunction'].loc[(
self.Results['ObjectiveFunction'] != 0).any(axis=1)].round(digits)
self.Results['ObjectiveValue'] = self.Results['ObjectiveValue'].round(digits)
self.Results['Proteins'] = self.Results['Proteins'].round(digits)
self.Results['uniqueReactions'] = self.Results['uniqueReactions'].round(digits)
self.Results['Reactions'] = self.Results['Reactions'].round(digits)
self.Results['Enzymes'] = self.Results['Enzymes'].round(digits)
self.Results['Processes'] = self.Results['Processes'].round(digits)
self.Results['Constraints'] = self.Results['Constraints'].round(digits)
self.Results['ExchangeFluxes'] = self.Results['ExchangeFluxes'].round(digits)
self.SimulationData = RBA_SimulationData(StaticData=self.ModelStructure)
self.SimulationData.fromSimulationResults(Controller=self, session_name=session_name)
if hasattr(self, 'Parameters'):
self.Parameters['EnzymeEfficiencies_FW'] = self.Parameters['EnzymeEfficiencies_FW'].round(
digits)
self.Parameters['EnzymeEfficiencies_BW'] = self.Parameters['EnzymeEfficiencies_BW'].round(
digits)
self.Parameters['ProcessEfficiencies'] = self.Parameters['ProcessEfficiencies'].round(
digits)
self.Parameters['CompartmentCapacities'] = self.Parameters['CompartmentCapacities'].round(
digits)
self.Parameters['TargetValues'] = self.Parameters['TargetValues'].round(digits)
self.Parameters['Medium'] = self.Parameters['Medium'].loc[(
self.Parameters['Medium'] != 0).any(axis=1)].round(digits)
self.SimulationParameters = RBA_SimulationParameters(StaticData=self.ModelStructure)
self.SimulationParameters.fromSimulationResults(Controller=self)
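# Typical recording workflow (sketch, not executed; continues the usage sketch above):
#
#   Simulation.doSolve(runName='my_condition')
#   Simulation.recordParameters(runName='my_condition')
#   Simulation.writeResults(session_name='my_session', digits=5)
#   data = Simulation.SimulationData          # rbatools.RBA_SimulationData object
#   params = Simulation.SimulationParameters  # rbatools.RBA_SimulationParameters object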
def returnExchangeFluxes(self):
"""
Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
Dictionary with exchange-keys and respective -rates.
"""
out = {}
for j in self.ExchangeMap.keys():
netflux = 0
for k in self.ExchangeMap[j].keys():
netflux += self.ExchangeMap[j][k]*self.Problem.SolutionValues[k]
if netflux != 0:
out[j] = netflux
return(out)
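# Sketch: net exchange fluxes of a feasible solution (keys are boundary-metabolite IDs):
#
#   Simulation.Problem.solveLP()
#   if Simulation.Problem.Solved:
#       exchange = Simulation.returnExchangeFluxes()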
def ConstraintSaturation(self, constraints=None):
"""
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
"""
if constraints is None:
ConstraintsInQuestion = self.Problem.LP.row_names
else:
if isinstance(constraints, list):
ConstraintsInQuestion = constraints
elif isinstance(constraints, str):
ConstraintsInQuestion = [constraints]
if isinstance(constraints, list):
    if len(constraints) == 0:
        ConstraintsInQuestion = self.Problem.LP.row_names
    elif isinstance(constraints[0], list):
        ConstraintsInQuestion = constraints[0]
rhs = self.Problem.getRighthandSideValue(ConstraintsInQuestion)
lhs = self.Problem.calculateLefthandSideValue(ConstraintsInQuestion)
Out = pandas.DataFrame(columns=['LHS', 'RHS', 'Saturation'], index=ConstraintsInQuestion)
for i in ConstraintsInQuestion:
    # look up LHS/RHS by constraint name, so that subsets of constraints are handled correctly
    lhval = lhs[i]
    rhval = rhs[i]
sat = numpy.nan
if rhval != 0:
sat = lhval/rhval
Out.loc[i, 'LHS'] = lhval
Out.loc[i, 'RHS'] = rhval
Out.loc[i, 'Saturation'] = sat
self.LogBook.addEntry(
'Saturation of constraint {} determined to be {}.'.format(i, sat))
return(Out)
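# Sketch: saturation of all constraints vs. of a single (hypothetical) constraint ID:
#
#   sat_all = Simulation.ConstraintSaturation()
#   sat_one = Simulation.ConstraintSaturation(constraints='my_constraint')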
def setMedium(self, changes, loggingIntermediateSteps=False):
"""
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
Values : New concentration(s)
"""
for species in (changes.keys()):
self.Medium[species] = float(changes[species])
self.Problem.ClassicRBAmatrix.set_medium(self.Medium)
self.Problem.ClassicRBAmatrix.build_matrices(self.Mu)
inputMatrix = RBA_Matrix()
inputMatrix.loadMatrix(matrix=self.Problem.ClassicRBAmatrix)
self.Problem.LP.updateMatrix(matrix=inputMatrix, Ainds=MediumDependentCoefficients_A(
self), Binds=[], CTinds=[], LBinds=None, UBinds=None)
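# Sketch: changing the medium composition ('M_glc' is a placeholder; medium-metabolite
# IDs are model-specific and listed as keys of the 'Medium'-attribute):
#
#   Simulation.setMedium(changes={'M_glc': 10.0})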
def setMu(self, Mu, loggingIntermediateSteps=False):
"""
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
"""
self.LogBook.addEntry('Growth-rate changed:{} --> {}'.format(self.Mu, float(Mu)))
self.Problem.setMu(Mu=float(Mu), ModelStructure=self.ModelStructure,
logging=loggingIntermediateSteps)
self.Mu = float(Mu)
def doSolve(self, runName='DontSave', loggingIntermediateSteps=False):
"""
Solves problem to find solution.
Does the same as rbatools.RBA_Problem.solveLP().
Just has some automatic option for results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
"""
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
if runName != 'DontSave':
if runName == 'Auto':
if hasattr(self, 'Results'):
name = str(self.Results['Reactions'].shape[1]+1)
if not hasattr(self, 'Results'):
name = '1'
if runName != 'Auto':
name = runName
self.recordResults(runName=name)
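# Sketch of the three recording modes of doSolve:
#
#   Simulation.doSolve()                      # solve without recording (default 'DontSave')
#   Simulation.doSolve(runName='Auto')        # record under an automatic numeric name
#   Simulation.doSolve(runName='glucose_10')  # record under an explicit name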
def findMaxGrowthRate(self, precision=0.0005, max=4, start_value=None, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
Numeric precision with which the maximum is approximated.
Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A starting-value close to the optimum reduces the number of iterations required for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
"""
minMu = 0
maxMu = max
if start_value is None:
testMu = minMu
else:
testMu = start_value
iteration = 0
while (maxMu - minMu) > precision:
self.setMu(Mu=testMu)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
self.recordResults('DichotomyMu_iteration_'+str(iteration))
minMu = testMu
else:
maxMu = testMu
testMu = numpy.mean([maxMu, minMu])
self.LogBook.addEntry('Maximal growth-rate found to be: {}.'.format(minMu))
if minMu == max:
print('Warning: Maximum growth rate might exceed specified range. Try rerunning this method with larger max-argument.')
self.setMu(Mu=minMu)
self.Problem.solveLP(logging=False)
self.Problem.SolutionType = 'GrowthRate_maximization'
return(minMu)
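# Sketch: maximum growth-rate at the current medium, recording intermediate solutions:
#
#   mumax = Simulation.findMaxGrowthRate(precision=0.0005, max=2, recording=True)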
def knockOut(self, gene, loggingIntermediateSteps=False):
"""
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be the gene identifier, represented as ID or ProtoID of proteins in the rbatools.protein_bloc.ProteinBlock.Elements class (depending on whether protein-isoforms are considered).
"""
if type(gene) is str:
genes = [gene]
if type(gene) is list:
genes = gene
isoform_genes = [g for g in genes if g in list(self.ModelStructure.ProteinInfo.Elements.keys(
))]+[i for g in genes for i in self.ModelStructure.ProteinInfo.Elements.keys() if self.ModelStructure.ProteinInfo.Elements[i]['ProtoID'] == g]
for g in isoform_genes:
self.LogBook.addEntry('Gene {} knocked out.'.format(g))
ConsumersEnzymes = self.ModelStructure.ProteinInfo.Elements[g]['associatedEnzymes']
for i in ConsumersEnzymes:
LikeliestVarName = difflib.get_close_matches(i, self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
ConsumersProcess = self.ModelStructure.ProteinInfo.Elements[g]['SupportsProcess']
for i in ConsumersProcess:
LikeliestVarName = difflib.get_close_matches(
str(self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery'), self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
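# Sketch: knock out a (hypothetical) gene and re-estimate the maximal growth-rate:
#
#   Simulation.knockOut(gene='geneX')
#   mumax_ko = Simulation.findMaxGrowthRate()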
def FeasibleRange(self, variables=None, loggingIntermediateSteps=False):
"""
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
"""
if variables is None:
VariablesInQuestion = self.Problem.LP.col_names
else:
if isinstance(variables, list):
VariablesInQuestion = variables
elif isinstance(variables, str):
VariablesInQuestion = [variables]
out = {}
for i in VariablesInQuestion:
min = numpy.nan
max = numpy.nan
self.Problem.clearObjective(logging=loggingIntermediateSteps)
self.Problem.setObjectiveCoefficients(
inputDict={i: 1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
min = self.Problem.SolutionValues[i]
self.Problem.setObjectiveCoefficients(
inputDict={i: -1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = self.Problem.SolutionValues[i]
out.update({i: {'Min': min, 'Max': max}})
self.LogBook.addEntry(
'Feasible-range of {} determined to be between {} and {}.'.format(i, min, max))
return(out)
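# Sketch: feasible range of a single (hypothetical) reaction variable:
#
#   fr = Simulation.FeasibleRange(variables='R_my_reaction')
#   lower, upper = fr['R_my_reaction']['Min'], fr['R_my_reaction']['Max']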
def ParetoFront(self, variable_X, variable_Y, N=10, sign_VY='max', loggingIntermediateSteps=False):
"""
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
"""
if variable_X not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
if variable_Y not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
FR = self.FeasibleRange(variable_X)
cMin = FR[variable_X]['Min']
cMax = FR[variable_X]['Max']
concentrations = [float(cMin+(cMax-cMin)*i/N) for i in range(N+1)]
Out = pandas.DataFrame(columns=[variable_X, variable_Y])
oldLB = self.Problem.getLB(variable_X)
oldUB = self.Problem.getUB(variable_X)
iteration = -1
for conc in concentrations:
iteration += 1
self.Problem.setLB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.clearObjective(logging=loggingIntermediateSteps)
if sign_VY == 'max':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: -1}, logging=loggingIntermediateSteps)
if sign_VY == 'min':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: 1}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = abs(self.Problem.ObjectiveValue)
else:
max = numpy.nan
self.Problem.setLB(inputDict=oldLB, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict=oldUB, logging=loggingIntermediateSteps)
Out.loc[iteration, variable_X] = conc
Out.loc[iteration, variable_Y] = max
self.LogBook.addEntry(
'Pareto-front between {} and {} determined.'.format(variable_X, variable_Y))
return(Out)
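# Sketch: Pareto front between two (hypothetical) model variables:
#
#   pf = Simulation.ParetoFront(variable_X='R_reactionA', variable_Y='R_reactionB',
#                               N=10, sign_VY='max')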
### !!! Docstring ###
def buildFBA(self, type='classic', objective='classic', maintenanceToBM=False):
"""
Derives and constructs FBA-problem from RBA-problem and stores it under attribute 'FBA'.
Parameters
----------
type : str
'classic': reduces the problem to metabolite rows and reaction columns (enzyme-related columns are removed).
'parsi': reduced variant which additionally retains enzyme-associated columns/rows.
Default: 'classic'
objective : str
'classic': keeps the original objective coefficients (a present 'R_maintenance_atp' reaction is removed).
'targets': adds a biomass reaction 'R_BIOMASS_targetsRBA', derived from the RBA target values.
Default: 'classic'
maintenanceToBM : boolean
If True (with objective='targets'), the maintenance-ATP demand is added to the generated biomass reaction.
Default: False
"""
RBAproblem = self.Problem.LP
A = RBAproblem.A.toarray()
if type == 'classic':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith('R_') and not i.startswith('M_') and not i.endswith('_synthesis')]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if 'enzyme' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('M_')]
elif type == 'parsi':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith(
'R_') and not i.startswith('M_') and not i.endswith('_synthesis')]+[RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('R_') and not i.startswith('M_')]
if objective == 'classic':
if 'R_maintenance_atp' in RBAproblem.col_names:
Cols2remove.append(RBAproblem.col_names.index('R_maintenance_atp'))
Anew = numpy.delete(A, Cols2remove, axis=1)
col_namesNew = list(numpy.delete(RBAproblem.col_names, Cols2remove))
LBnew = numpy.delete(RBAproblem.LB, Cols2remove)
UBnew = numpy.delete(RBAproblem.UB, Cols2remove)
fNew = numpy.delete(RBAproblem.f, Cols2remove)
Anew2 = numpy.delete(Anew, Rows2remove, axis=0)
row_namesNew = list(numpy.delete(RBAproblem.row_names, Rows2remove))
row_signsNew = list(numpy.delete(RBAproblem.row_signs, Rows2remove))
bNew = numpy.delete(RBAproblem.b, Rows2remove)
trnaInds = [i for i in range(len(row_namesNew)) if row_namesNew[i].startswith(
'M_') and 'trna' in row_namesNew[i]]
# bNew[trnaInds] = 0
if objective == 'targets':
col_namesNew.append('R_BIOMASS_targetsRBA')
LBnew = numpy.append(LBnew, 0)
UBnew = numpy.append(UBnew, 10000)
fNew = numpy.append(fNew, 0)
BMrxnCol = numpy.ones((len(row_namesNew), 1))
BMrxnCol[:, 0] = bNew
if maintenanceToBM:
MaintenanceTarget = LBnew[col_namesNew.index('R_maintenance_atp')]
BMrxnCol[row_namesNew.index('M_atp_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h2o_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_adp_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_pi_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h_c')] -= MaintenanceTarget
LBnew[col_namesNew.index('R_maintenance_atp')] = 0
Anew2 = numpy.append(Anew2, -BMrxnCol, axis=1)
bNew = numpy.array([0]*Anew2.shape[0])
Matrix1 = RBA_Matrix()
Matrix1.A = scipy.sparse.coo_matrix(Anew2)
Matrix1.b = bNew
Matrix1.LB = LBnew
Matrix1.UB = UBnew
Matrix1.row_signs = row_signsNew
Matrix1.row_names = row_namesNew
Matrix1.col_names = col_namesNew
Matrix1.f = fNew
if type == 'classic':
Matrix1.b = numpy.array([0]*len(row_signsNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
elif type == 'parsi':
MetaboliteRows = {i: Matrix1.row_names.index(
i) for i in Matrix1.row_names if i.startswith('M_')}
EnzymeCols = {i: Matrix1.col_names.index(
i) for i in Matrix1.col_names if i.startswith('R_') and '_enzyme' in i}
Matrix2 = RBA_Matrix()
Matrix2.A = scipy.sparse.coo_matrix(numpy.zeros((len(MetaboliteRows), len(EnzymeCols))))
Matrix2.b = numpy.array(Matrix1.b[list(MetaboliteRows.values())])
Matrix2.LB = numpy.array(Matrix1.LB[list(EnzymeCols.values())])
Matrix2.UB = numpy.array(Matrix1.UB[list(EnzymeCols.values())])
Matrix2.f = numpy.array(Matrix1.f[list(EnzymeCols.values())])
Matrix2.row_signs = [Matrix1.row_signs[i] for i in list(MetaboliteRows.values())]
Matrix2.row_names = list(MetaboliteRows.keys())
Matrix2.col_names = list(EnzymeCols.keys())
Matrix2.mapIndices()
Matrix1.b = numpy.array([0]*len(bNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
LP1.updateMatrix(Matrix2)
self.FBA = RBA_FBA(LP1)
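# Sketch: derive the FBA-problem from the RBA-problem; the resulting rbatools.RBA_FBA
# object is stored under the 'FBA'-attribute:
#
#   Simulation.buildFBA(type='classic', objective='classic')
#   fba_problem = Simulation.FBA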
def findMinMediumConcentration(self, metabolite, precision=0.00001, max=100, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the minimal feasible concentration of
growth-substrate in medium, at a previously set growth-rate.
Parameters
----------
metabolite : str
ID of metabolite in medium.
precision : float
Numeric precision with which the minimum is approximated.
Default : 0.00001
max : float
Defines the highest concentration to be screened for.
Default=100
recording : bool
Records intermediate feasible solutions
while approaching the minimum concentration.
Default : False
Returns
-------
minimum feasible growth-substrate concentration as float.
"""
minConc = 0.0
maxConc = max
testConc = minConc
iteration = 0
oldConc = self.Medium[metabolite]
while (maxConc - minConc) > precision:
self.setMedium(changes={metabolite: testConc})
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
run_name = 'Dichotomy_'+metabolite+'_' + \
str(testConc)+'_iteration_'+str(iteration)
self.recordResults(run_name)
maxConc = testConc
else:
minConc = testConc
testConc = numpy.mean([maxConc, minConc])
self.LogBook.addEntry(
'Minimal required {} concentration found to be: {}.'.format(metabolite, maxConc))
self.setMedium(changes={metabolite: oldConc})
return(maxConc)
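# Sketch: minimal required concentration of a (hypothetical) medium component at a
# fixed growth-rate:
#
#   Simulation.setMu(0.2)
#   cmin = Simulation.findMinMediumConcentration(metabolite='M_glc', max=100)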
def addProtein(self, input):
"""
Adds representation of individual proteins to problem.
Parameters
----------
input : dict or str
If input is str it has to be the ID of a protein in the model.
Then this protein is added to the problem and creates:
One constraint named Protein_'ID' (equality).
One variable named TotalLevel_'ID' representing the total amount.
One variable named Free_'ID'_'respectiveCompartment', this
represents the fraction of the protein not assuming any function.
It however consumes resources for synthesis (precursors and processes),
which are the same as defined in the model files.
And takes up space in the compartment as specified in the model-files
for the protein.
If input is dict it has to have two keys; 'ID' and 'UnusedProteinFraction'.
By specifying this input one can define that the unused fraction of the protein
can also reside in other compartments and which processes it requires.
The value to 'ID' is the ID of a protein in the model.
The value to 'UnusedProteinFraction' is another dictionary.
This can have several keys which must be model-compartments.
For each of the keys the value is a dict holding IDs of model-processes as Keys
and process requirements as Values (numerical).
This specifies which processes each of the compartment-species of the protein
requires.
This generates the same constraint and TotalLevel-variable as with the simple input,
however a variable representing each of the compartment-species for the unused fraction
is added and incorporates the specific process requirements.
E.g: input = {'ID': 'proteinA',
'UnusedProteinFraction': {'Cytoplasm': {'Translation': 100, 'Folding': 10},
'Membrane': {'Translation': 100, 'Folding': 20, 'Secretion': 100}
}
}
This adds 'proteinA' to the model, where the unused fraction can reside either in
the Cytoplasm or the Membrane. However while the cytosolic-species only requires the
processes 'Translation' and 'Folding'; the membrane-bound species also requires 'Secretion'
and occupies more folding capacity.
Then the constraint 'Protein_proteinA' is added and the 3 variables
'TotalLevel_proteinA', 'Free_proteinA_Cytoplasm' and 'Free_proteinA_Membrane'.
"""
if type(input) is str:
input = {'ID': input}
if 'ID' not in list(input.keys()):
print('Error, no protein ID provided')
return
if input['ID'] not in list(self.ModelStructure.ProteinInfo.Elements.keys()):
print('Error, protein not in model')
return
if 'UnusedProteinFraction' not in list(input.keys()):
input.update({'UnusedProteinFraction':
{self.ModelStructure.ProteinInfo.Elements[input['ID']]['Compartment']:
self.ModelStructure.ProteinInfo.Elements[input['ID']]['ProcessRequirements']}})
self.LogBook.addEntry('Protein {} added with specifications {}.'.format(
input['ID'], str(json.dumps(input))))
Muindexlist = []
## Building RBA_Matrix-object for new constraint-row, representing protein ##
UsedProtein = RBA_Matrix()
UsedProtein.A = scipy.sparse.coo_matrix(
buildUsedProteinConstraint(Controler=self, protein=input['ID']))
UsedProtein.b = numpy.array([float(0)])
UsedProtein.f = numpy.array(self.Problem.LP.f)
UsedProtein.LB = numpy.array(self.Problem.LP.LB)
UsedProtein.UB = numpy.array(self.Problem.LP.UB)
UsedProtein.row_signs = ['E']
UsedProtein.row_names = ['Protein_'+input['ID']]
UsedProtein.col_names = self.Problem.LP.col_names
## Add used protein row to problem ##
self.Problem.LP.addMatrix(matrix=UsedProtein)
## Add used protein row to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UsedProtein)
## Building RBA_Matrix-object for new variable-col, representing total level of protein ##
TotProtein = RBA_Matrix()
TotProtein.A = scipy.sparse.coo_matrix(numpy.array(numpy.matrix(
numpy.array([float(0)]*self.Problem.LP.A.shape[0]+[float(-1)])).transpose()))
TotProtein.f = numpy.array([float(0)])
TotProtein.LB = numpy.array([float(0)])
TotProtein.UB = numpy.array([float(100000.0)])
TotProtein.b = numpy.array(list(self.Problem.LP.b)+list(UsedProtein.b))
TotProtein.row_signs = self.Problem.LP.row_signs+UsedProtein.row_signs
TotProtein.row_names = self.Problem.LP.row_names+UsedProtein.row_names
TotProtein.col_names = ['TotalLevel_'+input['ID']]
## Add total protein col to problem ##
self.Problem.LP.addMatrix(matrix=TotProtein)
## Add total protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=TotProtein)
## Building RBA_Matrix-object for new variable-col,##
## representing each compartment-species of the protein ##
for comp_species in list(input['UnusedProteinFraction'].keys()):
## Initiate RBA_Matrix object##
UnusedProtein = RBA_Matrix()
UnusedProtein.col_names = ['Free_'+input['ID']+'_'+comp_species]
## Extract required processes for protein and the respective demand ##
ProcIDs = list(input['UnusedProteinFraction'][comp_species].keys())
Preq = list(input['UnusedProteinFraction'][comp_species].values())
ProcessCost = dict(
zip([self.ModelStructure.ProcessInfo.Elements[k]['ID'] for k in ProcIDs], Preq))
## Get required charged trna buildingblocks and their stoichiometry in protein ##
composition = self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAcomposition']
## Extract the composition of charged trnas in terms of metabolic species ##
species = self.ModelStructure.ProcessInfo.Elements['Translation']['Components']
## Determine required metabolites and their stoichiometry in protein ##
MetaboliteCost = buildCompositionofUnusedProtein(
species=species, composition=composition)
## Assemble process and metabolite requirements into a stoichiometric column vector ##
## And add to RBA_Matrix object ##
colToAdd = numpy.array(numpy.matrix(numpy.array(list(MetaboliteCost.values())+list(ProcessCost.values()) +
[float(1)]+[self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAnumber']])).transpose())
UnusedProtein.A = scipy.sparse.coo_matrix(colToAdd)
## Add other information to RBA_Matrix object ##
UnusedProtein.row_names = list(MetaboliteCost.keys())+[str(pc+'_capacity') for pc in list(
ProcessCost.keys())]+['Protein_'+input['ID']]+[str(comp_species + '_density')]
UnusedProtein.b = numpy.zeros(len(UnusedProtein.row_names))
UnusedProtein.row_signs = ['E']*len(UnusedProtein.row_names)
UnusedProtein.LB = numpy.array([float(0)])
UnusedProtein.UB = numpy.array([float(100000.0)])
UnusedProtein.f = numpy.array([float(0)])
self.ProteinDilutionIndices = list(
zip(list(MetaboliteCost.keys()), UnusedProtein.col_names*len(list(MetaboliteCost.keys()))))
## Add free protein col to problem ##
self.Problem.LP.addMatrix(matrix=UnusedProtein)
## Add free protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UnusedProtein)
## Find coefficients of unused protein column, subject to dilution (Metabolite and Process cost) ##
## And add them to MuDepIndices_A ##
nonZeroEntries = numpy.where(UnusedProtein.A != 0)[0]
self.Problem.MuDepIndices_A += [(UnusedProtein.row_names[i], UnusedProtein.col_names[0]) for i in nonZeroEntries if UnusedProtein.row_names[i]
!= 'Protein_'+input['ID'] and UnusedProtein.row_names[i] not in self.Problem.CompartmentDensities]
self.setMu(self.Problem.Mu)
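# Sketch: add an individual (hypothetical) protein and fix its total level:
#
#   Simulation.addProtein('proteinA')
#   Simulation.Problem.setLB(inputDict={'TotalLevel_proteinA': 0.001})
#   Simulation.Problem.setUB(inputDict={'TotalLevel_proteinA': 0.001})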
## !!! ##
def eukaryoticDensities(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
Signs = ['L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L']
totalAA = 3.1*0.71
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*len(Compartments)+['E']
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
AlipidsA = numpy.zeros((7, len(Compartments)))
Alipids = RBA_Matrix()
Alipids.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs += ['E', 'E', 'E', 'E', 'E', 'E', 'E']
Alipids.b = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Alipids.LB = numpy.array([float(0)]*len(Compartments))
Alipids.UB = numpy.array([float(1)]*len(Compartments))
Alipids.f = numpy.array([float(0)]*len(Compartments))
AlipidsA[Alipids.row_names.index('M_pc_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000883*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00005852*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mIM')] = -0.00003377*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00000873*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'),
Alipids.col_names.index('F_mIM')] = -0.00002*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000039*totalAA
AlipidsA[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = -0.008547*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'),
('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM')]
AlipidsA[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = -0.000636*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0004822*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mOM')] = -0.0001289*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000167*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'), Alipids.col_names.index(
'F_mOM')] = -0.00004467*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000696*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c',
'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')]
Alipids.A = scipy.sparse.coo_matrix(AlipidsA)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), (
'M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), (
'M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
## !!! ##
def eukaryoticDensities2(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.69
m_mIM = 1.11
m_mIMS = 0.7
m_mOM = 7.2
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments)+1)
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
ConstraintMatrix = numpy.zeros((7, 0))
Alipids = RBA_Matrix()
Alipids.col_names = []
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([])
Alipids.UB = numpy.array([])
Alipids.f = numpy.array([])
MudepIndices = []
for pc in self.ModelStructure.ProcessInfo.Elements.keys():
if self.ModelStructure.ProcessInfo.Elements[pc]['ID'] not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])
# Alipids.LB = numpy.array(list(Alipids.LB).append(list(self.Problem.LP.LB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.UB = numpy.array(list(Alipids.UB).append(list(self.Problem.LP.UB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.f = numpy.array(list(Alipids.f).append(list(self.Problem.LP.f)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# wrap the scalar bound/objective value in a list, so numpy.concatenate receives 1-d arrays
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
[list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
[list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
[list(self.Problem.LP.f)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]])])
for p in self.ModelStructure.ProcessInfo.Elements[pc]['Composition'].keys():
lE = sum(list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values(
)))*self.ModelStructure.ProcessInfo.Elements[pc]['Composition'][p]
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
# metabolite rows are indexed via row_names; (row, column) index tuples are collected for MuDepIndices_A
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
MudepIndices += [('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
MudepIndices += [('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']),
('M_ergst_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
ConstraintMatrix = ConstraintMatrixNew
for e in self.ModelStructure.EnzymeInfo.Elements.keys():
if e not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(e)
# xnew = list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)]
# wrap the scalar bound/objective value in a list, so numpy.concatenate receives 1-d arrays
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
[list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)]])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
[list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)]])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
[list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)]])])
# Alipids.LB = numpy.array(list(Alipids.LB).append(xnew))
# Alipids.UB = numpy.array(list(Alipids.UB).append(
# list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)]))
# Alipids.f = numpy.array(list(Alipids.f).append(
# list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)]))
for p in self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'].keys():
lE = sum(
list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values()))
lE *= self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'][p]['StochFac']
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
MudepIndices += [('M_pc_SC_c', e), ('M_pe_SC_c', e), ('M_ptd1ino_SC_c', e),
('M_ps_SC_c', e), ('M_clpn_SC_m', e), ('M_pa_SC_c', e)]
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
MudepIndices += [('M_pc_SC_c', e)]
MudepIndices += [('M_pe_SC_c', e)]
MudepIndices += [('M_ptd1ino_SC_c', e)]
MudepIndices += [('M_ps_SC_c', e)]
MudepIndices += [('M_clpn_SC_m', e)]
MudepIndices += [('M_pa_SC_c', e)]
MudepIndices += [('M_ergst_c', e)]
ConstraintMatrix = ConstraintMatrixNew
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(ConstraintMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
# also update the mu=1 matrix (mirrors eukaryoticDensities3)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities3(self, totalAA=3.1, VolumeFraction=False, CompartmentRelationships=True, CompartmentComponents=False):
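"""
Adds eukaryotic compartment constraints to the LP: an occupation variable and
density row per compartment, a (volume-)capacity row per compartment, a total
volume-fraction balance over the F_* variables and, optionally, fixed
mitochondrial membrane ratios (m_mIM, m_mIMS, m_mOM) and a lipid/ergosterol
composition coupling of the membranes (CompartmentComponents).
"""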
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
# A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
# A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
OccupationMatrix = RBA_Matrix()
# A = numpy.ones((len(Compartments)+1, len(Compartments)))
A = -numpy.eye(len(Compartments))
# Eye = -numpy.eye(len(Compartments))
# A[0:len(Compartments), :] = Eye
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
# OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1)+[float(totalAA)])
OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1))
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
# OccupationMatrix.row_signs = ['E']*(len(Compartments))+['L']
OccupationMatrix.row_signs = ['E']*(len(Compartments))
# OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
# 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalProtein']
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
CompartmentMatrix = RBA_Matrix()
if VolumeFraction:
A = numpy.eye(len(Compartments))*5/float(totalAA)
else:
A = numpy.eye(len(Compartments))/float(totalAA)
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments))
# CompartmentMatrix.row_signs = ['E']*(len(Compartments))
CompartmentMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume',
'm_volume', 'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume']
CompartmentMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
CompartmentMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
VolumeMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
# A[len(Compartments), [1, 5, 6, 8, 9]] = 0
# A[len(Compartments), 8] = 0
VolumeMatrix.A = scipy.sparse.coo_matrix(A)
VolumeMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
VolumeMatrix.f = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.LB = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.UB = numpy.array([float(1)]*len(Compartments))
VolumeMatrix.row_signs = ['L']*(len(Compartments))+['E']
# VolumeMatrix.row_signs = ['E']*(len(Compartments))+['E']
VolumeMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume', 'm_volume',
'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume', 'TotalVolume']
VolumeMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
if not CompartmentRelationships:
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
VolumeMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
VolumeMatrix.row_signs += ['E', 'E', 'E']
VolumeMatrix.b = numpy.array(list(VolumeMatrix.b)+[float(0)]*3)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_mOM')] = -m_mOM
VolumeMatrix.A = scipy.sparse.coo_matrix(Anew)
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
PC_vM = 0.0003635
PE_vM = 0.4156
PI_vM = 0.0001297
PS_vM = 0.00003435
CL_vM = 0.0000068
PA_vM = 0.0000186
ES_vM = 0.0142
PC_n = 0.000055
PE_n = 0.000035
PI_n = 0.000017
PS_n = 0.0000072
CL_n = 0.0
PA_n = 0.0000031
ES_n = 0.0086
PC_gM = 0.00043
PE_gM = 0.00044
PI_gM = 0.00041
PS_gM = 0.0
CL_gM = 0.00022
PA_gM = 0.0
ES_gM = 0.0
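# NOTE: the compartment-specific lipid/ergosterol coefficients defined above are
# overwritten with zeros below, so the composition coupling currently contributes
# no coefficients unless these zero assignments are removed or replaced.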
PC_n = 0.0
PE_n = 0.0
PI_n = 0.0
PS_n = 0.0
CL_n = 0.0
PA_n = 0.0
ES_n = 0.0
PC_gM = 0.0
PE_gM = 0.0
PI_gM = 0.0
PS_gM = 0.0
CL_gM = 0.0
PA_gM = 0.0
ES_gM = 0.0
PC_vM = 0.0
PE_vM = 0.0
PI_vM = 0.0
PS_vM = 0.0
CL_vM = 0.0
PA_vM = 0.0
ES_vM = 0.0
PC_mIM = 0.0
PE_mIM = 0.0
PI_mIM = 0.0
PS_mIM = 0.0
CL_mIM = 0.0
PA_mIM = 0.0
ES_mIM = 0.0
PC_mOM = 0.0
PE_mOM = 0.0
PI_mOM = 0.0
PS_mOM = 0.0
CL_mOM = 0.0
PA_mOM = 0.0
ES_mOM = 0.0
Alipids = RBA_Matrix()
Alipids.col_names = ['F_mIM', 'F_mOM', 'F_vM', 'F_n', 'F_gM']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([0, 0, 0, 0, 0])
Alipids.UB = numpy.array([1, 1, 1, 1, 1])
Alipids.f = numpy.array([0, 0, 0, 0, 0])
LipidMatrix = numpy.zeros((7, 5))
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mIM')] = PC_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mIM')] = PE_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mIM')] = PI_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mIM')] = PS_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mIM')] = CL_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mIM')] = PA_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = ES_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = PC_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mOM')] = PE_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mOM')] = PI_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mOM')] = PS_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mOM')] = CL_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mOM')] = PA_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mOM')] = ES_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_vM')] = PC_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_vM')] = PE_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_vM')] = PI_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_vM')] = PS_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_vM')] = CL_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_vM')] = PA_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_vM')] = ES_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_n')] = PC_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_n')] = PE_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_n')] = PI_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_n')] = PS_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_n')] = CL_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_n')] = PA_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_n')] = ES_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_gM')] = PC_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_gM')] = PE_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_gM')] = PI_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_gM')] = PS_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_gM')] = CL_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_gM')] = PA_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_gM')] = ES_gM/totalAA
MudepIndices = [('M_pc_SC_c', i) for i in Alipids.col_names]+[('M_pe_SC_c', i) for i in Alipids.col_names]+[('M_ptd1ino_SC_c', i) for i in Alipids.col_names]+[('M_ps_SC_c', i)
for i in Alipids.col_names]+[('M_clpn_SC_m', i) for i in Alipids.col_names]+[('M_pa_SC_c', i) for i in Alipids.col_names]+[('M_ergst_c', i) for i in Alipids.col_names]
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(LipidMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities4(self, CompartmentRelationships=True):
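"""
Adds one occupation variable (O_*) per compartment with an 'O_total' balance row
and, if CompartmentRelationships is True, fixed ratios between the mitochondrial
matrix and its membranes. Growth-rate dependencies of the density rows are moved
from the matrix to parameter-based definitions in MuDependencies.
"""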
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
m_mIM = 0.5
m_mIMS = 1
m_mOM = 5
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': 'AAres_PG_nucleus_DNA'})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'O_total': {'Equation': 'amino_acid_concentration_total - AAres_PG_secreted_Euk', 'Variables': ['amino_acid_concentration_total', 'AAres_PG_secreted_Euk']}})
self.Problem.MuDependencies['FromMatrix']['b'].remove('n_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('vM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIMS_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('m_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('erM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mOM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('x_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('cM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('gM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('c_density')
## !!! ##
def eukaryoticDensities_calibration(self, CompartmentRelationships=False, mitoProportions={}, amino_acid_concentration_total='amino_acid_concentration_total'):
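"""
Calibration variant of the occupation constraints: density rows are tied to
per-compartment non-enzymatic protein parameters and the total amino-acid
concentration parameter; mitochondrial proportions can be fixed via
mitoProportions (keys 'm_mIM', 'm_mIMS', 'm_mOM').
"""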
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA_parameter = amino_acid_concentration_total
totalAA = 3.1
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
if len(list(mitoProportions.keys())) == 3:
m_mIM = mitoProportions['m_mIM']
m_mIMS = mitoProportions['m_mIMS']
m_mOM = mitoProportions['m_mOM']
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': {'Equation': '-nonenzymatic_proteins_n/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_n', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIM_density': {
'Equation': '-nonenzymatic_proteins_mIM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'vM_density': {
'Equation': '-nonenzymatic_proteins_vM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_vM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIMS_density': {
'Equation': '-nonenzymatic_proteins_mIMS/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIMS', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'m_density': {'Equation': '-nonenzymatic_proteins_m/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_m', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'erM_density': {
'Equation': '-nonenzymatic_proteins_erM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_erM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mOM_density': {
'Equation': '-nonenzymatic_proteins_mOM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mOM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'x_density': {'Equation': '-nonenzymatic_proteins_x/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_x', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'cM_density': {
'Equation': '-nonenzymatic_proteins_cM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_cM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'gM_density': {
'Equation': '-nonenzymatic_proteins_gM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_gM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'c_density': {'Equation': '-nonenzymatic_proteins_c/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_c', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'O_total': {'Equation': '{} - nonenzymatic_proteins_Secreted/inverse_average_protein_length'.format(totalAA_parameter), 'Variables': [
totalAA_parameter, 'nonenzymatic_proteins_Secreted', 'inverse_average_protein_length']}})
# !!! deal with hardcoded parameter_names... !!!
def estimate_specific_Kapps(self, proteomicsData, flux_bounds, mu, biomass_function=None, target_biomass_function=True, parsimonious_fba=True):
"""
Parameters
----------
proteomicsData : pandas.DataFrame (in mmol/gDW)
flux_bounds : pandas.DataFrame (in mmol/(gDW*h))
mu : float (in 1/h)
biomass_function : str
target_biomass_function : bool
parsimonious_fba : bool
"""
from scipy.stats.mstats import gmean
old_model = copy.deepcopy(self.model)
# iterate over a copy, since elements may be removed from the list below
for i in list(self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements):
if i.species == 'average_protein_c':
new_agg = rba.xml.parameters.Aggregate(id_='total_protein', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='amino_acid_concentration_total'))
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='inverse_average_protein_length'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_protein'
else:
self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements.remove(
i)
for i in list(self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements):
if i.species == 'mrna':
new_agg = rba.xml.parameters.Aggregate(id_='total_rna', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='RNA_massfraction_CarbonLimitation'))
new_agg.function_references.append(
rba.xml.parameters.FunctionReference(function='RNA_inversemillimolarweight'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_rna'
else:
self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements.remove(
i)
self.rebuild_from_model()
self.setMedium(self.Medium)
self.addExchangeReactions()
self.setMu(mu)
if target_biomass_function:
self.buildFBA(objective='targets', maintenanceToBM=True)
BMfunction = 'R_BIOMASS_targetsRBA'
else:
self.buildFBA(objective='classic', maintenanceToBM=False)
BMfunction = biomass_function
for j in [i for i in self.Medium.keys() if self.Medium[i] == 0]:
Exrxn = 'R_EX_'+j.split('M_')[-1]+'_e'
self.FBA.setUB({Exrxn: 0})
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.clearObjective()
self.FBA.setObjectiveCoefficients({BMfunction: -1})
self.FBA.solveLP()
BMfluxOld = self.FBA.SolutionValues[BMfunction]
if parsimonious_fba:
self.FBA.parsimonise()
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.setLB({BMfunction: BMfluxOld})
self.FBA.setUB({BMfunction: BMfluxOld})
self.FBA.solveLP()
FluxDistribution = pandas.DataFrame(index=list(
self.FBA.SolutionValues.keys()), columns=['FluxValues'])
FluxDistribution['FluxValues'] = list(self.FBA.SolutionValues.values())
BMfluxNew = self.FBA.SolutionValues[BMfunction]
ProtoIDmap = {}
for i in self.ModelStructure.ProteinInfo.Elements.keys():
ProtoID = self.ModelStructure.ProteinInfo.Elements[i]['ProtoID']
if ProtoID in list(proteomicsData['ID']):
if not pandas.isna(proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]):
if proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0] != 0:
if ProtoID in ProtoIDmap.keys():
ProtoIDmap[ProtoID]['ModelProteins'].append(i)
else:
ProtoIDmap.update(
{ProtoID: {'ModelProteins': [i], 'CopyNumber': proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]}})
ReactionMap = {}
for i in self.ModelStructure.ReactionInfo.Elements.keys():
if '_duplicate_' in i:
continue
else:
if i in list(FluxDistribution.index):
if FluxDistribution.loc[i, 'FluxValues'] != 0:
ReactionMap.update({i: {'ModelReactions': list(
[i]+self.ModelStructure.ReactionInfo.Elements[i]['Twins']), 'Flux': FluxDistribution.loc[i, 'FluxValues']}})
IsoReaction2ProtoReaction = {}
for i in ReactionMap.keys():
for j in ReactionMap[i]['ModelReactions']:
IsoReaction2ProtoReaction[j] = i
EnzymeMap = {}
for i in self.ModelStructure.EnzymeInfo.Elements.keys():
if self.ModelStructure.EnzymeInfo.Elements[i]['Reaction'] in IsoReaction2ProtoReaction:
CompositionDict = {self.ModelStructure.ProteinInfo.Elements[j]['ProtoID']: self.ModelStructure.EnzymeInfo.Elements[
i]['Subunits'][j] for j in self.ModelStructure.EnzymeInfo.Elements[i]['Subunits'].keys()}
ProtoReaction = IsoReaction2ProtoReaction[self.ModelStructure.EnzymeInfo.Elements[i]['Reaction']]
CopyNumbers = []
Stoichiometries = []
EnzymeNumbers = []
for j in CompositionDict.keys():
if j in ProtoIDmap.keys():
CopyNumbers.append(ProtoIDmap[j]['CopyNumber'])
Stoichiometries.append(CompositionDict[j])
EnzymeNumbers.append(ProtoIDmap[j]['CopyNumber']/CompositionDict[j])
GM_enzymenumber = 0
if len(EnzymeNumbers) > 0:
GM_enzymenumber = gmean(numpy.array(EnzymeNumbers))
EnzymeMap.update(
{i: {'ProtoReaction': ProtoReaction, 'EnzymeNumber': GM_enzymenumber}})
EnzymeMap2 = {}
for i in ReactionMap.keys():
totalIsoEnzymeNumber = 0
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
totalIsoEnzymeNumber += EnzymeMap[respectiveEnzyme]['EnzymeNumber']
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
concentration = EnzymeMap[respectiveEnzyme]['EnzymeNumber']
if concentration != 0:
if numpy.isfinite(concentration):
specificFlux = ReactionMap[i]['Flux'] * \
EnzymeMap[respectiveEnzyme]['EnzymeNumber']/totalIsoEnzymeNumber
EnzymeMap2.update({respectiveEnzyme: {'CopyNumber': EnzymeMap[respectiveEnzyme]['EnzymeNumber'],
'Concentration': concentration, 'Flux': specificFlux, 'Kapp': abs(specificFlux/concentration)}})
self.model = old_model
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
for i in EnzymeMap2.keys():
# if EnzymeMap2[i]['CopyNumber'] == 0:
# continue
out.loc[i, 'Enzyme_ID'] = i
out.loc[i, 'CopyNumber'] = EnzymeMap2[i]['CopyNumber']
out.loc[i, 'Concentration'] = EnzymeMap2[i]['Concentration']
out.loc[i, 'Flux'] = EnzymeMap2[i]['Flux']
out.loc[i, 'Kapp'] = EnzymeMap2[i]['Kapp']
return(out)
def estimate_default_Kapps(self, target_mu, compartment_densities_and_PGs=None, flux_bounds=None, plateau_limit=4, mu_approximation_precision=0.005, transporter_to_lumen_coefficient=10, default_kapp_LB=0, default_kapp_UB=1000000, start_val=200000, densities_to_fix=None, eukaryotic=False):
"""
Parameters
----------
target_mu : float
compartment_densities_and_PGs : pandas.DataFrame
flux_bounds : pandas.DataFrame
"""
orig_enz = self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value
out = pandas.DataFrame()
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
self.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
self.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
kapp_LB = default_kapp_LB
if default_kapp_UB is not None:
kapp_UB = default_kapp_UB
else:
kapp_UB = orig_enz*1000
# new_kapp = (kapp_UB+kapp_LB)/2
if start_val is not None:
new_kapp = start_val
else:
new_kapp = orig_enz
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus = []
Mus_Error = []
Kapps = []
last_Mu = numpy.nan
plateau_count = 0
if abs(target_mu - Mu_pred) > mu_approximation_precision:
while abs(target_mu - Mu_pred) > mu_approximation_precision:
if plateau_count >= plateau_limit:
break
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = new_kapp
self.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = transporter_to_lumen_coefficient*new_kapp
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus_Error.append(abs(target_mu - Mu_pred))
Mus.append(Mu_pred)
Kapps.append(new_kapp)
if Mu_pred > target_mu:
new_kapp_prelim = kapp_LB+(0.5*abs(kapp_LB-new_kapp))
kapp_UB = new_kapp
elif Mu_pred < target_mu:
new_kapp_prelim = kapp_UB-(0.5*abs(new_kapp-kapp_UB))
kapp_LB = new_kapp
new_kapp = new_kapp_prelim
if len(Mus) > 2:
if Mus[-2] == Mu_pred:
plateau_count += 1
else:
plateau_count = 0
else:
Mus.append(Mu_pred)
Mus_Error.append(abs(target_mu - Mu_pred))
Kapps.append(
self.model.parameters.functions._elements_by_id['default_efficiency'].parameters._elements_by_id['CONSTANT'].value)
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
out['Mu'] = Mus
out['delta_Mu'] = Mus_Error
out['default_efficiency'] = Kapps
out['default_transporter_efficiency'] = [transporter_to_lumen_coefficient*i for i in Kapps]
return(out)
def inject_default_kapps(self, default_kapp, default_transporter_kapp):
if numpy.isfinite(default_kapp):
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapp
if numpy.isfinite(default_transporter_kapp):
self.model.parameters.functions._elements_by_id[
'default_transporter_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_transporter_kapp
self.rebuild_from_model()
def inject_process_capacities(self, process_efficiencies):
"""
Parameters
----------
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
for i in process_efficiencies.index:
if numpy.isfinite(process_efficiencies.loc[i, 'Value']):
if process_efficiencies.loc[i, 'Process'] in self.model.processes.processes._elements_by_id.keys():
if not pandas.isna(process_efficiencies.loc[i, 'Value']):
self.model.processes.processes._elements_by_id[process_efficiencies.loc[i,
'Process']].machinery.capacity.value = process_efficiencies.loc[i, 'Parameter']
const = rba.xml.parameters.Function(process_efficiencies.loc[i, 'Parameter'], 'constant', parameters={
'CONSTANT': process_efficiencies.loc[i, 'Value']}, variable=None)
if process_efficiencies.loc[i, 'Parameter'] not in self.model.parameters.functions._elements_by_id.keys():
self.model.parameters.functions.append(const)
else:
self.model.parameters.functions._elements_by_id[const.id].parameters._elements_by_id[
'CONSTANT'].value = process_efficiencies.loc[i, 'Value']
self.rebuild_from_model()
def inject_specific_kapps(self, specific_kapps, round_to_digits=0):
"""
Parameters
----------
specific_kapps : pandas.DataFrame
"""
parameterized = []
if 'Enzyme_ID' in list(specific_kapps.columns):
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if numpy.isfinite(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
# copy, to avoid appending to the isozyme list stored in ModelStructure
all_enzs = list(self.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes'])
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in self.model.parameters.functions._elements_by_id.keys():
self.model.parameters.functions.append(const)
else:
# self.model.parameters.functions._elements_by_id[const.id] = const
self.model.parameters.functions._elements_by_id[
const.id].parameters._elements_by_id['CONSTANT'].value = val
count = 0
# self.model.parameters.functions._elements_by_id['default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapp
for e in self.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
self.rebuild_from_model()
def get_parameter_definition(self, parameter):
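# Returns {parameter_id: {'Type': ..., 'Equation': ..., 'Variables': [...], ...}}
# for plain functions and aggregates, or an empty dict for unknown parameters.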
if parameter in self.model.parameters.functions._elements_by_id.keys():
function = self.model.parameters.functions._elements_by_id[parameter]
expression = parse_function(function)
elif parameter in self.model.parameters.aggregates._elements_by_id.keys():
function_id_list = get_function_list_of_aggregate(
aggregate=self.model.parameters.aggregates._elements_by_id[parameter])
expression = parse_aggregate(aggregate=self.model.parameters.aggregates._elements_by_id[parameter], function_list=[
self.model.parameters.functions._elements_by_id[f_id] for f_id in function_id_list])
else:
return({})
return(expression)
def get_parameter_value(self, parameter):
if parameter in self.model.parameters.functions._elements_by_id.keys():
function = self.model.parameters.functions._elements_by_id[parameter]
expression = parse_function_with_parameter_values(function)
elif parameter in self.model.parameters.aggregates._elements_by_id.keys():
function_id_list = get_function_list_of_aggregate(
aggregate=self.model.parameters.aggregates._elements_by_id[parameter])
expression = parse_aggregate_with_parameter_values(aggregate=self.model.parameters.aggregates._elements_by_id[parameter], function_list=[
self.model.parameters.functions._elements_by_id[f_id] for f_id in function_id_list])
else:
return({parameter: numpy.nan})
variable_values = {}
for v in expression[parameter]['Variables']:
if v == 'growth_rate':
variable_values[v] = self.Mu
elif v in self.Medium.keys():
variable_values[v] = self.Medium[v]
elif v.endswith('_e'):
if v[:-2] in self.Medium.keys():
variable_values[v] = self.Medium[v[:-2]]
else:
variable_values = {}
return({parameter: numpy.nan})
result = evaluate_expression(expression_dictionary=expression,
variable_values=variable_values)
return(result)
def get_parameter_values(self, parameter_type, species=None, output_format='dict'):
if parameter_type == 'medium_composition':
if species is None:
results = self.Medium
elif type(species) is str:
results = {species: self.Medium[species]}
elif type(species) is list:
results = {sp: self.Medium[sp] for sp in species}
elif parameter_type == 'machine_efficiencies':
if species is None:
parameter_names = {process_name: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[
process_name]['ID']].machinery.capacity.value for process_name in self.ModelStructure.ProcessInfo.Elements.keys()}
elif type(species) is str:
parameter_names = {
species: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[species]['ID']].machinery.capacity.value}
elif type(species) is list:
parameter_names = {
sp: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[sp]['ID']].machinery.capacity.value for sp in species}
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]) for pn in parameter_names}
elif parameter_type == 'enzyme_efficiencies' or parameter_type == 'enzyme_efficiencies_forward' or parameter_type == 'enzyme_efficiencies_backward':
if species is None:
parameter_names = {enzyme_name: {'Forward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].forward_efficiency, 'Backward': self.model.enzymes.enzymes._elements_by_id[
enzyme_name].backward_efficiency} for enzyme_name in self.ModelStructure.EnzymeInfo.Elements.keys()}
elif type(species) is str:
parameter_names = {species: {'Forward': self.model.enzymes.enzymes._elements_by_id[
species].forward_efficiency, 'Backward': self.model.enzymes.enzymes._elements_by_id[species].backward_efficiency}}
elif type(species) is list:
parameter_names = {enzyme_name: {'Forward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].forward_efficiency,
'Backward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].backward_efficiency} for enzyme_name in species}
if parameter_type == 'enzyme_efficiencies':
results = {pn: {'Forward': self.get_parameter_value(parameter=parameter_names[pn]['Forward']), 'Backward': self.get_parameter_value(
parameter=parameter_names[pn]['Backward'])} for pn in parameter_names.keys()}
elif parameter_type == 'enzyme_efficiencies_forward':
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]['Forward']) for pn in parameter_names.keys()}
elif parameter_type == 'enzyme_efficiencies_backward':
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]['Backward']) for pn in parameter_names.keys()}
elif parameter_type == 'maximal_densities':
density_dict = {i.compartment: self.get_parameter_value(
parameter=i.upper_bound) for i in self.model.density.target_densities}
if species is None:
results = density_dict
elif type(species) is str:
results = {sp: density_dict[sp]
           for sp in [species] if sp in density_dict.keys()}
elif type(species) is list:
results = {sp: density_dict[sp] for sp in species if sp in density_dict.keys()}
elif parameter_type == 'target_values':
target_dict = {self.ModelStructure.TargetInfo.Elements[target_ID]['TargetEntity']: {'Target_id': target_ID, 'Target_value': self.get_parameter_value(
parameter=self.ModelStructure.TargetInfo.Elements[target_ID]['TargetValue'])} for target_ID in self.ModelStructure.TargetInfo.Elements.keys()}
if species is None:
results = target_dict
elif type(species) is str:
results = {sp: target_dict[sp]
           for sp in [species] if sp in target_dict.keys()}
elif type(species) is list:
results = {sp: target_dict[sp] for sp in species if sp in target_dict.keys()}
if output_format == 'dict':
return(results)
if output_format == 'json':
return(json.dumps(results))
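# --- module-level helpers for parsing RBA parameter functions and aggregates ---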
def get_parameter_value_from_model(function, parameter_ID):
return(function.parameters._elements_by_id[parameter_ID].value)
def make_paramter_function_specific(function_ID, parameter, return_normal=False):
if return_normal:
return(str(parameter))
else:
return(str('{}__parameter__{}'.format(function_ID, parameter)))
def parse_function(function):
independent_variable = function.variable
function_ID = function.id
if function.type == 'constant':
eq = make_paramter_function_specific(
function_ID=function_ID, parameter='CONSTANT', return_normal=True)
latex_string = str(make_paramter_function_specific(
function_ID=function_ID, parameter='CONSTANT', return_normal=True))
function_parameter_values = {'CONSTANT': get_parameter_value_from_model(
function=function, parameter_ID='CONSTANT')}
elif function.type == 'exponential':
eq = 'e**({}*{})'.format(make_paramter_function_specific(function_ID=function_ID,
parameter='RATE', return_normal=True), str(independent_variable))
latex_string = str('e^{'+str(make_paramter_function_specific(function_ID=function_ID,
parameter='RATE', return_normal=True)) + ' '+str(independent_variable)+'}')
function_parameter_values = {'RATE': get_parameter_value_from_model(
function=function, parameter_ID='RATE')}
elif function.type == 'linear':
eq = str('{}+{}*{}'.format(make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_CONSTANT', return_normal=True),
make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_COEF', return_normal=True), str(independent_variable)))
latex_string = str(make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_CONSTANT', return_normal=True) +
make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_COEF', return_normal=True)+' '+str(independent_variable))
function_parameter_values = {'LINEAR_CONSTANT': get_parameter_value_from_model(function=function, parameter_ID='LINEAR_CONSTANT'),
'LINEAR_COEF': get_parameter_value_from_model(function=function, parameter_ID='LINEAR_COEF'),
'X_MIN': get_parameter_value_from_model(function=function, parameter_ID='X_MIN'),
'X_MAX': get_parameter_value_from_model(function=function, parameter_ID='X_MAX'),
'Y_MIN': get_parameter_value_from_model(function=function, parameter_ID='Y_MIN'),
'Y_MAX': get_parameter_value_from_model(function=function, parameter_ID='Y_MAX'), }
elif function.type == 'michaelisMenten':
eq = str('{}*{}/({}+{})'.format(make_paramter_function_specific(function_ID=function_ID, parameter='kmax', return_normal=True),
str(independent_variable), str(independent_variable), make_paramter_function_specific(function_ID=function_ID, parameter='Km', return_normal=True)))
function_parameter_values = {'kmax': get_parameter_value_from_model(function=function, parameter_ID='kmax'),
'Km': get_parameter_value_from_model(function=function, parameter_ID='Km'),
'Y_MIN': get_parameter_value_from_model(function=function, parameter_ID='Y_MIN')}
return({function_ID: {'Type': function.type, 'Equation': eq, 'Variables': [str(independent_variable)], 'Function_parameters': function_parameter_values}})
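# Example of the returned structure for a michaelisMenten function (illustrative IDs):
# {'glucose_transporter_efficiency': {'Type': 'michaelisMenten',
# 'Equation': 'kmax*M_glc__D/(M_glc__D+Km)',
# 'Variables': ['M_glc__D'],
# 'Function_parameters': {'kmax': ..., 'Km': ..., 'Y_MIN': ...}}}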
def parse_function_with_parameter_values(function):
independent_variable = function.variable
function_ID = function.id
if function.type == 'constant':
return({function_ID: {'Equation': '{}'.format(str(get_parameter_value_from_model(function=function, parameter_ID='CONSTANT'))), 'Variables': []}})
elif function.type == 'exponential':
return({function_ID: {'Equation': '{}**({}*{})'.format(str(numpy.e), str(get_parameter_value_from_model(function=function, parameter_ID='RATE')), str(independent_variable)), 'Variables': [str(independent_variable)]}})
elif function.type == 'linear':
return({function_ID: {'Equation': str('{}+{}*{}'.format(str(get_parameter_value_from_model(function=function, parameter_ID='LINEAR_CONSTANT')), str(get_parameter_value_from_model(function=function, parameter_ID='LINEAR_COEF')), str(independent_variable))), 'Variables': [str(independent_variable)]}})
elif function.type == 'michaelisMenten':
return({function_ID: {'Equation': str('{}*{}/({}+{})'.format(str(get_parameter_value_from_model(function=function, parameter_ID='kmax')), str(independent_variable), str(get_parameter_value_from_model(function=function, parameter_ID='Km')), str(independent_variable))), 'Variables': [str(independent_variable)]}})
def get_parameter_of_function(function, parameter):
return(function.parameters._elements_by_id[parameter])
def join_functions_multiplicatively(parsed_function_list):
term_list = []
variable_list = []
for function in parsed_function_list:
function_ID = list(function.keys())[0]
term_list.append(str('('+function[function_ID]['Equation']+')'))
variable_list += function[function_ID]['Variables']
return({'Type': 'Aggregate', 'Equation': '*'.join(term_list), 'Variables': list(set(variable_list))})
def get_function_list_of_aggregate(aggregate):
return([agg.function for agg in aggregate.function_references._elements])
def parse_aggregate_with_parameter_values(aggregate, function_list):
aggregate_ID = aggregate.id
if aggregate.type == 'multiplication':
parsed_function_list = [parse_function_with_parameter_values(
function) for function in function_list]
return({aggregate_ID: join_functions_multiplicatively(parsed_function_list=parsed_function_list)})
else:
return({aggregate_ID: {'Equation': '', 'Variables': []}})
def parse_aggregate(aggregate, function_list):
aggregate_ID = aggregate.id
if aggregate.type == 'multiplication':
parsed_function_list = [parse_function(function) for function in function_list]
result = {aggregate_ID: join_functions_multiplicatively(
parsed_function_list=parsed_function_list)}
result[aggregate_ID]['Multiplicative Terms'] = [f.id for f in function_list]
return(result)
else:
return({aggregate_ID: {'Type': 'Aggregate', 'Equation': '', 'Variables': [], 'Multiplicative Terms': []}})
# def transform_to_latex(equation):
#
def MediumDependentCoefficients_A(Controler):
out = {}
MedDepRxns = [list(i.keys()) for i in list(Controler.ExchangeMap.values())]
MedDepRxnsFlatted = list(set([item for sublist in MedDepRxns for item in sublist]))
for i in Controler.ModelStructure.EnzymeConstraintsInfo.Elements.keys():
if Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction'] in MedDepRxnsFlatted:
nonConst = False
for j in Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['CapacityParameter']:
if list(j.values())[0]['FunctionType'] != 'constant':
nonConst = True
if nonConst:
if Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction'] in list(out.keys()):
out[Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]
['AssociatedReaction']].append(i)
else:
out.update(
{Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction']: [i]})
return([(out[i][0], Controler.ModelStructure.ReactionInfo.Elements[i]['Enzyme'])for i in out.keys()])
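# Returns [False] if the species does not switch between zero and non-zero
# concentration, otherwise [True, boundValue] with the exchange bound to apply
# (1000.0 when the species appears in the medium, 0.0 when it is removed).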
def QualitativeMediumChange(Controller, changes, species):
QualitativeMediumChange = False
if float(Controller.Medium[species]) == float(0):
if float(changes[species]) != float(0):
boundValue = 1000.0
QualitativeMediumChange = True
else:
return([QualitativeMediumChange])
if float(Controller.Medium[species]) != float(0):
if float(changes[species]) == float(0):
boundValue = 0.0
QualitativeMediumChange = True
else:
return([QualitativeMediumChange])
return([QualitativeMediumChange, float(boundValue)])
def ProtoProteomeRecording(Controller, run, Proteinlevels):
out = {}
for i in list(Controller.ModelStructure.ProteinGeneMatrix['ProtoProteins']):
row_ind = list(Controller.ModelStructure.ProteinGeneMatrix['ProtoProteins']).index(i)
nonZero = list(numpy.nonzero(
Controller.ModelStructure.ProteinGeneMatrix['Matrix'][row_ind, :])[0])
level = 0
for j in nonZero:
id = Controller.ModelStructure.ProteinGeneMatrix['Proteins'][j]
level += Proteinlevels.loc[id, run]
out.update({i: level})
return(out)
def ProteomeRecording(Controller, run):
EnzDF = pandas.DataFrame(index=Controller.Problem.Enzymes)
PrcDF = pandas.DataFrame(index=Controller.Problem.Processes)
EnzDF[run] = [Controller.Problem.SolutionValues[i]for i in Controller.Problem.Enzymes]
PrcDF[run] = [Controller.Problem.SolutionValues[i]for i in Controller.Problem.Processes]
ProteinProteinMatrix = numpy.array(
Controller.ModelStructure.ProteinMatrix['Matrix']).astype(numpy.float64)
C = Controller.ModelStructure.ProteinMatrix['Consumers']
Consumers = []
for i in C:
if i.startswith('P_'):
# Consumers.append(str(i+'_machinery'))
Consumers.append(str(i))
if not i.startswith('P_'):
Consumers.append(i)
Proteins = Controller.ModelStructure.ProteinMatrix['Proteins']
DF = pandas.concat([EnzDF, PrcDF], axis=0)
ProteinLevels = | pandas.DataFrame(index=Proteins) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
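# Unit tests for PeriodIndex operations: object conversion, min/max, repr and
# summary formatting, resolution, and arithmetic with offsets and timedeltas.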
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
            # same internal values, different freq
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - | pd.Period('NaT', freq='M') | pandas.Period |
#!/usr/bin/env python3
import git
import pandas as pd
from hourly import get_work_commits, is_clocked_in, is_clocked_out, update_log, commit_log, get_labor
from hourly import get_hours_worked, get_earnings, get_labor_range
from hourly import plot_labor, get_current_user, get_clocks
from hourly import invoice
from hourly import get_local_timezone
import plotly.graph_objs as go
import plotly.offline as po
from omegaconf import OmegaConf, DictConfig, ListConfig
import hydra
from os import path
import os
import sys
import logging
import copy
import numpy as np
import datetime
def handle_errors(cfg, error_msg = None):
if error_msg is not None:
print(error_msg)
if cfg.handle_errors == 'exit':
sys.exit()
else:
raise
def commit_(repo, commit_message, logfile = None):
if logfile is not None:
repo.index.add([logfile])
commit = repo.index.commit(commit_message)
return commit
def identify_user(user, cfg):
user_id = []
for id_type in cfg.commit.identity:
if id_type in ['name', 'email']:
user_id.append(getattr(user, id_type))
if len(user_id) > 1:
return tuple(user_id)
else:
return user_id[0]
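# Example of what identify_user yields (sketch; depends on cfg.commit.identity):
# with identity = ['name', 'email'] it returns a (name, email) tuple, matching the
# tuple keys produced by work.groupby(['name', 'email']) in get_user_work below;
# with a single entry such as ['name'] it returns the bare name string instead.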
def process_commit(cfg, work, repo):
"""commits clock-in/out message
If only a message is supplied, commits without clocking in/out
"""
header_depth = '#'*cfg.work_log.header_depth
commit_message = cfg.commit.message or ''
log_message = ''
if len(commit_message) > 0:
log_message = '{} {}\n'.format(cfg.work_log.bullet, commit_message)
if 'clock' in cfg.commit:
if cfg.commit.clock is not None:
tminus = cfg.commit.tminus or ''
if len(tminus) != 0:
commit_message = "T-{} {}".format(tminus.strip('T-'), commit_message)
if cfg.commit.clock.lower() == 'in':
last_in = is_clocked_in(work)
if last_in is not None:
time_since_in = pd.datetime.now(last_in.tzinfo) - last_in
raise IOError(
"You are still clocked in!\n" + \
"\tlast clock in: {} ({:.2f} hours ago)".format(
last_in,
time_since_in.total_seconds()/3600.))
else:
if len(commit_message) == 0:
commit_message = "clock-in"
else:
commit_message = "clock-in: {}".format(commit_message)
log_message = "\n{} {}: {}\n\n".format(
header_depth,
pd.datetime.now(),
commit_message)
print("clocking in with message: {} ".format(commit_message))
elif cfg.commit.clock.lower() == 'out': # prevent clock in and out at the same time
last_out = is_clocked_out(work)
if last_out is not None:
time_since_out = pd.datetime.now(last_out.tzinfo) - last_out
raise IOError(
"You already clocked out!\n" + \
"\tlast clock out: {} ({:.2f} hours ago)".format(
last_out,
time_since_out.total_seconds()/3600.))
else:
if len(commit_message) == 0:
commit_message = "clock-out"
else:
commit_message = "clock-out: {}".format(commit_message)
log_message = "{} {}: {}\n\n".format(
header_depth,
pd.datetime.now(),
commit_message)
print("clocking out with message: {} ".format(commit_message))
else:
                raise IOError("unrecognized clock value: {}".format(cfg.commit.clock))
# logfile = hydra.utils.to_absolute_path(cfg.work_log.filename)
logfile = os.path.abspath(cfg.work_log.filename)
if len(log_message) > 0:
update_log(logfile, log_message)
return commit_(repo, commit_message, logfile)
def flatten_dict(d, sep = '.'):
    '''flattens a nested dictionary into a single flat dict
    courtesy of MYGz https://stackoverflow.com/a/41801708
    returns {k.sub_key: v, ...} built from the single normalized record
'''
return pd.io.json.json_normalize(d, sep=sep).to_dict(orient='records')[0]
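# Minimal sketch of the expected behaviour (illustrative values, not from the module):
#   flatten_dict({'commit': {'clock': 'in', 'tminus': None}, 'verbosity': 1})
#   -> {'commit.clock': 'in', 'commit.tminus': None, 'verbosity': 1}
# json_normalize yields a one-row frame whose columns are the dot-joined keys;
# taking record [0] turns that row back into a flat dict.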
def config_override(cfg):
"""Overrides with user-supplied configuration
hourly will override its configuration using
hourly.yaml if it is in the base git directory
or users can set an override config:
config_override=path/to/myconfig.yaml
"""
# change to the git directory of the original working dir
original_path = hydra.utils.get_original_cwd()
change_git_dir(original_path, verbosity = cfg.verbosity)
# get the full path of the override file if available
override_path = os.path.abspath(cfg.config_override)
if path.exists(override_path):
if cfg.verbosity > 0:
print("overriding config with {}".format(override_path))
override_conf = OmegaConf.load(override_path)
# merge overrides first input with second
cfg = OmegaConf.merge(cfg, override_conf)
else:
if cfg.verbosity > 0:
print("override path does not exist: {}".format(override_path))
# merge in command line arguments
cli_conf = OmegaConf.from_cli()
cfg = OmegaConf.merge(cfg, cli_conf)
return cfg
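# Merge precedence sketch (assumes OmegaConf.merge lets later arguments win,
# which is how the calls above are ordered): defaults < override file < CLI.
#   base = OmegaConf.create({'verbosity': 0, 'commit': {'clock': None}})
#   override = OmegaConf.create({'verbosity': 2})
#   OmegaConf.merge(base, override).verbosity  # -> 2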
def get_user_work(work, current_user, identifier):
for user_id, user_work in work.groupby(identifier):
if user_id == current_user:
return user_work
def resolve(cfg):
"""Expands a configuration, interpolating variables"""
cfg_dict = cfg.to_container(resolve = True)
return OmegaConf.create(cfg_dict)
def localize(t):
if t is None:
return t
if t.tzinfo is None:
LOCAL_TIMEZONE = get_local_timezone()
return t.tz_localize(LOCAL_TIMEZONE)
else:
return t
def get_avg_time(cfg, labor, total_hours):
tdelta = labor.set_index('TimeIn').TimeDelta.groupby(pd.Grouper(freq = cfg.vis.frequency)).sum()
tmin = tdelta.index.min()
tmax = tdelta.index.max()
time_range_sec = (tmax - tmin).total_seconds()
if cfg.verbosity:
print('freq: {}'.format(cfg.vis.frequency))
print('time range [sec]: {}'.format(time_range_sec))
bin_size_val, bin_size_unit = cfg.vis.frequency.split(' ')
bin_size = pd.Timedelta(float(bin_size_val), unit = bin_size_unit)
bin_size_sec = bin_size.total_seconds()
if cfg.verbosity:
print('bin size : {} [sec]: {}'.format(bin_size, bin_size_sec))
time_bins = (time_range_sec/bin_size_sec) + 1
avg_time = total_hours/time_bins
if cfg.verbosity:
print('hours: {} bins: {} average time: {}'.format(total_hours, time_bins, avg_time))
return avg_time, tmin, tmax
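# Worked example for get_avg_time (illustrative numbers only): with
# cfg.vis.frequency = '7 D' and a labor range spanning 28 days,
# time_range_sec = 28 * 86400 and bin_size_sec = 7 * 86400, so
# time_bins = 28 / 7 + 1 = 5 and avg_time = total_hours / 5, i.e. the
# average hours logged per 7-day bin (both endpoints counted).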
def divide_labor(cfg, labor):
"""divides labor among multiple repo names"""
rows = []
for _, row in labor.iterrows():
if isinstance(row.repo, tuple):
if cfg.verbosity > 1:
print("!!!!!!!Found multiple names {} !!!!!!".format(row.repo))
# divide evenly among the tags
tag_count = len(row.repo)
row_tag = row
row_tag.TimeDelta = row.TimeDelta/tag_count
row_tag.Hours = row.Hours/tag_count
for repo_name in row.repo:
row_tag.repo = repo_name
rows.append(pd.DataFrame(row_tag).T)
if cfg.verbosity > 1:
print(' appending {}'.format(repo_name))
else:
if cfg.verbosity > 1:
print('appending {}'.format(row.repo))
rows.append(pd.DataFrame(row).T)
return pd.concat(rows)
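# Splitting rule sketch: a labor row tagged repo = ('hourly', 'toxsquad') with
# Hours = 3.0 becomes two rows, one per repo name, each carrying Hours = 1.5
# (TimeDelta is divided the same way), so per-repo sums still add up to the
# original total.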
def run_report(cfg):
if cfg.verbosity > 1:
print(cfg.pretty())
repos = resolve(cfg.report.repos)
if 'start_date' in cfg.repo:
start_date = pd.to_datetime(cfg.repo.start_date)
start_date = localize(start_date)
else:
start_date = None
if 'end_date' in cfg.repo:
end_date = pd.to_datetime(cfg.repo.end_date)
end_date = localize(end_date)
else:
end_date = None
if 'pandas' in cfg.report:
pd_opts = flatten_dict(
OmegaConf.to_container(cfg.report.pandas))
for k,v in pd_opts.items():
pd.set_option(k,v)
clocks = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 15:26:14 2019
@author: <NAME>
"""
from PIL import Image
from PIL import ExifTags as ExifTags
import pandas as pd
import numpy as np
from os import walk
# User-defined variables
input_path = '/media/loibldav/TOSHIBA EXT/Drone-Data/KIR19/Cognac/Mavic1'
#input_path = '/media/loibldav/TOSHIBA EXT/Drone-Data/KIR19/Cognac/Mavic2'
output_path = '/home/loibldav/Processing/exif-experiment'
gcp_csv = '/home/loibldav/Processing/exif-experiment/GCPs_Cognac.csv'
verbosity = 1 # Reporting level
# gcp_coords = [[42.5852, 76.102575]]
max_offset = 0.0005
# Functions
def get_exif(fn):
ret = {}
i = Image.open(fn)
info = i._getexif()
for tag, value in info.items():
decoded = ExifTags.TAGS.get(tag, tag)
ret[decoded] = value
return ret
def calc_decdeg_coords(gps_info_array):
''' Calculates decimal degree coordinates from EXIF gpsinfo data. '''
decdeg_coords = gps_info_array[0][0] + gps_info_array[1][0] / 60 + gps_info_array[2][0] / 36000000
return decdeg_coords
# Process ground control point coordinates
gcp_data = pd.read_csv(gcp_csv, header=0)
gcp_x_y_data = gcp_data[['Y', 'X', 'name']].copy()
gcp_coords = gcp_x_y_data.values.tolist()
# Read files recursively
(_, _, filenames) = next(walk(input_path))
filenames = sorted(filenames) #.sort()
print('Found ' + str(len(filenames)) + ' files in input directory.')
# 103 degree, 41 minute, 1052 centisecond, 103+41/60+1052/(3600*100)
exif_coords = []
lat_coords = []
lon_coords = []
file_counter = 1
for filename in filenames:
exif = get_exif(input_path + '/' + filename)
# print(exif)
gpsinfo = {}
for key in exif['GPSInfo'].keys():
decode = ExifTags.GPSTAGS.get(key,key)
gpsinfo[decode] = exif['GPSInfo'][key]
# print(gpsinfo)
    latitude = calc_decdeg_coords(gpsinfo['GPSLatitude'])
    longitude = calc_decdeg_coords(gpsinfo['GPSLongitude'])
exif_coords.append([filename, latitude, longitude])
lat_coords.append(latitude)
lon_coords.append(longitude)
# print('Latitude: '+ str(latitude))
# print('Longitude: '+ str(longitude))
# data = pd.read_csv(input_path + '/' + filename, header=0) # , usecols=['LS_DATE','SC_median']
if verbosity >= 1:
print('Working on ' + filename + ' ('+ str(file_counter) +' of '+ str(len(filenames)) + ') ...', end='\r', flush=True)
'''
# print(str, end='\r')
# sys.stdout.flush()
if verbosity >= 2:
print(data.shape)
print(data.size)
'''
file_counter += 1
print('\nFinished EXIF input data processing\n')
result_df_labels = ['filename', 'lat_gps', 'lon_gps', 'gcp_name', 'lat_gcp', 'lon_gcp', 'file_path']
result_df_full = | pd.DataFrame(columns=result_df_labels) | pandas.DataFrame |
#!@PYTHON3@
#%%
import struct
import pickle
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
plt.rcParams['font.family'] = 'serif'
plt.rcParams['mathtext.fontset'] = 'dejavuserif'
plt.rcParams.update({'font.size': 13})
# plt.rcParams.update({'font.size': 15})
# expected coverage for transcripts
def ReadRawCorrection(filename):
ExpectedProb={}
fp=open(filename, 'rb')
numtrans=struct.unpack('i', fp.read(4))[0]
for i in range(numtrans):
namelen=struct.unpack('i', fp.read(4))[0]
seqlen=struct.unpack('i', fp.read(4))[0]
name=""
correction=np.zeros(seqlen)
for j in range(namelen):
name+=struct.unpack('c', fp.read(1))[0].decode('utf-8')
for j in range(seqlen):
correction[j]=struct.unpack('d', fp.read(8))[0]
ExpectedProb[name]=correction
fp.close()
print("Finish reading theoretical distributions for {} transcripts.".format(len(ExpectedProb)))
return ExpectedProb
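# Record layout consumed above (native byte order, struct's default):
#   int32 number of transcripts, then per transcript:
#   int32 name length, int32 sequence length,
#   <name length> single chars (transcript name),
#   <sequence length> float64 values (expected per-position coverage).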
ExpectedProb=ReadRawCorrection("/home/priyamvada/data/ERR030875/correction.dat")
#ExpectedProb=ReadRawCorrection("/home/congm1/savanna/savannacong33/SADrealdata/HumanBodyMap/salmon_Full_ERR030873/correction.dat")
for key, value in ExpectedProb.items():
print(key, ' : ', value)
df=pd.DataFrame(ExpectedProb.items(),columns=['Transcript Name', 'Expected coverage'])
print(df)
# import struct
# import pickle
# import numpy as np
# from matplotlib import pyplot as plt
# import pandas as pd
plt.rcParams['font.family'] = 'serif'
plt.rcParams['mathtext.fontset'] = 'dejavuserif'
plt.rcParams.update({'font.size': 13})
# plt.rcParams.update({'font.size': 15})
# observed data coverage for transcripts
def ReadRawStartPos(filename):
TrueRaw={}
fp=open(filename, 'rb')
numtrans=struct.unpack('i', fp.read(4))[0]
for i in range(numtrans):
namelen=struct.unpack('i', fp.read(4))[0]
seqlen=struct.unpack('i', fp.read(4))[0]
name=""
poses=np.zeros(seqlen, dtype=np.int)
counts=np.zeros(seqlen)
for j in range(namelen):
name+=struct.unpack('c', fp.read(1))[0].decode('utf-8')
for j in range(seqlen):
poses[j]=struct.unpack('i', fp.read(4))[0]
for j in range(seqlen):
counts[j]=struct.unpack('d', fp.read(8))[0]
tmp=np.zeros(poses[-1]+1)
for j in range(len(poses)):
tmp[poses[j]] = counts[j]
TrueRaw[name]=tmp
fp.close()
print("Finish reading actual distribution for {} transcripts.".format(len(TrueRaw)))
return TrueRaw
TrueRaw=ReadRawStartPos("/home/priyamvada/data/ERR030875/startpos.dat")
#TrueRaw=ReadRawStartPos("/home/congm1/savanna/savannacong33/SADrealdata/HumanBodyMap/salmon_Full_ERR030873/startpos.dat")
for key, value in TrueRaw.items():
print(key, ' : ', value)
#len(TrueRaw)
#import pickle
# key_to_lookup = 'ENST00000387405.1'
# if key_to_lookup in TrueRaw:
# print ("Key exists")
# else:
# print ("Key does not exist")
df1=pd.DataFrame(TrueRaw.items(),columns=['Transcript Name', 'observed coverage'])
print(df1)
dfmerge=pd.merge(df,df1,on='Transcript Name')
print(dfmerge)
# extract and label (1) adjustable anomalous transcripts
import pandas as pd
dfa = pd.read_csv("/home/priyamvada/data/ERR030875/test_correctapprox9/test_adjusted_quantification.tsv", sep="\t")
#dfa = pd.read_csv("/home/congm1/savanna/savannacong33/SADrealdata/HumanBodyMap/salmon_Full_ERR030873/test_correctapprox9/test_adjusted_quantification.tsv", sep="\t")
print (dfa)
dfa1 = dfa.iloc[:,0:1]
print(dfa1)
dfa1=dfa1.rename(columns={'# Name': 'Transcript Name'})
print(dfa1)
dfa2=dfa1.assign(Label='1')
print(dfa2)
#%%
#extract and label (2) non-adjustable anomalous transcripts
dfan = pd.read_csv("/home/priyamvada/data/ERR030875/test_correctapprox9/test_unadjustable_pvalue.tsv", sep="\t")
#dfan = pd.read_csv("/home/congm1/savanna/savannacong33/SADrealdata/HumanBodyMap/salmon_Full_ERR030873/test_correctapprox9/test_unadjustable_pvalue.tsv", sep="\t")
print (dfan)
dfan1 = dfan.iloc[:,0:1]
print(dfan1)
dfan1=dfan1.rename(columns={'#Name': 'Transcript Name'})
print(dfan1)
dfan2=dfan1.assign(Label='1')
print(dfan2)
anomalous_merge = pd.concat([dfa2, dfan2], axis=0)
print(anomalous_merge)
len(anomalous_merge)
allabnormal_merge=pd.merge(dfmerge,anomalous_merge,on='Transcript Name')
print(allabnormal_merge)
# make list of anomalous transcripts names
anomal_list = list(allabnormal_merge['Transcript Name'])
#print(anomal_list)
#%%
Normal_Transcripts=[]
for key,_ in TrueRaw.items():
Normal_Transcripts.append(key)
#print(Normal_Transcripts)
#print(len(Normal_Transcripts))
#print('normaldone')
l3 = [x for x in Normal_Transcripts if x not in anomal_list]
print(l3)
print(len(l3))
#%%
normaldf= | pd.DataFrame(l3,columns=['Transcript Name']) | pandas.DataFrame |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = | pandas.Series(dbpedia.train.target) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[107]:
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# to ignore seaborn warnings (nobody likes those)
import warnings
warnings.filterwarnings('ignore')
plt.style.use('classic')
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_columns', 144)
# In[3]:
# Import the King_Country_House_prices_dataset via csv file
df = | pd.read_csv('King_County_House_prices_dataset.csv') | pandas.read_csv |
"""
*********************************************************
hacker_ols.py
Created by <NAME> on 8/15/20
Functions to validate Hubble's Law with Hacker Statistics
for OLS Simple Linear Regression & Hypothesis Testing
*********************************************************
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import probplot, ttest_ind
import seaborn as sns
from statsmodels.formula.api import ols
from statsmodels.regression.linear_model import RegressionResultsWrapper
from statsmodels.stats.power import TTestIndPower
# set seaborn style
sns.set_style('white')
def load_hubble_data() -> pd.DataFrame:
"""
    Load the Edwin Hubble dataset retrieved from Source:
A relation between distance and radial velocity among extra-galactic nebulae
by <NAME>
PNAS March 15, 1929 15 (3) 168-173; https://doi.org/10.1073/pnas.15.3.168
Communicated January 17, 1929
column names = Object Name, Distance [Mpc], Velocity [Km/second]
    Notes on units: 1 parsec = 3.26 light years, 1 Mpc = megaparsec = 10^6 parsecs.
Purpose:
load Edwin Hubble data .csv file for hacker stats regression
Returns:
pandas DataFrame with Hubble's data
"""
return | pd.read_csv("hubble_data.csv", header=9) | pandas.read_csv |
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---
import os
import random
import sys
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import rdkit.Chem as Chem
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
# --- JT-VAE
from jtnn import * # not cool, but this is how they do it ...
from jtnn.datautils import ToxPropDataset
# --- disable rdkit warnings
from rdkit import RDLogger
from torch.utils import data
from toxsquad.data import *
from toxsquad.losses import *
from toxsquad.modelling import *
from toxsquad.visualizations import Visualizations
# --- toxsquad
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
import os
import pickle
# ------------ PRE-PROCESSING ROUTINES ------------
from mol_tree import *
def save_object(obj, filename):
with open(filename, "wb") as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def open_object(filename):
with open(filename, "rb") as input:
reopened = pickle.load(input)
return reopened
def get_vocab(assay_dir, assay_id, toxdata):
filename = assay_dir + "/jtvae/" + str(assay_id) + "-vocab.pkl"
if os.path.isfile(filename):
print("Re-opening vocabulary file")
vocab = open_object(filename)
else:
print("Deriving vocabulary")
vocab = set()
for (
smiles
) in toxdata.smiles: # I guess here we should only use the training data??
mol = MolTree(smiles)
for c in mol.nodes:
vocab.add(c.smiles)
vocab = Vocab(list(vocab))
save_object(vocab, filename)
return vocab
# ------------ MODEL OPTIMIZATION ROUTINES ------------
def derive_inference_model(
toxdata,
vocab,
infer_dir,
model_params,
vis,
device,
model_name,
base_lr=0.003,
beta=0.005,
num_threads = 24,
weight_decay = 0.000
):
from jtnn.jtprop_vae import JTPropVAE
smiles = toxdata.smiles
props = toxdata.val
dataset = ToxPropDataset(smiles, props)
batch_size = 8
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_threads,
collate_fn=lambda x: x,
drop_last=True,
)
from jtnn.jtprop_vae import JTPropVAE
model = JTPropVAE(vocab, **model_params).to(device)
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
# --- pre-train AE
total_step_count = 0
total_step_count = pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
infer_dir,
vis,
total_step_count,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# train (set a smaller initial LR, beta to 0.005)
optimizer = optim.Adam(model.parameters(), lr=0.0003,weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
print("[DEBUG] TRAINING")
total_step_count = train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
infer_dir,
vis,
total_step_count,
beta=0.005,
model_name=model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# --- fine tune AE
# optimizer = optim.Adam(model.parameters(), lr=0.0003)
# scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
# scheduler.step()
# total_step_count = train_jtvae(model, optimizer, scheduler, dataloader, device, infer_dir, vis, total_step_count, 0.005, model_name, MAX_EPOCH=36, PRINT_ITER=5)
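# Usage sketch (illustrative only; the model_params keys must match the
# JTPropVAE constructor in the local jtnn package, e.g. hidden/latent sizes
# and message-passing depths):
#
#   vocab = get_vocab(assay_dir, assay_id, toxdata)
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   derive_inference_model(toxdata, vocab, infer_dir, model_params,
#                          vis=None, device=device, model_name="jtvae-infer")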
def cross_validate_jtvae(
toxdata,
partitions,
xval_dir,
vocab,
model_params,
device,
model_name,
base_lr=0.003,
vis_host=None,
vis_port=8097,
assay_name="",
num_threads = 24,
weight_decay = 0.0000
):
"""
:todo ensure same training parameters are used for inference and cross-val models
"""
MAX_EPOCH = 36
PRINT_ITER = 5
run = 0
scores = []
for partition in partitions:
# I/O
save_dir = xval_dir + "/run-" + str(run)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# vis
if vis_host is not None:
vis = Visualizations(
env_name="jtvae-xval-" + str(assay_name) + "-run-" + str(run), server=vis_host, port=vis_port
)
else:
vis = None
# data
smiles = toxdata.smiles.loc[partition["train"]]
props = toxdata.val.loc[partition["train"]]
dataset = ToxPropDataset(smiles, props)
batch_size = 8
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_threads,
collate_fn=lambda x: x,
drop_last=True,
)
# model
from jtnn.jtprop_vae import JTPropVAE
model = JTPropVAE(vocab, **model_params).to(device)
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
# pretrain
print("[DEBUG] PRETRAINING")
total_step_count = pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
save_dir,
vis,
0,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# train (set a smaller initial LR, beta to 0.005)
optimizer = optim.Adam(model.parameters(), lr=0.0003,weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
print("[DEBUG] TRAINING")
total_step_count = train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
save_dir,
vis,
total_step_count,
beta=0.005,
model_name=model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# evaluate (only property prediction accuracy for now)
scores.append(
evaluate_predictions_model(
model,
toxdata.smiles.loc[partition["test"]],
toxdata.val.loc[partition["test"]],
vis,
)
)
# memory management
del model
del optimizer
torch.cuda.empty_cache()
run = run + 1
return scores
def pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
model_dir,
vis,
total_step_count,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
):
my_log = open(model_dir + "/loss-pre.txt", "w")
for epoch in range(MAX_EPOCH):
print("pre epoch: " + str(epoch))
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
for it, batch in enumerate(dataloader):
for mol_tree, _ in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
torch.cuda.empty_cache()
loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta=0)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
prop_acc += pacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
prop_acc = prop_acc / PRINT_ITER
if vis is not None:
vis.plot_loss(word_acc, total_step_count, 1, model_name, "word-acc")
vis.plot_loss(prop_acc, total_step_count, 1, model_name, "mse")
print(
"Epoch: %d, Step: %d, KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f"
% (
epoch,
it + 1,
kl_div,
word_acc,
topo_acc,
assm_acc,
steo_acc,
prop_acc,
),
file=my_log,
flush=True,
)
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
del loss
del kl_div
total_step_count = total_step_count + 1
torch.cuda.empty_cache()
scheduler.step()
print("learning rate: %.6f" % scheduler.get_lr()[0])
torch.save(
model.cpu().state_dict(), model_dir + "/model-pre.iter-" + str(epoch)
)
torch.cuda.empty_cache()
model = model.to(device)
my_log.close()
return total_step_count
def train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
model_dir,
vis,
total_step_count,
beta,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
):
my_log = open(model_dir + "/loss-ref.txt", "w")
for epoch in range(MAX_EPOCH):
print("epoch: " + str(epoch))
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
for it, batch in enumerate(dataloader):
for mol_tree, _ in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
torch.cuda.empty_cache()
loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
prop_acc += pacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
prop_acc /= PRINT_ITER
if vis is not None:
vis.plot_loss(word_acc, total_step_count, 1, model_name, "word-acc")
vis.plot_loss(prop_acc, total_step_count, 1, model_name, "mse")
print(
"Epoch: %d, Step: %d, KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f"
% (
epoch,
it + 1,
kl_div,
word_acc,
topo_acc,
assm_acc,
steo_acc,
prop_acc,
),
file=my_log,
flush=True,
)
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
# if (it + 1) % 1500 == 0: # Fast annealing
# # does this make sense? With the smaller datasets
# # we don't get to 1500? Why is this happening?
# # I don't quite trust it
# # But here, since we call model.cpu()
# # we need to move the model to the device again
# # else we ran onto that weird issue!
# scheduler.step()
# print("learning rate: %.6f" % scheduler.get_lr()[0])
# #torch.save(
# # model.cpu().state_dict(),
# # model_dir + "/model-ref.iter-%d-%d" % (epoch, it + 1),
# #)
# model.to(device)
del loss
del kl_div
total_step_count = total_step_count + 1
scheduler.step()
print("learning rate: %.6f" % scheduler.get_lr()[0])
torch.save(
model.cpu().state_dict(), model_dir + "/model-ref.iter-" + str(epoch)
) # is this the expensive part?
model = model.to(device)
my_log.close()
return total_step_count
# ------------ MODEL EVALUATION ROUTINES ------------
def evaluate_predictions_model(model, smiles, props, vis):
"""
Return evaluation objects for JT-VAE model.
This function will return a list of [mse, r2] for the smiles passed in,
and also return a 2-col matrix for plotting predicted vs. actual.
vis object allows us to use Visdom to directly update
a live performance plot view.
:param model: JT-VAE model
:param smiles: Pandas series with SMILES as entries
We usually pass toxdata.smiles
:param props: Pandas series with molecular activity or property to predict
:param vis: Visualization object from toxsquad.visualizations
:returns: Scores, coords
- Scores is a list of mean squared error and correlation coefficient
(for entire smiles batch). This is of length 2.
- coords are x, y coordinates for the "performance plot"
(where x=actual and y=predicted).
"""
predictions = dict()
n_molecules = len(smiles)
coords = np.zeros((n_molecules, 2))
# k = 0;
model = model.eval()
for k, idx in enumerate(smiles.index):
print_status(k, n_molecules)
sml = smiles.loc[idx]
prop = props.loc[idx]
# model.predict(sml) returns a torch tensor
# on which we need to call .item()
# to get the actual floating point value out.
predictions[idx] = model.predict(sml).item()
coords[k, 0] = prop.item()
coords[k, 1] = predictions[idx]
# k = k + 1;
model = model.train()
mse = np.mean((coords[:, 1] - coords[:, 0]) ** 2)
corr = np.corrcoef(coords[:, 1], coords[:, 0])[0, 1]
print("MSE: " + str(mse))
print("Corr: " + str(corr))
scores = []
scores.append(mse)
scores.append(corr)
# TODO do reconstruction test
if vis is not None:
vis.plot_scatter_gt_predictions(
coords, f"{mse:.2f}" + "-r: " + f"{corr:.2f}", ""
)
return scores, coords
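# Usage sketch (hypothetical hold-out indices; toxdata is assumed to carry
# 'smiles' and 'val' columns as elsewhere in this module):
#
#   test_idx = toxdata.sample(frac=0.2, random_state=0).index
#   scores, coords = evaluate_predictions_model(
#       model, toxdata.smiles.loc[test_idx], toxdata.val.loc[test_idx], vis=None)
#   mse, corr = scores  # coords[:, 0] = actual, coords[:, 1] = predicted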
# ------------ LATENT SPACE ROUTINES ------------
from numpy.random import choice
from rdkit import DataStructs
from rdkit.Chem import AllChem
def get_neighbor_along_direction_tree(sample_latent, direction, step_size):
"""
Direction should be normalized
Direction is in tree space
"""
tree_vec, mol_vec = torch.chunk(sample_latent, 2, dim=1)
new_tree_vec = tree_vec + (direction * step_size)
new_sample = torch.cat([new_tree_vec, mol_vec], dim=1)
return new_sample
def get_neighbor_along_direction_graph(sample_latent, direction, step_size):
"""
Direction should be normalized
"""
tree_vec, mol_vec = torch.chunk(sample_latent, 2, dim=1)
# update graph
new_mol_vec = mol_vec + (
direction * step_size
) # maybe the step size will have to be different?
new_sample = torch.cat([tree_vec, new_mol_vec], dim=1)
return new_sample
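# Sketch: the direction vectors are expected to be unit-norm over half of the
# latent space (sample_latent is chunked into tree and graph halves above), so
# a step of size s displaces the tree (or graph) half by exactly |s| while the
# other half stays fixed. E.g. (latent sizes assumed to match the trained model):
#
#   direction = torch.randn(1, sample_latent.shape[1] // 2,
#                           device=sample_latent.device)
#   direction = direction / direction.norm()
#   neighbor = get_neighbor_along_direction_tree(sample_latent, direction, 0.5)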
def get_neighbors_along_directions_tree_then_graph(
model,
smiles,
directions,
scale_factors,
direction_graph,
scale_factor_graph,
n_neighbors=10,
val_to_beat=-2,
max_cosine_distance=1.6,
direction_graph_plus=None,
convert_to_pac50=False,
):
sample_latent = model.embed(smiles)
n_directions = len(directions)
new_samples = []
int_step_sizes = np.arange(-n_neighbors, n_neighbors + 1, 1)
idx = int_step_sizes == 0
int_step_sizes = np.delete(int_step_sizes, np.where(idx)[0][0])
actual_n_neighbors = len(int_step_sizes)
    # dynamic range (this adds a lot of additional samples ... just takes longer)
step_sizes_graph = np.arange(-n_neighbors, n_neighbors + 1, 1)
step_sizes_graph = step_sizes_graph * scale_factor_graph
# fixed range (original implementation)
step_sizes_graph_original = np.arange(-1, 2, 1)
step_sizes_graph_original = (
step_sizes_graph_original * 0.5
) # so here the step size is also fixed!
step_sizes_graph = np.concatenate(
(step_sizes_graph, step_sizes_graph_original), axis=None
)
actual_n_neighbors_graph = len(step_sizes_graph)
    # this is pretty quick, as it's just arithmetic operations in latent space
# todo: since cosine similarity in latent space correlates to an extent with
# chemical similarity, we could further reduce the number of evaluations based on that
cos = nn.CosineSimilarity(dim=1)
for k in range(n_directions): # iterate over axes
step_sizes = int_step_sizes * scale_factors[k]
for i in range(actual_n_neighbors): # iterate over steps along axis
sample = get_neighbor_along_direction_tree(
sample_latent, directions[k], step_sizes[i]
) # tree sample
for j in range(actual_n_neighbors_graph): # iterate along graph axis
graph_sample = get_neighbor_along_direction_graph(
sample, direction_graph, step_sizes_graph[j]
)
# check cosine
cdistance = 1 - cos(sample_latent, graph_sample)
if cdistance.item() < max_cosine_distance:
new_samples.append(graph_sample)
# additional direction
if direction_graph_plus is not None:
graph_sample = get_neighbor_along_direction_graph(
sample, direction_graph_plus, step_sizes_graph[j]
)
# check cosine
cdistance = 1 - cos(sample_latent, graph_sample)
if cdistance.item() < max_cosine_distance:
new_samples.append(graph_sample)
# predict activity and decode samples (probably should be another function, also because this happens ALL the time)
new_smiles, new_activities, new_samples = predict_and_decode_strict(
model, new_samples, val_to_beat, convert_to_pac50
)
return (
new_samples,
new_smiles,
new_activities,
sample_latent.squeeze().cpu().detach().numpy(),
)
# The minimum value threshold should arguably also be set relative to the MSE of the model.
def predict_and_decode_strict(model, new_samples, min_val, convert_to_pac50=False):
n_samples = len(new_samples)
new_smiles = []
new_activities = []
my_bar = None
filtered_samples = []
try:
import streamlit as st
st.write("Decoding progress")
my_bar = st.progress(0)
except ImportError:
pass
for i in range(n_samples):
if my_bar is not None:
my_bar.progress((i + 1) / n_samples)
print_status(i, n_samples)
prediction = (
model.propNN(new_samples[i]).squeeze().cpu().detach().numpy()
) # compute the activity predictions
if convert_to_pac50:
prediction = (prediction - 6) * -1
# HIGHER IS BETTER
prediction_condition = prediction > min_val
if prediction_condition:
new_activities.append(prediction)
tree_vec, mol_vec = torch.chunk(new_samples[i], 2, dim=1)
more_smiles = model.decode(tree_vec, mol_vec, prob_decode=False)
new_smiles.append(more_smiles)
new_samples[i] = new_samples[i].squeeze().cpu().detach().numpy()
filtered_samples.append(new_samples[i])
return new_smiles, new_activities, filtered_samples
def predict_and_decode(model, new_samples, show_st=False):
n_samples = len(new_samples)
new_smiles = []
new_activities = []
my_bar = None
if show_st:
try:
import streamlit as st
st.write("Decoding progress")
my_bar = st.progress(0)
except ImportError:
pass
for i in range(n_samples):
if my_bar is not None:
my_bar.progress((i + 1) / n_samples)
print_status(i, n_samples)
prediction = (
model.propNN(new_samples[i]).squeeze().cpu().detach().numpy()
) # compute the activity predictions
new_activities.append(prediction)
tree_vec, mol_vec = torch.chunk(new_samples[i], 2, dim=1)
more_smiles = model.decode(tree_vec, mol_vec, prob_decode=False)
new_smiles.append(more_smiles)
new_samples[i] = new_samples[i].squeeze().cpu().detach().numpy()
return new_smiles, new_activities
def sample_gaussian(mean, sigma, n_samples):
center = mean
covariance = sigma
m = torch.distributions.MultivariateNormal(center, covariance)
samples = []
for i in range(n_samples):
samples.append(m.sample())
samples = torch.stack(samples)
return samples
def sample_gaussian_and_predict(model, n_samples, mean, sigma):
dim = int(model.latent_size)
center = mean
covariance = sigma
m = torch.distributions.MultivariateNormal(center, covariance)
samples = []
for i in range(n_samples):
samples.append(m.sample())
samples = torch.stack(samples)
cur_vec = create_var(samples.data, False)
predictions = model.propNN(cur_vec).squeeze()
vectors = cur_vec.cpu().detach().numpy()
predictions = predictions.cpu().detach().numpy()
return vectors, predictions
def get_embeddings(model, toxdata):
k = 0
n_molecules = len(toxdata)
vectors = {}
for idx in toxdata.smiles.index:
print_status(k, n_molecules)
sml = toxdata.smiles.loc[idx]
vectors[idx] = model.embed(sml).cpu().detach().numpy().ravel()
k = k + 1
return vectors
from rdkit import DataStructs
from rdkit.Chem import AllChem
from sklearn.metrics.pairwise import cosine_similarity
def sample_latent_space(model, latent, n_samples=2000, decode=False):
mu = torch.from_numpy(np.mean(latent).values).float()
sigma = torch.from_numpy(np.cov(latent.values.transpose())).float()
return sample_latent_space_pass_normal(model, mu, sigma, n_samples, decode)
def sample_latent_space_pass_normal(model, mu, sigma, n_samples=2000, decode=False):
samples, samples_predictions = model.sample_gaussian_and_predict(
n_samples, mu, sigma
) # this is fast
samples = samples.astype("float64")
samples_predictions = samples_predictions.astype("float64")
# dim = int(model_params["latent_size"] / 2)
dim = int(model.latent_size / 2)
tree_vec = create_var(torch.from_numpy(samples[:, 0:dim]).float())
mol_vec = create_var(torch.from_numpy(samples[:, dim : dim * 2]).float())
samples_decoded = []
if decode:
for i in range(n_samples):
print_status(i, n_samples)
samples_decoded.append(
model.decode(
tree_vec[i, :].reshape(1, -1),
mol_vec[i, :].reshape(1, -1),
prob_decode=False,
)
) # this is slow
samples_decoded_df = pd.DataFrame(data=samples_decoded)
samples_decoded_df.columns = ["smiles"]
else:
samples_decoded_df = None
return samples, samples_predictions, samples_decoded_df
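# Example (sketch): fit a Gaussian to the training-set embeddings and sample
# around it; `model` and `toxdata` (with a .smiles column) are assumed to exist.
#
#     latent = pd.DataFrame.from_dict(get_embeddings(model, toxdata), orient="index")
#     samples, preds, decoded_df = sample_latent_space(model, latent,
#                                                      n_samples=500, decode=False)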
# ------------ MISC ROUTINES ------------
def print_status(i, maxSteps):
    percentage = (float(i) / float(maxSteps)) * 100
    divisor = 5
    if i % divisor == 0:
        sys.stdout.write("Progress: %d%% \r" % percentage)
sys.stdout.flush()
# ------------ DISTANCES ROUTINES ------------
def normalize_morgans(morgans):
morgans_normalized = {}
for key in morgans.keys():
fp = morgans[key]
fp_array = np.zeros((0,), dtype=np.int8)
DataStructs.ConvertToNumpyArray(fp, fp_array)
morgans_normalized[key] = normalize_to_unity(fp_array)
return morgans_normalized
def normalize_to_unity(fp):
if np.sum(fp) == 0:
print("invalid fp")
return fp
else:
return fp / np.sum(fp)
import cadd.sascorer as sascorer
import networkx as nx
# ------------ CHEMISTRY ROUTINES ------------
from rdkit.Chem import Descriptors, rdmolops
from rdkit.Chem.Descriptors import ExactMolWt
def get_cycle_score(mol):
cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
current_cycle_score = cycle_length
return current_cycle_score
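# Example (sketch): rings of size <= 6 contribute no penalty, larger rings
# contribute (size - 6).
#
#     from rdkit import Chem
#     get_cycle_score(Chem.MolFromSmiles("C1CCCCC1"))   # cyclohexane  -> 0
#     get_cycle_score(Chem.MolFromSmiles("C1CCCCCC1"))  # cycloheptane -> 1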
import cadd.sascorer as sascorer
# toxdata should include a mols value
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem.Descriptors import ExactMolWt
NumHDonors = lambda x: rdMolDescriptors.CalcNumHBD(x)
NumHAcceptors = lambda x: rdMolDescriptors.CalcNumHBA(x)
from rdkit.Chem import Descriptors
TPSA = lambda x: Descriptors.TPSA(x)
def compute_properties(toxdata):
n_molecules = len(toxdata)
k = 0
mw = {}
na = {}
log_p = {}
sas = {}
cycle_scores = {}
# more properties
nhdon= {}
nhacc = {}
tpsa = {}
for idx in toxdata.index:
print_status(k, n_molecules)
mol = toxdata.loc[idx].mols
try:
mw[idx] = ExactMolWt(mol)
log_p[idx] = Descriptors.MolLogP(mol)
sas[idx] = sascorer.calculateScore(mol)
cycle_scores[idx] = get_cycle_score(mol)
na[idx] = mol.GetNumAtoms()
nhdon[idx] = NumHDonors(mol)
nhacc[idx] = NumHAcceptors(mol)
tpsa[idx] = TPSA(mol)
        except Exception:
            print("[DEBUG] Error computing properties for molecule at index", idx)
mw[idx] = np.nan
log_p[idx] = np.nan
sas[idx] = np.nan
cycle_scores[idx] = np.nan
na[idx] = np.nan
nhdon[idx] = np.nan
nhacc[idx] = np.nan
tpsa[idx] = np.nan
continue
k = k + 1
props = [
pd.DataFrame.from_dict(mw, orient="index"),
pd.DataFrame.from_dict(log_p, orient="index"),
pd.DataFrame.from_dict(sas, orient="index"),
pd.DataFrame.from_dict(cycle_scores, orient="index"),
pd.DataFrame.from_dict(na, orient="index"),
pd.DataFrame.from_dict(nhdon, orient="index"),
pd.DataFrame.from_dict(nhacc, orient="index"),
pd.DataFrame.from_dict(tpsa, orient="index"),
]
props_df = pd.concat(props, axis=1)
    props_df.columns = ["mw", "log_p", "sas", "cycle_scores", "n_atoms",
                        "HBD", "HBA", "TPSA"]
    toxdata_props = pd.merge(toxdata, props_df, left_index=True, right_index=True)
    return toxdata_props
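# Example (sketch): `toxdata` must carry RDKit Mol objects in a `mols` column
# before calling compute_properties (see note above).
#
#     from rdkit import Chem
#     toxdata["mols"] = toxdata.smiles.apply(Chem.MolFromSmiles)
#     toxdata = compute_properties(toxdata)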
import logging
import numpy as np
import pandas as pd
from icubam.analytics import dataset
SPREAD_CUM_JUMPS_MAX_JUMP = {
"n_covid_deaths": 10,
"n_covid_transfered": 10,
"n_covid_refused": 10,
"n_covid_healed": 10,
}
def format_data(d: pd.DataFrame) -> pd.DataFrame:
d["datetime"] = pd.to_datetime(d["create_date"])
d["date"] = d["datetime"].dt.date
d["department"] = d["icu_dept"]
d["region"] = d["icu_region_name"]
d["region_id"] = d["icu_region_id"]
d = d[dataset.ALL_COLUMNS]
return d
def preprocess_bedcounts(
d: pd.DataFrame,
spread_cum_jump_correction: bool = False,
  max_date: str = None,
) -> pd.DataFrame:
"""This will process the bedcounts data to make analysis easier.
  There are five steps to the processing:
  1) Run a low-pass filter over all timeseries to remove single spikes that
     generally represent a data entry error.
  2) Aggregate the timeseries into their closest T-min intervals (T=15).
     This helps remove repeated updates, and takes the most recent update
     for a T-min window, such that if there was a correction to bad data
     that will be the only value for that time window.
  3) Guarantee monotonicity on cumulative counts by replacing any decreasing
     values in the timeseries with their previous count: x_t = max(x_t, x_{t-1}).
  4) Impute missing data with two strategies: for holes > 3 days, impute data
     by linearly interpolating between the two end-points of the missing set.
     Subsequently, guarantee that each ICU has data for the whole timeseries,
     either by forward-propagating the most recent day's data, or by setting
     values to 0 for days before the ICU started its data collection.
  5) (Optional) Spread out sudden jumps in data that reflect onboardings or
     a change in reporting habit.
  Args:
    spread_cum_jump_correction : Whether to apply step 5) to the data.
    max_date : Only return data up to this date (excluded).
  """
# Extract useful columns and recast date properly:
d = format_data(d)
d = d.fillna(0)
if "Mulhouse-Chir" in d.icu_name.unique():
d.loc[d.icu_name == "Mulhouse-Chir", "n_covid_healed"] = np.clip(
(
d.loc[d.icu_name == "Mulhouse-Chir", "n_covid_healed"] -
d.loc[d.icu_name == "Mulhouse-Chir", "n_covid_transfered"]
).values,
a_min=0,
a_max=None,
)
icu_to_first_input_date = dict(
d.groupby("icu_name")[["date"]].min().itertuples(name=None)
)
  # Apply steps 1), 2) & 3)
  d = aggregate_multiple_inputs(d, "15Min")
  # Step 4)
  d = fill_in_missing_days(d, "3D")
  d = enforce_daily_values_for_all_icus(d)
  # Step 5)
if spread_cum_jump_correction:
d = spread_cum_jumps(d, icu_to_first_input_date)
d = d[dataset.ALL_COLUMNS]
d = d.sort_values(by=["date", "icu_name"])
if max_date is not None:
logging.info("data loaded's max date will be %s (excluded)" % max_date)
d = d.loc[d.date < pd.to_datetime(max_date).date()]
return d
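# Example (sketch): `raw_df` is assumed to be the raw bedcounts export carrying
# the columns expected by format_data (create_date, icu_name, icu_dept, ...).
#
#     clean = preprocess_bedcounts(raw_df, spread_cum_jump_correction=False,
#                                  max_date="2020-05-01")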
def aggregate_multiple_inputs(d, agg_time_delta="15Min"):
"""Aggregate the timeseries into time bins.
This will aggregate the timeseries into regular time intervals, and use the
most recent update prior to time t to populate the bin at time t.
"""
res_dfs = []
for icu_name, dg in d.groupby("icu_name"):
dg = dg.set_index("datetime")
dg = dg.sort_index()
td_diff = dg.index.to_series().diff(1)
mask = td_diff > pd.Timedelta(agg_time_delta)
mask = mask.shift(-1).fillna(True).astype(bool)
dg = dg.loc[mask]
# This will run low-pass filters to remove spurious outliers:
    # Rolling median average, 5 points (for cumulative quantities):
for col in dataset.CUM_COLUMNS:
dg[col] = (
dg[col].rolling(5, center=True, min_periods=1).median().astype(int)
)
    # Rolling median average, 3 points (for non-cumulative quantities):
for col in dataset.NCUM_COLUMNS:
dg[col] = dg[col].fillna(0)
dg[col] = (
dg[col].rolling(3, center=True, min_periods=1).median().astype(int)
)
# Force cumulative columns to be monotonic by bringing any decreases in
# the value up to their previous values i.e. x_t = max(x_t, x_{t-1}):
dg[dataset.CUM_COLUMNS
] = np.maximum.accumulate(dg[dataset.CUM_COLUMNS].values, axis=0)
res_dfs.append(dg.reset_index())
return pd.concat(res_dfs)
def fill_in_missing_days(d, time_delta_threshold="3D"):
"""Group the timeseries into days, and impute data linearly for holes
in the data superior to 3 days.
"""
res_dfs = []
for icu_name, dg in d.groupby("icu_name"):
dg = dg.sort_values(by=["datetime"])
time_delta = dg["datetime"].diff(1)
for i, td in enumerate(time_delta):
if td > pd.Timedelta(time_delta_threshold):
n_days = td // pd.Timedelta("1D")
val_init = dg.iloc[i - 1]
val_final = dg.iloc[i]
for added_day in range(n_days):
added_datetime = (val_init.datetime + pd.Timedelta("1D") * added_day)
added_date = val_init.date + pd.Timedelta("1D") * added_day
new_row = {
"datetime":
added_datetime,
"icu_name":
val_init.icu_name,
"date":
added_date,
"department":
val_init.department,
"n_covid_deaths":
np.round(
val_init.n_covid_deaths +
(val_final.n_covid_deaths - val_init.n_covid_deaths) *
added_day * 1.0 / n_days,
4,
),
"n_covid_healed":
np.round(
val_init.n_covid_healed +
(val_final.n_covid_healed - val_init.n_covid_healed) *
added_day * 1.0 / n_days,
4,
),
"n_covid_transfered":
np.round(
val_init.n_covid_transfered +
(val_final.n_covid_transfered - val_init.n_covid_transfered) *
added_day * 1.0 / n_days,
4,
),
"n_covid_refused":
np.round(
val_init.n_covid_refused +
(val_final.n_covid_refused - val_init.n_covid_refused) *
added_day * 1.0 / n_days,
4,
),
"n_covid_free":
np.round(
val_init.n_covid_free +
(val_final.n_covid_free - val_init.n_covid_free) * added_day *
1.0 / n_days,
4,
),
"n_ncovid_free":
np.round(
val_init.n_ncovid_free +
(val_final.n_ncovid_free - val_init.n_ncovid_free) * added_day *
1.0 / n_days,
4,
),
"n_covid_occ":
np.round(
val_init.n_covid_occ +
(val_final.n_covid_occ - val_init.n_covid_occ) * added_day *
1.0 / n_days,
4,
),
"n_ncovid_occ":
np.round(
val_init.n_ncovid_occ +
(val_final.n_ncovid_occ - val_init.n_ncovid_occ) * added_day *
1.0 / n_days,
4,
),
}
dg = dg.append(pd.Series(new_row), ignore_index=True)
dg = dg.sort_values(by=["datetime"])
res_dfs.append(dg)
return pd.concat(res_dfs)
def enforce_daily_values_for_all_icus(d):
"""Guarantee that each ICU has a continuous daily timeseries.
Each missing day in the series is imputed by forward-filling from
the most recent day with data.
"""
dates = np.sort(d.date.unique())
def reindex_icu(x):
# Process data for an ICU.
# For repeated entries per day, only keep the last entry.
# This is necessary as we cannot re-index indexes with duplicates.
x = x.sort_values('datetime').drop_duplicates(['date'], keep='last')
# forward fill all missing values
x = x.set_index(['date']).reindex(dates, method='ffill').reset_index()
# backward fill categorical variables (that don't change with time)
cat_columns = ['icu_name', 'department', 'region', 'region_id']
x[cat_columns] = x[cat_columns].fillna(method='bfill')
# Set all other variables to 0 before first observation
int_columns = dataset.CUM_COLUMNS + dataset.NCUM_COLUMNS
x[int_columns] = x[int_columns].fillna(0)
# Leave all unknown variables as NaN
return x
df = d.groupby('icu_name').apply(reindex_icu)
# Reproduce behaviour of earlier versions of this function
df['datetime'] = df['date']
df['create_date'] = df['date']
return df.reset_index(drop=True)
def spread_cum_jumps(d, icu_to_first_input_date):
assert np.all(d.date.values == d.datetime.values)
# TODO: do not hardcode this value
date_begin_transfered_refused = pd.to_datetime("2020-03-25").date()
dfs = []
for icu_name, dg in d.groupby("icu_name"):
dg = dg.sort_values(by="date")
dg = dg.reset_index()
already_fixed_col = set()
for switch_point, cols in (
(icu_to_first_input_date[icu_name], dataset.CUM_COLUMNS),
(
date_begin_transfered_refused,
["n_covid_transfered", "n_covid_refused"],
),
):
beg = max(
dg.date.min(),
switch_point - pd.Timedelta("2D"),
)
end = min(
dg.date.max(),
switch_point + pd.Timedelta("2D"),
)
for col in cols:
if col in already_fixed_col:
continue
beg_val = dg.loc[dg.date == beg, col]
if not len(beg_val):
continue
beg_val = beg_val.values[0]
end_val = dg.loc[dg.date == end, col]
if not len(end_val):
continue
end_val = end_val.values[0]
diff = end_val - beg_val
if diff >= SPREAD_CUM_JUMPS_MAX_JUMP[col]:
spread_beg = dg.date.min()
spread_end = end
spread_range = pd.date_range(spread_beg, spread_end, freq="1D").date
spread_value = diff // (spread_end - spread_beg).days
remaining = diff % (spread_end - spread_beg).days
dg.loc[dg.date.isin(spread_range), col] = np.clip(
np.cumsum(np.repeat(spread_value, len(spread_range))),
a_min=0,
a_max=end_val,
)
dg.loc[dg.date == end, col] = np.clip(
dg.loc[dg.date == end, col].values[0] + remaining,
a_min=0,
a_max=end_val,
)
already_fixed_col.add(col)
dfs.append(dg)
  return pd.concat(dfs)
from scipy.ndimage.filters import gaussian_filter1d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from functools import reduce
import sys
import seaborn as sns
import pytz
class ModelCompare:
"""
functions used to compare models
** error computations
** error plots
"""
def __init__(self,models_predictions,true_values,user_time_index=None,segments_index=None):
"""
        models_predictions is a dictionary mapping model_name to a predictions dataframe
true_values is the ground truth dataframe
user_time_index: array/list of datetime like objects (default None)
the filter on predicted hours
segments_index : array/list of str (default None)
the filter on predicted sections
"""
# fontsize on plots
self.fontsize= 10
# list of markers used on plots
self.markers=dict(zip(models_predictions.keys(),[
'->',
':o',
':v',
':^',
':<',
':s',
':p',
':*',
':h',
':H',
':+',
':x',
':D',
':d',
':1',
':2',
':3',
':4',
':|',
':_'
]))
self.models_predictions = models_predictions
self.true_values = true_values
self.segments_index=segments_index
# filtering sections
if not segments_index is None :
self.true_values=self.true_values.loc[segments_index].copy()
for model_name, model_data in self.models_predictions.items() :
self.models_predictions[model_name]=self.models_predictions[model_name].loc[segments_index].copy()
# match time indexes of all models
if not self.compatibility() :
print("check data shape",file=sys.stderr)
print("shape used is the intersection of all columns")
self.time_index = reduce(np.intersect1d, [df.columns for df in self.models_predictions.values()])
self.time_index = np.intersect1d(self.time_index,self.true_values.columns)
self.time_index = pd.to_datetime(self.time_index).tz_localize("utc").tz_convert(pytz.timezone("Europe/Paris"))
print(self.time_index.shape)
# filtering time index
if not user_time_index is None :
self.time_index = user_time_index
def compatibility(self):
"""
test for time index compatibility between models and true values
"""
true_shape = self.true_values.shape
compatible = True
for model_name, model_data in self.models_predictions.items() :
if true_shape != model_data.shape :
print(model_name +" has different shape from true values, shapes : model "+str(model_data.shape)+" true shape "+str(true_shape),file=sys.stderr)
compatible=False
return compatible
def plotDiscreteSpeedError(self,intercept =0,xlabel=""):
"""
plots average absolute error per discrete speed
intercept : int or dataframe(same shape as ground truth)
whether to restore intercept before plotting
xlabel : str
"""
if not type(intercept) is int :
intercept = intercept[self.time_index]
if not self.segments_index is None :
intercept = intercept.loc[self.segments_index].copy()
for model_name, model_data in self.models_predictions.items() :
error = abs((model_data[self.time_index] -self.true_values[self.time_index]).values.flatten().round())
true_y=(self.true_values[self.time_index] + intercept).values.flatten().round()
arsort=true_y.argsort()
error = error[arsort]
true_y = true_y[arsort]
y_idx=np.unique(true_y,return_index=True)[0]
split_idx = np.unique(true_y,return_index=True)[1][1:]
y_mean_error=np.fromiter([np.mean(x) for x in np.split(error ,split_idx)],dtype=float)
plt.plot(y_idx,y_mean_error,self.markers[model_name],label=model_name)
plt.xlabel(xlabel,fontsize=self.fontsize)
plt.ylabel("mean absolute error",fontsize=self.fontsize)
plt.legend(loc=2)
plt.twinx(plt.gca())
(self.true_values[self.time_index] + intercept).round().stack().value_counts().sort_index().plot(label="counts",style='k:',grid=False)
plt.ylabel("counts",fontsize=self.fontsize);
plt.legend(loc=1)
def comparisonTable(self):
"""
compute MSE and MAE error for all models
return dataframe of error for each model
"""
results =[]
for model_name, model_data in self.models_predictions.items() :
preds = model_data[self.time_index]
true = self.true_values[self.time_index]
results.append({
'model_name':model_name,
'mse':self.mse(preds.values.flatten(),true.values.flatten()),
'mae':self.mae(preds.values.flatten(),true.values.flatten())
}
)
return pd.DataFrame(results).set_index('model_name')
def plotTimeError(self):
"""
plot error for each time period
"""
for model_name, model_data in self.models_predictions.items() :
preds = model_data[self.time_index]
true = self.true_values[self.time_index]
            # NOTE: the original statement was truncated here; grouping the columns
            # by hour and plotting the mean absolute error is an assumed reconstruction.
            error = abs(preds - true).groupby(
                pd.to_datetime(self.time_index).hour, axis=1
            ).mean().mean(axis=0)
            plt.plot(error.index, error.values, self.markers[model_name], label=model_name)
        plt.xlabel("hour of day", fontsize=self.fontsize)
        plt.ylabel("mean absolute error", fontsize=self.fontsize)
        plt.legend()
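# Example (sketch): `preds_a`, `preds_b` and `truth` are assumed to be DataFrames
# indexed by segment with timestamps as columns.
#
#     cmp = ModelCompare({"model_a": preds_a, "model_b": preds_b}, truth)
#     cmp.comparisonTable()
#     cmp.plotTimeError()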
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from rumi.io import filemanager
from rumi.io import config
from rumi.io import common
from rumi.io import constant
from rumi.io import loaders
from rumi.io import utilities
import logging
import os
import functools
import numpy as np
import itertools
import math
logger = logging.getLogger(__name__)
def load_param(param_name, subfolder):
"""Loader function to be used by yaml framework. do not use this
directly.
"""
filepath = filemanager.find_filepath(param_name, subfolder)
logger.debug(f"Reading {param_name} from file {filepath}")
df = loaders.read_csv(param_name, filepath)
return df
def get_filtered_parameter(param_name):
"""Returns supply parameter at balancing time and balancing area.
This function will do necessary collapsing and expansion of
parameter data. It will do this operation on all float64 columns.
other columns will be treated as categorical.
:param: param_name
:returns: DataFrame
"""
param_data_ = loaders.get_parameter(param_name)
    if not isinstance(param_data_, pd.DataFrame) and param_data_ is None:
return param_data_
original_order = [c for c in param_data_.columns]
param_data = utilities.filter_empty(param_data_) # for test data
specs = filemanager.supply_specs()
if param_name in specs:
param_specs = specs[param_name]
folder = param_specs.get("nested")
geographic = param_specs.get("geographic")
time = param_specs.get("time")
if geographic:
param_data = filter_on_geography(
param_data, geographic, folder)
if time:
param_data = filter_on_time(param_data, time, folder)
param_data = preserve_column_order(
param_data, original_order)
return param_data.fillna("")
def preserve_column_order(dataframe, original_order):
class DummyDFColumns:
"""A class to simulate df.columns from pa.DataFrame
"""
def __init__(self, cols):
self.columns = list(cols)
def indexof_geo(oldcols):
subset_cols = utilities.get_geographic_columns_from_dataframe(
oldcols)
return oldcols.columns.index(subset_cols[-1])+1
def indexof_time(oldcols):
subset_cols = utilities.get_time_columns_from_dataframe(oldcols)
return oldcols.columns.index(subset_cols[-1])+1
def extra_geo(dataframe, oldcols):
geo = utilities.get_geographic_columns_from_dataframe(dataframe)
return [c for c in geo if c not in oldcols.columns]
def extra_time(dataframe, oldcols):
time = utilities.get_time_columns_from_dataframe(dataframe)
return [c for c in time if c not in oldcols.columns]
def new_order(dataframe, oldcols):
cols = [c for c in oldcols]
oldcols_ = DummyDFColumns(cols)
if utilities.get_geographic_columns_from_dataframe(oldcols_):
for i, c in enumerate(extra_geo(dataframe, oldcols_),
start=indexof_geo(oldcols_)):
cols.insert(i, c)
oldcols_ = DummyDFColumns(cols)
if utilities.get_time_columns_from_dataframe(oldcols_):
for i, c in enumerate(extra_time(dataframe, oldcols_),
start=indexof_time(oldcols_)):
cols.insert(i, c)
return cols
return dataframe.reindex(columns=new_order(dataframe, original_order))
def filter_empty_columns(data, filtercols):
rows = len(data)
empty = [c for c in filtercols if data[c].isnull(
).sum() == rows or (data[c] == "").sum() == rows]
return data[[c for c in data.columns if c not in empty]]
def filter_empty_geography(data):
"""filter out empty geographic columns"""
return filter_empty_columns(data,
utilities.get_geographic_columns_from_dataframe(data))
def filter_empty_time(data):
"""filter out empty time columns"""
return filter_empty_columns(data,
utilities.get_time_columns_from_dataframe(data))
def finest_geography_from_balancing(entities):
g = [common.get_geographic_columns(
common.balancing_area(e)) for e in entities]
return max(g, key=len)
@functools.lru_cache(maxsize=1)
def get_all_carriers():
    carriers = ["PhysicalPrimaryCarriers",
                "PhysicalDerivedCarriers", "NonPhysicalDerivedCarriers"]
    allcarriers = []
    for carrier in carriers:
        allcarriers.extend(
            list(loaders.get_parameter(carrier)['EnergyCarrier']))
return allcarriers
def finest_time_from_balancing(entities):
t = [common.get_time_columns(common.balancing_time(e)) for e in entities]
return max(t, key=len)
@functools.lru_cache(maxsize=16)
def find_EC(entity, value):
if entity == 'EnergyCarrier':
return value
elif entity == 'EnergyConvTech':
EnergyConvTechnologies = loaders.get_parameter(
'EnergyConvTechnologies')
ect = EnergyConvTechnologies.set_index('EnergyConvTech')
return ect.loc[value]['OutputDEC']
else:
EnergyStorTechnologies = loaders.get_parameter(
'EnergyStorTechnologies')
est = EnergyStorTechnologies.set_index('EnergyStorTech')
return est.loc[value]['StoredEC']
def get_entity_type(folder):
if folder == "Carriers":
return 'EnergyCarrier'
elif folder == "Storage":
return 'EnergyStorTech'
else:
return 'EnergyConvTech'
def filter_on_time(data, granularity, folder):
"""granularity is either 'fine' or 'coarse' and folder is one of 'Carriers',
'Technologies', 'Storage'
"""
entity = get_entity_type(folder)
entities = get_all_carriers()
timecols = finest_time_from_balancing(entities)
dfs = []
if granularity == "fine":
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_time(data.query(q))
balancing_time = common.balancing_time(find_EC(entity, item))
d = group_by_time(d, balancing_time, timecols)
dfs.append(d)
else:
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_time(data.query(q))
balancing_time = common.balancing_time(find_EC(entity, item))
d = expand_by_time(d, entity, balancing_time, timecols)
dfs.append(d)
return pd.concat(dfs).reset_index(drop=True)
def get_nontime_columns(d):
return [c for c in d.columns if (not pd.api.types.is_float_dtype(d[c])) and c not in constant.TIME_SLICES]
def group_by_time(d, balancing_time, superset_cols):
timecols_ = common.get_time_columns(balancing_time)
othercols = get_nontime_columns(d)
d = utilities.groupby_time(d.fillna(""), othercols, balancing_time).copy()
rows = len(d)
diff = [c for c in superset_cols if c not in timecols_]
for c in diff:
        d[c] = pd.Series([""] * rows, dtype=str, name=c)
    return d
"""
main module used for running the inference on simple network sim
"""
from __future__ import annotations
import datetime as dt
import logging.config
import sys
import time
from abc import ABC, abstractmethod
from typing import Tuple, List, Dict, Type, ClassVar, Union
import numpy as np
import pandas as pd
import scipy.stats as stats
from data_pipeline_api import standard_api
from more_itertools import pairwise
from . import loaders, common
from . import network_of_populations as ss
from . import sampleUseOfModel as sm
sys.path.append('..')
logger = logging.getLogger(__name__)
def uniform_pdf(
x: Union[float, np.array],
a: Union[float, np.array],
b: Union[float, np.array]
) -> Union[float, np.array]:
"""pdf function for uniform distribution
:param x: value at which to evaluate the pdf
:param a: lower bound of the distribution
:param b: upper bound of the distribution
"""
return ((a <= x) & (x <= b)) / (b - a)
def lognormal(mean: float, stddev: float, stddev_min: float = -np.inf) -> stats.rv_continuous:
"""Constructs Scipy lognormal object to match a given mean and std
dev passed as input. The parameters to input in the model are inverted
from the formulas:
.. math::
        if X ~ LogNormal(mu, sigma)
        then:
E[X] = exp{mu + sigma^2 * 0.5}
Var[X] = (exp{sigma^2} - 1) * exp{2 * mu + sigma^2}
The stddev is taken as a % of the mean, floored at 10. This
allows natural scaling with the size of the population inside the
nodes, always allowing for a minimal uncertainty.
:param mean: Mean to match
:param stddev: Std dev to match
:param stddev_min: Minimum std dev to match
:return: Distribution object representing a lognormal distribution with
the given mean and std dev
"""
stddev = np.maximum(mean * stddev, stddev_min)
sigma = np.sqrt(np.log(1 + (stddev**2 / mean**2)))
mu = np.log(mean / np.sqrt(1 + (stddev**2 / mean**2)))
return stats.lognorm(s=sigma, loc=0., scale=np.exp(mu))
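# Quick check (sketch): the frozen distribution above reproduces the requested
# moments; for mean=10 with a 20% relative std dev floored at 1.0 the std dev
# becomes max(10 * 0.2, 1.0) = 2.0.
#
#     dist = lognormal(10.0, 0.2, stddev_min=1.0)
#     assert abs(dist.mean() - 10.0) < 1e-9
#     assert abs(dist.std() - 2.0) < 1e-9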
def split_dataframe(multipliers, partitions, col="Contact_Multiplier"):
df = multipliers.copy()
    df.Date = pd.to_datetime(df.Date)
import unittest
import pandas as pd
import numpy as np
import datetime
import pytz
from variable_explorer_helpers import describe_pd_dataframe
class TestDataframeDescribe(unittest.TestCase):
def test_dataframe(self):
df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 2)
self.assertEqual(result['column_count'], 2)
self.assertEqual(len(result['rows_top']), 2)
self.assertEqual(result['rows_bottom'], None)
self.assertEqual(result['columns'][0]['name'], 'col1')
def test_dataframe_sort(self):
df = pd.DataFrame(data={'col1': [3, 1, 2]})
result = describe_pd_dataframe(df.sort_values('col1'))
self.assertEqual(result['rows_top'][0]['col1'], 1)
self.assertEqual(result['rows_top'][1]['col1'], 2)
self.assertEqual(result['rows_top'][2]['col1'], 3)
# _deepnote_index_column is hidden on frontend. See variable_explorer_helpers for more info.
self.assertEqual(result['rows_top'][0]['_deepnote_index_column'], 1)
# TODO: Support non-hashable types like []
def test_categorical_columns(self):
df = pd.DataFrame(data={
'cat1': ['a', 'b', 'c', 'd'],
'cat2': ['a', 'b', None, 'd'],
# 'cat3': [1, (2,3), '4', []],
'cat3': [1, (2,3), '4', 5],
'cat4': [True, True, True, False],
})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 4)
self.assertEqual(result['column_count'], 4)
self.assertEqual(len(result['rows_top']), 4)
self.assertEqual(result['rows_bottom'], None)
self.assertDictEqual(result['columns'][0], {
'name': 'cat1',
'dtype': 'object',
'stats': {
'unique_count': 4,
'nan_count': 0,
'categories': [
{'name': 'a', 'count': 1},
{'name': 'b', 'count': 1},
{'name': '2 others', 'count': 2},
]
},
})
self.assertEqual(result['columns'][1]['stats']['categories'], [
{'name': 'a', 'count': 1},
{'name': '2 others', 'count': 2},
{'name': 'Missing', 'count': 1},
])
# TODO: Support for big ints which can't be converted to float64 and complex numbers
def test_numerical_columns(self):
df = pd.DataFrame(data={
'col1': [1, 2, 3, 4],
'col2': [1, 2, None, 4],
# 'col3': [1, 2.1, complex(-1.0, 0.0), 10**1000]
'col3': [1, 2.1, 3, 4]
})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 4)
self.assertEqual(result['column_count'], 3)
self.assertEqual(len(result['rows_top']), 4)
self.assertEqual(result['rows_bottom'], None)
self.assertEqual(result['columns'][0]['name'], 'col1')
def test_big_dataframe(self):
import numpy as np
df = pd.DataFrame(data={
'col1': np.arange(100000),
'col2': np.arange(100000),
'col3': np.arange(100000),
})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 100000)
self.assertEqual(result['column_count'], 3)
self.assertEqual(len(result['rows_top']), 166)
self.assertEqual(len(result['rows_bottom']), 167)
self.assertTrue('stats' in result['columns'][0])
self.assertTrue('stats' not in result['columns'][1])
df = pd.DataFrame(data={
'col1': np.arange(200000),
'col2': np.arange(200000),
'col3': np.arange(200000),
})
result = describe_pd_dataframe(df)
self.assertTrue('stats' not in result['columns'][0])
def test_no_rows(self):
df = pd.DataFrame(data={
'col1': [],
'col2': [],
})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 0)
self.assertEqual(result['column_count'], 2)
def test_no_columns(self):
df = pd.DataFrame(data={})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 0)
self.assertEqual(result['column_count'], 0)
def test_duplicate_columns(self):
df = pd.DataFrame(data={
'col1': ['a', 'b', 'c', 'd'],
'col2': [1, 2, 3, 4],
})
df.columns = ['col1', 'col1']
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 4)
self.assertEqual(result['column_count'], 2)
self.assertEqual(result['columns'][0]['name'], 'col1')
self.assertEqual(result['columns'][1]['name'], 'col1.1')
def test_nans(self):
df = pd.DataFrame(data={
'col1': [None, None, None],
})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 3)
self.assertEqual(result['column_count'], 1)
self.assertEqual(result['columns'][0]['stats'], {
'unique_count': 0,
'nan_count': 3,
'categories': [
{'name': 'Missing', 'count': 3},
]
})
def test_datetime(self):
df1 = pd.DataFrame(data={
'col1': [1,2],
'col2': [datetime.date(2000,1,1), datetime.time(10,30)],
'col3': [datetime.datetime.now().astimezone(pytz.timezone('UTC')), datetime.datetime.now().astimezone(None)]
})
result1 = describe_pd_dataframe(df1)
self.assertEqual(result1['row_count'], 2)
self.assertEqual(result1['column_count'], 3)
        df2 = pd.DataFrame(np.random.randn(2, 3), index=pd.date_range('1/1/2000', periods=2))
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Invalid comparison between dtype=category and str"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
        tm.assert_series_equal(result, expected)
#
# Copyright (c) Sinergise, 2019 -- 2021.
#
# This file belongs to subproject "field-delineation" of project NIVA (www.niva4cap.eu).
# All rights reserved.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
#
import gc
import logging
import os
import sys
from glob import glob
import fiona
import geopandas as gpd
import pandas as pd
from shapely.geometry import Polygon
from fd.utils import multiprocess
from fd.vectorisation import merge_intersecting, split_intersecting
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
def get_bounds(gdf):
with fiona.open(gdf) as src:
return Polygon.from_bounds(*src.bounds)
def _join_overlapping_gdfs(gdf1, gdf2):
assert gdf1.crs == gdf2.crs, f'The inputs [{gdf1}, {gdf2}] are not in the same CRS!'
bounds1 = Polygon.from_bounds(*list(gdf1.total_bounds))
bounds2 = Polygon.from_bounds(*list(gdf2.total_bounds))
overlap = bounds1.intersection(bounds2)
non_overlaps1, overlaps1 = split_intersecting(gdf1, overlap)
non_overlaps2, overlaps2 = split_intersecting(gdf2, overlap)
intersecting = merge_intersecting(overlaps1, overlaps2)
    out = gpd.GeoDataFrame(pd.concat([non_overlaps1, non_overlaps2, intersecting]))
    return out
import pandas as pd
from xml_reader import read_tweet_text, read_all
def get_users():
users = []
with open("data/truth.txt") as file_in:
for line in file_in:
array = line.split(":::")
user = array[0]
truth = int(array[1][0])
users.append([user, truth])
return users
def create_separated_set():
data = []
users = get_users()
for user in users:
read_all(data, user[0] + '.xml', user[1])
return data
def assemble_pandas(dataset, column):
    dataset = pd.DataFrame(dataset)
from cProfile import label
from numpy import append
import pyaudio
import numpy
import warnings
from datetime import datetime
import tensorflow as tf
from pandas import DataFrame
def my_publish_callback(envelope, status):
if status.is_error():
... #handle error here
print(str(status.error_data.exception))
else:
print(envelope)
def get_noice_data():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
FORMAT = pyaudio.paFloat32
SAMPLEFREQ = 44100
FRAMESIZE = 1376
NOFFRAMES = 32
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,channels=1,rate=SAMPLEFREQ,input=True,frames_per_buffer=FRAMESIZE)
data = stream.read(NOFFRAMES*FRAMESIZE)
decoded = numpy.fromstring(data, 'float32')
return [decoded]
def append_to_excel(data):
    df = DataFrame([data])
__author__ = "<NAME>"
import pandas as pd
import numpy as np
from functools import partial
import pyproj
from shapely.ops import transform
from shapely.geometry import Point
import geopandas
import matplotlib.pyplot as plt
proj_wgs84 = pyproj.Proj(init='epsg:4326')
def geodesic_point_buffer(lat, lon, km):
# Azimuthal equidistant projection
aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'
project = partial(
pyproj.transform,
pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)),
proj_wgs84)
buf = Point(0, 0).buffer(km * 1000) # distance in metres
return transform(project, buf).exterior.coords[:]
# Example
#b = geodesic_point_buffer(45.4, -75.7, 100.0)
#print(b)
path = "../data/localData/"
nc = pd.read_csv(path+"newCases.csv") # to gen run convertDataTo...
temp = pd.read_csv(path+"temperature.csv") # to gen run joinTempCsv...
# ---------- CALCULATING RADIUS FOR NEW CASES ----------
countries = nc["Unnamed: 0"]
latlong = pd.DataFrame()
"""
Prepare training and testing datasets as CSV dictionaries
Created on 10/30/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['weight'], inp['percent_tumor_nuclei'],
inp['percent_total_cellularity'], inp['percent_necrosis'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# Get all svs images with its label as one file; level is the tile resolution level
def big_image_sum(pmd, path='../tiles/', ref_file='../feature_summary.csv'):
ref = pd.read_csv(ref_file, header=0)
ref = ref.loc[ref['used_in_proteome'] == True]
ref = ref.rename(columns={pmd: 'label'})
ref = ref.dropna(subset=['label'])
ref['sldnum'] = ref['slide_id'].str.split("-", n=2, expand=True)[2]
ref = ref[['case_id', 'sldnum', 'weight', 'percent_tumor_nuclei', 'percent_total_cellularity', 'percent_necrosis',
'age', 'label']]
ref = ref.rename(columns={'case_id': 'slide'})
ref1 = ref.copy()
ref2 = ref.copy()
ref['level'] = 0
ref['path'] = path + ref['slide'] + "/level" + ref['level'].map(str)
ref1['level'] = 1
ref1['path'] = path + ref1['slide'] + "/level" + ref1['level'].map(str)
ref2['level'] = 2
ref2['path'] = path + ref2['slide'] + "/level" + ref2['level'].map(str)
datapd = pd.concat([ref, ref1, ref2])
datapd = datapd.astype({'weight': 'int64', 'percent_tumor_nuclei': 'int64', 'percent_total_cellularity': 'int64',
'percent_necrosis': 'int64', 'age': 'int64', 'level': 'int64', 'label': 'int64'})
return datapd
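# Example (sketch): build the tile-level table for one prediction target; the
# column name 'label_col' below is a placeholder for a real feature column.
#
#     datapd = big_image_sum('label_col', path='../tiles/',
#                            ref_file='../feature_summary.csv')
#     set_sep(datapd, path='../split/', cls=datapd.label.nunique(), level=1)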
# separate into training and testing; each class is split with the same ratio at the big-image (slide) level
# test and train csv files contain tiles' path.
def set_sep(alll, path, cls, level=None, cut=0.3, batchsize=64):
trlist = []
telist = []
valist = []
if level:
alll = alll[alll['level'] == int(level)]
CPTAC = alll
for i in range(cls):
subset = CPTAC.loc[CPTAC['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
    test = pd.concat(telist)
"""Tests for time-related quality control functions."""
from datetime import datetime
import pytz
import pytest
import pandas as pd
from pandas.util.testing import assert_series_equal
from pvanalytics.quality import time
@pytest.fixture
def times():
"""One hour in Mountain Standard Time at 10 minute intervals.
Notes
-----
Copyright (c) 2019 SolarArbiter. See the file
LICENSES/SOLARFORECASTARBITER_LICENSE at the top level directory
of this distribution and at `<https://github.com/pvlib/
pvanalytics/blob/master/LICENSES/SOLARFORECASTARBITER_LICENSE>`_
for more information.
"""
MST = pytz.timezone('MST')
return pd.date_range(start=datetime(2018, 6, 15, 12, 0, 0, tzinfo=MST),
end=datetime(2018, 6, 15, 13, 0, 0, tzinfo=MST),
freq='10T')
def test_timestamp_spacing_date_range(times):
"""An index generated by pd.date_range has the expected spacing."""
assert_series_equal(
time.spacing(times, times.freq),
pd.Series(True, index=times)
)
def test_timestamp_spacing_one_timestamp(times):
"""An index with only one timestamp has uniform spacing."""
assert_series_equal(
time.spacing(times[[0]], times.freq),
pd.Series(True, index=[times[0]])
)
def test_timestamp_spacing_one_missing(times):
"""The timestamp following a missing timestamp will be marked False."""
assert_series_equal(
time.spacing(times[[0, 2, 3]], times.freq),
pd.Series([True, False, True], index=times[[0, 2, 3]])
)
def test_timestamp_spacing_too_frequent(times):
"""Timestamps with too high frequency will be marked False."""
assert_series_equal(
time.spacing(times, '30min'),
pd.Series([True] + [False] * (len(times) - 1), index=times)
)
def _get_sunrise(location, tz):
# Get sunrise times for 2020
days = pd.date_range(
start='1/1/2020',
end='1/1/2021',
freq='D',
tz=tz
)
return location.get_sun_rise_set_transit(
days, method='spa'
).sunrise
@pytest.mark.parametrize("tz, observes_dst", [('MST', False),
('America/Denver', True)])
def test_has_dst(tz, observes_dst, albuquerque):
sunrise = _get_sunrise(albuquerque, tz)
dst = time.has_dst(sunrise, 'America/Denver')
expected = pd.Series(False, index=sunrise.index)
expected.loc['2020-03-08'] = observes_dst
expected.loc['2020-11-01'] = observes_dst
assert_series_equal(
expected,
dst,
check_names=False
)
@pytest.mark.parametrize("tz, observes_dst", [('MST', False),
('America/Denver', True)])
def test_has_dst_input_series_not_localized(tz, observes_dst, albuquerque):
sunrise = _get_sunrise(albuquerque, tz)
sunrise = sunrise.tz_localize(None)
expected = pd.Series(False, index=sunrise.index)
expected.loc['2020-03-08'] = observes_dst
expected.loc['2020-11-01'] = observes_dst
dst = time.has_dst(sunrise, 'America/Denver')
assert_series_equal(
expected,
dst
)
@pytest.mark.parametrize("tz, observes_dst", [('MST', False),
('America/Denver', True)])
@pytest.mark.parametrize("freq", ['15T', '30T', 'H'])
def test_has_dst_rounded(tz, freq, observes_dst, albuquerque):
sunrise = _get_sunrise(albuquerque, tz)
# With rounding to 1-hour timestamps we need to reduce how many
# days we look at.
window = 7 if freq != 'H' else 1
expected = pd.Series(False, index=sunrise.index)
expected.loc['2020-03-08'] = observes_dst
expected.loc['2020-11-01'] = observes_dst
dst = time.has_dst(
sunrise.dt.round(freq),
'America/Denver',
window=window
)
assert_series_equal(expected, dst, check_names=False)
def test_has_dst_missing_data(albuquerque):
sunrise = _get_sunrise(albuquerque, 'America/Denver')
sunrise.loc['3/5/2020':'3/10/2020'] = pd.NaT
sunrise.loc['7/1/2020':'7/20/2020'] = pd.NaT
# Doesn't raise since both sides still have some data
expected = pd.Series(False, index=sunrise.index)
expected['3/8/2020'] = True
expected['11/1/2020'] = True
assert_series_equal(
time.has_dst(sunrise, 'America/Denver'),
expected
)
missing_all_before = sunrise.copy()
missing_all_after = sunrise.copy()
missing_all_before.loc['3/1/2020':'3/5/2020'] = pd.NaT
missing_all_after.loc['3/8/2020':'3/14/2020'] = pd.NaT
missing_data_message = r'No data at .*\. ' \
r'Consider passing a larger `window`.'
# Raises for missing data before transition date
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(missing_all_before, 'America/Denver')
# Raises for missing data after transition date
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(missing_all_after, 'America/Denver')
# Raises for missing data before and after the shift date
sunrise.loc['3/1/2020':'3/7/2020'] = pd.NaT
sunrise.loc['3/9/2020':'3/14/2020'] = pd.NaT
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(sunrise, 'America/Denver')
with pytest.warns(UserWarning, match=missing_data_message):
result = time.has_dst(sunrise, 'America/Denver', missing='warn')
expected.loc['3/8/2020'] = False
assert_series_equal(expected, result)
sunrise.loc['3/1/2020':'3/14/2020'] = pd.NaT
with pytest.warns(UserWarning, match=missing_data_message):
result = time.has_dst(sunrise, 'America/Denver', missing='warn')
assert_series_equal(expected, result)
with pytest.raises(ValueError, match=missing_data_message):
time.has_dst(sunrise, 'America/Denver')
def test_has_dst_gaps(albuquerque):
sunrise = _get_sunrise(albuquerque, 'America/Denver')
sunrise.loc['3/5/2020':'3/10/2020'] = pd.NaT
sunrise.loc['7/1/2020':'7/20/2020'] = pd.NaT
sunrise.dropna(inplace=True)
expected = pd.Series(False, index=sunrise.index)
expected['11/1/2020'] = True
assert_series_equal(
time.has_dst(sunrise, 'America/Denver'),
expected
)
def test_has_dst_no_dst_in_date_range(albuquerque):
sunrise = _get_sunrise(albuquerque, 'America/Denver')
july = sunrise['2020-07-01':'2020-07-31']
february = sunrise['2020-02-01':'2020-03-05']
expected_july = pd.Series(False, index=july.index)
expected_march = pd.Series(False, index=february.index)
assert_series_equal(
expected_july,
time.has_dst(july, 'America/Denver')
)
assert_series_equal(
expected_march,
time.has_dst(february, 'MST')
)
@pytest.fixture(scope='module', params=['H', '15T', 'T'])
def midday(request, albuquerque):
solar_position = albuquerque.get_solarposition(
pd.date_range(
start='1/1/2020', end='3/1/2020', closed='left',
tz='MST', freq=request.param
)
)
mid_day = (solar_position['zenith'] < 87).groupby(
solar_position.index.date
).apply(
lambda day: (day[day].index.min()
+ ((day[day].index.max() - day[day].index.min()) / 2))
)
mid_day = mid_day.dt.hour * 60 + mid_day.dt.minute
mid_day.index = pd.DatetimeIndex(mid_day.index, tz='MST')
return mid_day
def requires_ruptures(test):
"""Skip `test` if ruptures is not installed."""
try:
import ruptures # noqa: F401
has_ruptures = True
except ImportError:
has_ruptures = False
return pytest.mark.skipif(
not has_ruptures, reason="requires ruptures")(test)
@requires_ruptures
def test_shift_ruptures_no_shift(midday):
"""Daytime mask with no time-shifts yields a series with 0s for
shift amounts."""
shift_mask, shift_amounts = time.shifts_ruptures(
midday, midday
)
assert not shift_mask.any()
assert_series_equal(
shift_amounts,
pd.Series(0, index=midday.index, dtype='int64'),
check_names=False
)
@requires_ruptures
def test_shift_ruptures_positive_shift(midday):
"""Every day shifted 1 hour later yields a series with shift
of 60 for each day."""
shifted = _shift_between(
midday, 60,
start='2020-01-01',
end='2020-02-29'
)
expected_shift_mask = pd.Series(False, index=midday.index)
expected_shift_mask['2020-01-01':'2020-02-29'] = True
shift_mask, shift_amounts = time.shifts_ruptures(shifted, midday)
assert_series_equal(shift_mask, expected_shift_mask, check_names=False)
assert_series_equal(
shift_amounts,
pd.Series(60, index=shifted.index, dtype='int64'),
check_names=False
)
@requires_ruptures
def test_shift_ruptures_negative_shift(midday):
shifted = _shift_between(
midday, -60,
start='2020-01-01',
end='2020-02-29'
)
expected_shift_mask = | pd.Series(False, index=midday.index) | pandas.Series |
#%%
#==============================================================================#
# #
# Title: Make PostCodes Dataset #
# Purpose: To download and process the data for the App #
# Notes: ... #
# Author: chrimaho #
# Created: 26/Dec/2020 #
# References: ... #
# Sources: ... #
# Edited: ... #
# #
#==============================================================================#
#------------------------------------------------------------------------------#
# #
# Set Up ####
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Import packages ####
#------------------------------------------------------------------------------#
# -*- coding: utf-8 -*- #
# import click #<-- Interactivity
import logging #<-- For ease of debugging
from pathlib import Path #<-- Because we need a path forward
from dotenv import find_dotenv, load_dotenv #<-- It's nice to have an environment
import pandas as pd #<-- Frame your Data
from pprint import pprint
import os
import sys
#------------------------------------------------------------------------------#
# Import sources ####
#------------------------------------------------------------------------------#
# Set root directory ----
project_dir = Path(__file__).resolve().parents[2]
# Add directory to Sys path ----
try:
# The directory "." is added to the Path environment so modules can easily be called between files.
if not os.path.abspath(project_dir) in sys.path:
sys.path.append(os.path.abspath(project_dir))
except:
raise ModuleNotFoundError("The custom modules were not able to be loaded.")
# Import modules ----
from src import utils
from src import sources
#------------------------------------------------------------------------------#
# #
# Main Part ####
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Process Data ####
#------------------------------------------------------------------------------#
# Extract the data ----
def set_DataFrame(raw):
# Assertions
assert isinstance(raw, dict)
assert list(raw)==['header','dataSets','structure']
# Get data
data = raw['dataSets'][0]['observations']
# Coerce to DataFrame
data = | pd.DataFrame(data) | pandas.DataFrame |
import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
from shutil import copyfile
import fortranformat as ff
from itertools import zip_longest
from scipy.signal import argrelextrema, argrelmin
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from ast import literal_eval as make_tuple
import pyshtools
from scipy.io import loadmat
from pathlib import Path
from scipy.special import lpmn
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import copy
import cartopy.feature as cfeature
"""
author: <NAME>
contact: <EMAIL>
description: A script containing tools to post-process the orbits, obtained from a forward simulation (and also recovery), from epos-oc.
"""
def create_element_lines(ffp, splitstring):
#get titles
with open(ffp) as f:
LINES = f.readlines()
starts = []
for i,line in enumerate(LINES):
if line.startswith(splitstring):
starts.append(i)
ends=[]
for i in range(len(starts)):
ends.append(starts[i]+16)
blocks = list(zip(starts,ends))
format_float = ff.FortranRecordWriter('(E19.13)')
for block in blocks:
with open(ffp) as fp:
for i, line in enumerate(fp):
if i in range(block[0],block[1]):
if i==block[0]:
outfile = open('%s_ELEMENTSnew.txt' %line.strip(),'w')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-C\n')
if i>block[0]+1:
if line.startswith('Sat'):
outfile.write(' --- End initial elements GRACE-C\n')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-D\n')
if line.startswith('ELEMENT'):
val = line.strip().split()
val[5] = str(format_float.write([np.float(val[5])])).replace('E','e')
val[6] = str(format_float.write([np.float(val[6])])).replace('E', 'e')
if val[7] == '0201201': val[7] = '1804701'
if val[7] == '0201202': val[7] = '1804702'
str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') \
% (val[0], val[1], int(val[2]), int(val[3]),
int(val[4]), val[5], val[6], val[7])
outfile.write('%s\n' %str_new2)
if i==block[1]-1:
outfile.write(' --- End initial elements GRACE-D')
break
#
#
# def create_element_lines(ffp, splitstring):
# #input: Unformatted file that contains orbit elements needed to start each of the runs for GRACE-FO simulation
# #output: Orbit elements that can be used as input for prepare_EPOSIN_4_orbit_integration.sh (located at
# #/GFZ/project/f_grace/NGGM_SIM/SIM_FORWARD )
# with open(ffp) as f:
# lines = f.read().splitlines()
# splits = [i for i in lines if i.startswith(splitstring)]
# print(splits)
# n = 2 # group size
# m = 1 # overlap size
# splits_grouped = [splits[i:i + n] for i in range(0, len(splits), n - m)]
# print(splits_grouped)
#
#
# # # print(lines)
# # split = [i for i in lines if i.startswith('PP')]
# for i in splits_grouped:
# if len(i) > 1:
# start = i[0]
# end = i[1]
# out = '%s_ELEMENT_lines.txt' % (start.strip())
# with open(ffp) as infile, open(out, 'w') as outfile:
# copy = False
# titlewritten0 = False
# titlewritten1 = False
# firsttime6 = False
# linesread = 0
# outfile.write("\n")
#
# for line in infile:
# if line.strip() == start.strip():
# copy = True
# continue
# elif line.strip() == end.strip():
# copy = False
# continue
# elif copy:
# linesread += 1
#
# if not titlewritten0:
# outfile.write(' --- Begin initial elements GRACE-C\n')
# titlewritten0 = True
# if line.startswith(
# 'ELEMENT') and titlewritten0: # if line starts with ELEMENT and the first title has been written
# val = list(filter(None, line.strip().split(' ')))[0:-3]
# format_float = ff.FortranRecordWriter('(E19.13)')
# val5 = str(format_float.write([np.float(val[5])]))
# val6 = str(format_float.write([np.float(val[6])]))
#
# val5 = val5.replace('E', 'e')
# val6 = val6.replace('E', 'e')
#
#
# if val[7] == '0201201': val[7] = '1804701'
# if val[7] == '0201202': val[7] = '1804702'
# str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') % (val[0], val[1], int(val[2]), int(val[3]), int(val[4]), val5, val6, val[7])
#
#
# # outfile.write("\n")
# if int(val[2]) < 6:
# outfile.write(str_new2)
# outfile.write("\n")
#
# if int(val[
# 2]) == 6 and not titlewritten1: # if element six has been reached and no 'end1' has been written yet:
# if not firsttime6:
# titlewritten1 = True
# # titlewritten2 = True
# outfile.write(str_new2)
# outfile.write("\n")
# outfile.write(' --- End initial elements GRACE-C\n\n')
# outfile.write(' --- Begin initial elements GRACE-D\n')
#
# if int(val[2]) == 6:
# print(linesread)
# if linesread > 7:
# outfile.write(str_new2)
# outfile.write("\n")
#
# outfile.write(' --- End initial elements GRACE-D')
# outfile.write("\n")
# outfile.write('\n')
# outfile.close()
# infile.close()
def files(path):
#input: path to a directory
#output: files within the directory (omitting nested directories)
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
yield file
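# Usage sketch (directory is a placeholder): list only the plain files of a folder.
#   for fname in files('.'):
#       print(fname)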
def create_case_directories(fp, fp_out):
    #function to prepare the case directories for each of the simulations specified for the GRACE-FO project.
    #It builds a table of simulation cases (id, altitude, extension, separation, repeat interval, simulation direction) from the element files found in fp.
element_files = []
# current_dir = os.path.dirname(__file__)
for file in files(fp):
element_files.append(file)
IDs = ['PP.1', 'PP.2']
altitudes = [490, 490]
extens = [0, 0]
angles = [89, 89]
seperations = [200, 100]
repeats = [30, 30]
simdirs = ['FD', 'FD']
df = | pd.DataFrame(columns=['id', 'altitude', 'extens', 'seperation', 'repeatvals', 'sim_direction']) | pandas.DataFrame |
"""Principal component analysis module."""
from ase.atoms import Atoms
import pandas as pd
from sklearn.preprocessing import StandardScaler
from ase.io import Trajectory
import numpy as np
from ase.db import connect
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from ase.constraints import constrained_indices
from finetuna.ml_potentials.ocp_models.gemnet_t.int_descriptor_gemnet_t import (
IntDescriptorGemNetT,
)
from tqdm import tqdm
class TrajPCA:
"""
    Perform PCA on a given trajectory object, then save that analysis
    for use on later Atoms objects.
"""
def __init__(
self,
traj,
gemnet_descriptor_model_checkpoint_path=None,
):
"""
Arguments
----------
traj: Trajectory
the parent Trajectory for this system to be compared to
"""
if gemnet_descriptor_model_checkpoint_path is not None:
self.des_type = "ocp"
self.descriptor_model = IntDescriptorGemNetT(
gemnet_descriptor_model_checkpoint_path
)
else:
from flare_pp._C_flare import Structure, B2
self.flare_structure_class = Structure
self.des_type = "flare"
self.species_map = init_species_map(traj[0])
self.b2calc = B2(
"chebyshev",
"quadratic",
[0, 5],
[],
[len(self.species_map), 12, 3],
)
energies = []
des_list = []
energies.append([j.get_potential_energy() for j in traj])
for j, atoms in tqdm(
enumerate(traj),
total=len(traj),
position=1,
desc="init PCA",
disable=False,
):
des = self.get_des(atoms)
des_reshape = []
for a in des:
for b in a:
des_reshape.extend(np.ravel(np.array(b)))
des_list.append(des_reshape)
columns = []
for i in range(np.shape(des_list[0])[-1]):
columns.append(i)
df = | pd.DataFrame(des_list) | pandas.DataFrame |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import warnings
from pathlib import Path
import neurom as nm
import pandas as pd
from neurom.apps import morph_stats as ms
from neurom.exceptions import ConfigError
from neurom.features import _NEURITE_FEATURES, _MORPHOLOGY_FEATURES, _POPULATION_FEATURES
import pytest
from numpy.testing import assert_array_equal, assert_almost_equal
from pandas.testing import assert_frame_equal
DATA_PATH = Path(__file__).parent.parent / 'data'
SWC_PATH = DATA_PATH / 'swc'
REF_CONFIG = {
'neurite': {
'section_lengths': ['max', 'sum'],
'section_volumes': ['sum'],
'section_branch_orders': ['max', 'raw'],
'segment_midpoints': ['max'],
'max_radial_distance': ['mean'],
},
'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
'morphology': {
'soma_radius': ['mean'],
'max_radial_distance': ['mean'],
}
}
REF_CONFIG_NEW = {
'neurite': {
'section_lengths': {'modes': ['max', 'sum']},
'section_volumes': {'modes': ['sum']},
'section_branch_orders': {'modes': ['max', 'raw']},
'segment_midpoints': {'modes': ['max']},
'max_radial_distance': {'modes': ['mean']},
},
'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
'morphology': {
'soma_radius': {'modes': ['mean']},
'max_radial_distance': {'modes': ['mean']},
}
}
REF_OUT = {
'morphology': {
'mean_soma_radius': 0.13065629648763766,
'mean_max_radial_distance': 99.5894610648815,
},
'axon': {
'sum_section_lengths': 207.87975220908129,
'max_section_lengths': 11.018460736176685,
'max_section_branch_orders': 10,
'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
'sum_section_volumes': 276.73857657289523,
'max_segment_midpoints_0': 0.0,
'max_segment_midpoints_1': 0.0,
'max_segment_midpoints_2': 49.520305964149998,
'mean_max_radial_distance': 82.44254511788921,
},
'all': {
'sum_section_lengths': 840.68521442251949,
'max_section_lengths': 11.758281556059444,
'max_section_branch_orders': 10,
'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
'sum_section_volumes': 1104.9077419665782,
'max_segment_midpoints_0': 64.401674984050004,
'max_segment_midpoints_1': 48.48197694465,
'max_segment_midpoints_2': 53.750947521650005,
'mean_max_radial_distance': 99.5894610648815,
},
'apical_dendrite': {
'sum_section_lengths': 214.37304577550353,
'max_section_lengths': 11.758281556059444,
'max_section_branch_orders': 10,
'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
'sum_section_volumes': 271.9412385728449,
'max_segment_midpoints_0': 64.401674984050004,
'max_segment_midpoints_1': 0.0,
'max_segment_midpoints_2': 53.750947521650005,
'mean_max_radial_distance': 99.5894610648815,
},
'basal_dendrite': {
'sum_section_lengths': 418.43241643793476,
'max_section_lengths': 11.652508126101711,
'max_section_branch_orders': 10,
'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
'sum_section_volumes': 556.22792682083821,
'max_segment_midpoints_0': 64.007872333250006,
'max_segment_midpoints_1': 48.48197694465,
'max_segment_midpoints_2': 51.575580778049996,
'mean_max_radial_distance': 94.43342438865741,
},
}
def test_extract_stats_single_morphology():
m = nm.load_morphology(SWC_PATH / 'Neuron.swc')
res = ms.extract_stats(m, REF_CONFIG)
assert set(res.keys()) == set(REF_OUT.keys())
for k in ('morphology', 'all', 'axon', 'basal_dendrite', 'apical_dendrite'):
assert set(res[k].keys()) == set(REF_OUT[k].keys())
for kk in res[k].keys():
assert_almost_equal(res[k][kk], REF_OUT[k][kk], decimal=4)
def test_extract_stats_new_format():
m = nm.load_morphology(SWC_PATH / 'Neuron.swc')
res = ms.extract_stats(m, REF_CONFIG_NEW)
assert set(res.keys()) == set(REF_OUT.keys())
for k in ('morphology', 'all', 'axon', 'basal_dendrite', 'apical_dendrite'):
assert set(res[k].keys()) == set(REF_OUT[k].keys())
for kk in res[k].keys():
assert_almost_equal(res[k][kk], REF_OUT[k][kk], decimal=4)
def test_stats_new_format_set_arg():
m = nm.load_morphology(SWC_PATH / 'Neuron.swc')
config = {
'neurite': {
'section_lengths': {'kwargs': {'neurite_type': 'AXON'}, 'modes': ['max', 'sum']},
},
'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
'morphology': {
'soma_radius': {'modes': ['mean']},
}
}
res = ms.extract_stats(m, config)
assert set(res.keys()) == {'morphology', 'axon'}
assert set(res['axon'].keys()) == {'max_section_lengths', 'sum_section_lengths'}
assert set(res['morphology'].keys()) == {'mean_soma_radius'}
def test_extract_stats_scalar_feature():
m = nm.load_morphology(DATA_PATH / 'neurolucida' / 'bio_neuron-000.asc')
config = {
'neurite_type': ['ALL'],
'neurite': {
'number_of_forking_points': ['max'],
},
'morphology': {
'soma_volume': ['sum'],
}
}
res = ms.extract_stats(m, config)
assert res == {'all': {'max_number_of_forking_points': 277},
'morphology': {'sum_soma_volume': 1424.4383771584492}}
def test_extract_dataframe():
# Vanilla test
morphs = nm.load_morphologies([SWC_PATH / 'Neuron.swc', SWC_PATH / 'simple.swc'])
actual = ms.extract_dataframe(morphs, REF_CONFIG_NEW)
# drop raw features as they require too much test data to mock
actual = actual.drop(columns='raw_section_branch_orders', level=1)
expected = pd.read_csv(Path(DATA_PATH, 'extracted-stats.csv'), header=[0, 1], index_col=0)
assert_frame_equal(actual, expected, check_dtype=False)
# Test with a single morphology in the population
morphs = nm.load_morphologies(SWC_PATH / 'Neuron.swc')
actual = ms.extract_dataframe(morphs, REF_CONFIG_NEW)
# drop raw features as they require too much test data to mock
actual = actual.drop(columns='raw_section_branch_orders', level=1)
assert_frame_equal(actual, expected.iloc[[0]], check_dtype=False)
# Test with a config without the 'morphology' key
morphs = nm.load_morphologies([Path(SWC_PATH, name)
for name in ['Neuron.swc', 'simple.swc']])
config = {'neurite': {'section_lengths': ['sum']},
'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL']}
actual = ms.extract_dataframe(morphs, config)
idx = pd.IndexSlice
expected = expected.loc[:, idx[:, ['name', 'sum_section_lengths']]]
assert_frame_equal(actual, expected, check_dtype=False)
# Test with a Morphology argument
m = nm.load_morphology(Path(SWC_PATH, 'Neuron.swc'))
actual = ms.extract_dataframe(m, config)
assert_frame_equal(actual, expected.iloc[[0]], check_dtype=False)
# Test with a List[Morphology] argument
morphs = [nm.load_morphology(Path(SWC_PATH, name))
for name in ['Neuron.swc', 'simple.swc']]
actual = ms.extract_dataframe(morphs, config)
assert_frame_equal(actual, expected, check_dtype=False)
# Test with a List[Path] argument
morphs = [Path(SWC_PATH, name) for name in ['Neuron.swc', 'simple.swc']]
actual = ms.extract_dataframe(morphs, config)
assert_frame_equal(actual, expected, check_dtype=False)
# Test without any neurite_type keys, it should pick the defaults
config = {'neurite': {'total_length_per_neurite': ['sum']}}
actual = ms.extract_dataframe(morphs, config)
expected_columns = pd.MultiIndex.from_tuples(
[('property', 'name'),
('axon', 'sum_total_length_per_neurite'),
('basal_dendrite', 'sum_total_length_per_neurite'),
('apical_dendrite', 'sum_total_length_per_neurite'),
('all', 'sum_total_length_per_neurite')])
expected = pd.DataFrame(
columns=expected_columns,
data=[['Neuron.swc', 207.87975221, 418.43241644, 214.37304578, 840.68521442],
['simple.swc', 15., 16., 0., 31., ]])
| assert_frame_equal(actual, expected, check_dtype=False) | pandas.testing.assert_frame_equal |
## Change backend to tensorflow by editing $HOME/.keras/keras.json
## "backend": "tensorflow"
## Add modules that are necessary
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
import seaborn as sns
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from scipy import interp
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
##read in the data
shared = | pd.read_table("data/baxter.0.03.subsample.shared") | pandas.read_table |
#!python3
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
'''
This source code is a sample of using pandas:
it concatenates the monthly health-data DataFrames into one.
'''
df1 = | pd.read_csv('201704health.csv', encoding='utf-8', index_col='日付', parse_dates=True) | pandas.read_csv |
import pandas as pd
import sys
import os
import pysam
import json
sys.stderr = open(snakemake.log[0], "w")
sys.stdout = open(snakemake.log[0], "a")
KRAKEN_FILTER_KRITERIA = "D"
print(snakemake.params.get("voc"))
initial_reads_df = pd.DataFrame()
for sample, file in zip(snakemake.params.samples, snakemake.input.reads_unfiltered):
with open(file) as read_json:
number_reads = json.load(read_json)
raw_reads = int(number_reads["summary"]["before_filtering"]["total_reads"])
trimmed_reads = int(number_reads["summary"]["after_filtering"]["total_reads"])
initial_reads_df = initial_reads_df.append(
{
"# raw reads": raw_reads,
"# trimmed reads": trimmed_reads,
"sample": sample,
},
ignore_index=True,
)
initial_reads_df = initial_reads_df.set_index("sample")
initial_reads_df = initial_reads_df[["# raw reads", "# trimmed reads"]]
filtered_reads_df = pd.DataFrame()
for sample, file in zip(snakemake.params.samples, snakemake.input.reads_used_for_assembly):
infile = open(file, "r")
for line in infile.read().splitlines():
try:
num_reads = int(line)/4*2
except:
num_reads = 0
filtered_reads_df = filtered_reads_df.append(
{
"# used reads": int(num_reads),
"sample": sample,
},
ignore_index=True,
)
infile.close()
filtered_reads_df = filtered_reads_df.set_index("sample")
initial_df = pd.DataFrame()
for sample, file in zip(snakemake.params.samples, snakemake.input.initial_contigs):
contigs = {}
if os.stat(file).st_size == 0:
contigs[sample] = ""
else:
with open(file, "r") as fasta_unordered:
for line in fasta_unordered.read().splitlines():
if line.startswith(">"):
key = line
contigs[key] = ""
else:
contigs[key] += line
length_initial = 0
for key in contigs:
if len(contigs[key]) > length_initial:
length_initial = len(contigs[key])
initial_df = initial_df.append(
{
"initial contig (bp)": int(length_initial),
"sample": sample,
},
ignore_index=True,
)
final_df = | pd.DataFrame() | pandas.DataFrame |
import zipfile
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import requests
import urllib
proxy="http://127.0.0.1:1080"
# Create a ProxyHandler object
proxy_support=urllib.request.ProxyHandler({'http':proxy})
# Create an opener object
opener = urllib.request.build_opener(proxy_support)
# Install the opener on urllib.request
urllib.request.install_opener(opener)
print("download and unzip files")
dates = pd.date_range(pd.to_datetime('2001-05-01'), | pd.to_datetime('2018-06-30') | pandas.to_datetime |
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ts_charting.figure as figure
from ts_charting.figure import process_series
class Testprocess_data(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_already_aligned(self):
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
plot_series = process_series(series, plot_index)
tm.assert_almost_equal(series, plot_series)
tm.assert_almost_equal(plot_series.index, plot_index)
def test_partial_plot(self):
"""
Test plotting series that is a subset of plot_index.
Should align and fill with nans
"""
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
series = series[:50] # only first 50
plot_series = process_series(series, plot_index)
# have same index
tm.assert_almost_equal(plot_series.index, plot_index)
assert plot_series.count() == 50
assert np.all(plot_series[50:].isnull()) # method=None so fill with nan
assert np.all(plot_series[:50] == series[:50])
def test_unaligned_indexes(self):
"""
Test when series.index and plot_index have no common datetimes
"""
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
# move days to 11 PM the night before
shift_series = series.tshift(-1, '1h')
plot_series = process_series(shift_series, plot_index)
        # without method, data doesn't align and we get nothing but nans
        tm.assert_almost_equal(plot_series.index, plot_index) # index aligns properly
assert np.all(plot_series.isnull()) # no data
# method = 'ffill'
plot_series = process_series(shift_series, plot_index, method='ffill')
        # with method='ffill', the shifted data is forward filled onto plot_index
        tm.assert_almost_equal(plot_series.index, plot_index) # index aligns
# since we're forward filling a series we tshifted into past
# plot_series should just equal the original series
tm.assert_almost_equal(plot_series, series)
def test_different_freqs(self):
"""
Tests indexes of differeing frequencies. This is more of repeat
test of test_partial_plot but with many holes instead of one half missing
value.
"""
plot_index = pd.date_range(start="2000-01-01", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
grouped_series = series.resample('MS', 'max')
plot_series = process_series(grouped_series, plot_index)
| tm.assert_almost_equal(plot_series.index, plot_index) | pandas.util.testing.assert_almost_equal |
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import QuantileTransformer
from defaults import DATA_PATH, MAX_INTEGER
def anova_prep(df):
df = df.groupby(['subjectNo', 'condition'])['response'].mean()
df = df.unstack()
tmp = []
for subject in df.index:
for condition in df.loc[subject].keys():
tmp.append(
{
'subjectNo': subject,
'condition': condition,
'rating': df.loc[subject, condition],
}
)
return pd.DataFrame(tmp)
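# Illustrative sketch (input frame is assumed to carry subjectNo/condition/response):
# the result is one long-format row per (subjectNo, condition) pair, e.g.
#   long_df = anova_prep(trial_df)
#   # long_df columns: subjectNo, condition, rating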
def average_condition_rating_within_subject(df):
tmp = df.groupby(['subjectNo', 'condition'])['response'].mean()
return tmp.unstack()
def average_std_of_ratings(df):
return df.groupby(['subjectNo', 'condition'])['response'].std().\
groupby('condition').mean()
def box_plot(df, study_type, savefig=False, dpi=300):
tmp = df.groupby(['subjectNo', 'condition'])['response'].mean()
tmp = tmp.unstack()
tmp.columns = [s.replace('_', ' ') for s in tmp.columns]
plt.figure(figsize=(16, 6))
sns.boxplot(data=tmp)
plt.ylabel('rating')
plt.xlabel('condition')
if savefig:
plt.savefig(f"figs/study_type_{study_type}_boxplot.png", dpi=dpi)
else:
plt.show()
def extract_condition(df):
if 'condition' not in df:
def process(x):
return "_".join(x[-1].split('_')[:-1])
df['condition'] = df['stimulus'].str.split('/').apply(process)
return df
def extract_subject(df):
if 'subjectNo' not in df:
df['subjectNo'] = df['stimulus'].str.split("/").apply(
lambda x: x[1].split("_")[1])
return df
def extract_trials(df):
df = df[df['trial_type'] == 'audio-slider-response']
df = df[~df['stimulus'].str.contains("train")]
return df
def filter_by_basic(df, threshold=0.6):
"""Find subjectNo where the BASIC condition was rated below threshold."""
tmp1 = df[df['condition'] == 'BASIC'].groupby(['subjectNo'])[
'response'].min() > threshold
tmp2 = tmp1[tmp1]
print(f"N = {len(tmp2)}")
return df[df['subjectNo'].isin(tmp2.keys())]
def filter_by_control(df, threshold=0.6):
"""Find subjectNo where the CONTROL was rated greater than threshold."""
tmp1 = df[df['condition'] == 'CONTROL'].groupby(['subjectNo'])[
'response'].min() > threshold
tmp2 = tmp1[tmp1]
print(f"N = {len(tmp2)}")
return df[df['subjectNo'].isin(tmp2.keys())]
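# Usage sketch (the threshold value is an assumption): the two quality filters
# can be chained before any further analysis, e.g.
#   df_clean = filter_by_control(filter_by_basic(df, threshold=0.6), threshold=0.6)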
def get_good_participants(num_reject=2):
"""Filter participants by number of rejections"""
pattern = os.path.join(DATA_PATH, 'participant_demographic_data/*.csv')
files = glob(pattern)
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
### Training data
# Downloads, unpacks and reads DBpedia dataset.
dbpedia = learn.datasets.load_dataset('dbpedia')
X_train, y_train = | pandas.DataFrame(dbpedia.train.data) | pandas.DataFrame |
import chargerGuide
import chargingMethod
import findCurLoc
import findDestination
import findTheDistanceBetweenCoordinates as Dis
import fastChargerMarker
import slowChargerMarker
import chargingStationMarker
import pandas as pd
import folium as g
import osmnx as ox
import networkx as nx
import sys
import io
from PyQt5 import QtWidgets, QtWebEngineWidgets
cur_lat = 0.0
cur_lng = 0.0
dst_lat = 0.0
dst_lng = 0.0
chargingMethod.guide()
excel_source = pd.read_excel(
'ProjPrac/전기차-충전소-설치현황_20220316.xlsx', usecols=[1, 2, 3, 4, 5])
print("Please enter the area where you want to find the location of the charging station. ex)청주")
str_want_go = input()
int_line = excel_source['주소'].str.contains(str_want_go)
want_go_excel = excel_source[int_line]
want_go_excel.to_excel(
'ProjPrac/result.xlsx', sheet_name='Result')
cur_lat, cur_lng = findCurLoc.Find()
excel_source = pd.read_excel(
'ProjPrac/result.xlsx', usecols=[1, 2, 3, 4, 5])
print("Please enter the type of car you want.\n"
"SM3 Z.E, 레이EV, 소울EV, 닛산리프, 아이오닉EV, BMW i3, 스파크EV, 볼트EV, 테슬라")
car = input()
line = excel_source['지원차종'].str.contains(car)
want_go_excel = excel_source[line]
want_go_excel.to_excel(
'ProjPrac/result1.xlsx', sheet_name='Result')
excel_source = pd.read_excel(
'ProjPrac/result1.xlsx', usecols=[1, 2, 3, 4, 5])
chargerGuide.guide()
print("Pick a charger that you want between a fast charger and a slow charger\n"
"Fast, Slow, No Problem")
charger = input()
if charger == "Fast":
int_line1 = excel_source['급속충전기(대)'].astype(str).str.contains("1")
int_line2 = excel_source['급속충전기(대)'].astype(str).str.contains("2")
int_line3 = excel_source['급속충전기(대)'].astype(str).str.contains("3")
int_line4 = excel_source['급속충전기(대)'].astype(str).str.contains("4")
int_line5 = excel_source['급속충전기(대)'].astype(str).str.contains("5")
int_line6 = excel_source['급속충전기(대)'].astype(str).str.contains("6")
int_line7 = excel_source['급속충전기(대)'].astype(str).str.contains("7")
int_line8 = excel_source['급속충전기(대)'].astype(str).str.contains("8")
int_line9 = excel_source['급속충전기(대)'].astype(str).str.contains("9")
want_go_excel = excel_source[int_line1]
want_go_excel.to_excel(
'ProjPrac/result_charger_1.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line2]
want_go_excel.to_excel(
'ProjPrac/result_charger_2.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line3]
want_go_excel.to_excel(
'ProjPrac/result_charger_3.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line4]
want_go_excel.to_excel(
'ProjPrac/result_charger_4.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line5]
want_go_excel.to_excel(
'ProjPrac/result_charger_5.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line6]
want_go_excel.to_excel(
'ProjPrac/result_charger_6.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line7]
want_go_excel.to_excel(
'ProjPrac/result_charger_7.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line8]
want_go_excel.to_excel(
'ProjPrac/result_charger_8.xlsx', sheet_name='Fast_Charger')
want_go_excel = excel_source[int_line9]
want_go_excel.to_excel(
'ProjPrac/result_charger_9.xlsx', sheet_name='Fast_Charger')
excel_names = ['ProjPrac/result_charger_1.xlsx', 'ProjPrac/result_charger_2.xlsx', 'ProjPrac/result_charger_3.xlsx', 'ProjPrac/result_charger_4.xlsx',
'ProjPrac/result_charger_5.xlsx', 'ProjPrac/result_charger_6.xlsx', 'ProjPrac/result_charger_7.xlsx', 'ProjPrac/result_charger_8.xlsx',
'ProjPrac/result_charger_9.xlsx']
excels = [pd.ExcelFile(name) for name in excel_names]
frames = [x.parse(x.sheet_names[0], header=None, index_col=None)
for x in excels]
frames[1:] = [df[1:] for df in frames[1:]]
combined = pd.concat(frames)
combined.to_excel(
"ProjPrac/result_charger.xlsx", header=False, index=False)
elif charger == "Slow":
int_line1 = excel_source['완속충전기(대)'].astype(str).str.contains("1")
int_line2 = excel_source['완속충전기(대)'].astype(str).str.contains("2")
int_line3 = excel_source['완속충전기(대)'].astype(str).str.contains("3")
int_line4 = excel_source['완속충전기(대)'].astype(str).str.contains("4")
int_line5 = excel_source['완속충전기(대)'].astype(str).str.contains("5")
int_line6 = excel_source['완속충전기(대)'].astype(str).str.contains("6")
int_line7 = excel_source['완속충전기(대)'].astype(str).str.contains("7")
int_line8 = excel_source['완속충전기(대)'].astype(str).str.contains("8")
int_line9 = excel_source['완속충전기(대)'].astype(str).str.contains("9")
want_go_excel = excel_source[int_line1]
want_go_excel.to_excel(
'ProjPrac/result_charger_1.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line2]
want_go_excel.to_excel(
'ProjPrac/result_charger_2.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line3]
want_go_excel.to_excel(
'ProjPrac/result_charger_3.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line4]
want_go_excel.to_excel(
'ProjPrac/result_charger_4.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line5]
want_go_excel.to_excel(
'ProjPrac/result_charger_5.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line6]
want_go_excel.to_excel(
'ProjPrac/result_charger_6.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line7]
want_go_excel.to_excel(
'ProjPrac/result_charger_7.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line8]
want_go_excel.to_excel(
'ProjPrac/result_charger_8.xlsx', sheet_name='Normal_Charger')
want_go_excel = excel_source[int_line9]
want_go_excel.to_excel(
'ProjPrac/result_charger_9.xlsx', sheet_name='Result')
excel_names = ['ProjPrac/result_charger_1.xlsx', 'ProjPrac/result_charger_2.xlsx', 'ProjPrac/result_charger_3.xlsx', 'ProjPrac/result_charger_4.xlsx',
'ProjPrac/result_charger_5.xlsx', 'ProjPrac/result_charger_6.xlsx', 'ProjPrac/result_charger_7.xlsx', 'ProjPrac/result_charger_8.xlsx',
'ProjPrac/result_charger_9.xlsx']
excels = [pd.ExcelFile(name) for name in excel_names]
frames = [x.parse(x.sheet_names[0], header=None, index_col=None)
for x in excels]
frames[1:] = [df[1:] for df in frames[1:]]
combined = | pd.concat(frames) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 22:55:35 2020
@author: jingci
"""
import numpy as np
import pandas as pd
import pickle
from RLBrain import RLBrain
'''
Q-learning models for learning the position of the target
'''
class QLearningTable1(RLBrain):
def __init__(self, actions, state):
self.actions = actions # a list
if state == "RAW":
#The q_table without previous knowledge
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
elif state == "MAP":
#The q_table after map training
f = open(RLBrain.FILEPATH + 'q_table1.txt', 'rb')
self.q_table = pickle.load(f)
f.close()
elif state == "PATH":
#The q_table after path training
f = open(RLBrain.FILEPATH + 'path_qtable1.txt', 'rb')
self.q_table = pickle.load(f)
f.close()
class QLearningTable2(RLBrain):
def __init__(self, actions, state):
self.actions = actions # a list
if state == "RAW":
#The q_table without previous knowledge
self.q_table = | pd.DataFrame(columns=self.actions, dtype=np.float64) | pandas.DataFrame |
# Copyright (c) 2020, 2021, NECSTLab, Politecnico di Milano. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NECSTLab nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# * Neither the name of Politecnico di Milano nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 09:43:46 2020
@author: alberto.parravicini
"""
import pandas as pd
import json
import os
import numpy as np
import functools
from scipy.stats.mstats import gmean
DEFAULT_RES_DIR = "../../../../grcuda-data/results/scheduling"
# ASYNC_POLICY_NAME = "async" # If parsing new results;
ASYNC_POLICY_NAME = "default" # If parsing older results;
def load_data(input_date: str, skip_iter=0, remove_inf=True, remove_time_zero=True, benchmark="", phases=None) -> pd.DataFrame:
"""
Load the benchmark results located in the input sub-folder
:param input_date: name of the folder where results are located, as a subfolder of DEFAULT_RES_DIR
:param skip_iter: skip the first iterations for each benchmark, as they are considered warmup
:param remove_inf: remove rows with infinite speedup value, as they are not useful
:param remove_time_zero: if True, remove rows with 0 computation time;
:param benchmark: load data only for the specified benchmark
:param phases: list of benchmark phases to add as columns
:return: a DataFrame containing the results
"""
input_path = os.path.join(DEFAULT_RES_DIR, input_date)
# Load results as JSON;
data_dict = {}
for res in os.listdir(input_path):
with open(os.path.join(input_path, res)) as f:
if not benchmark or res.split("_")[6] == benchmark:
data_dict[res] = json.load(f)
phases_names = []
# Turn results into a pd.DataFrame;
rows = []
for k, v in data_dict.items():
row = []
# Parse filename;
benchmark, exec_policy, new_stream_policy, parent_stream_policy, dependency_policy, force_prefetch = k.split("_")[6:12]
force_prefetch = force_prefetch == "True"
row += [benchmark, exec_policy, new_stream_policy, parent_stream_policy, dependency_policy, force_prefetch, 0, 0, ""]
# Retrieve other information;
total_iterations = v["num_iterations"]
cpu_validation = v["cpu_validation"]
random_init = v["random_init"]
size_dict = v["benchmarks"][benchmark][ASYNC_POLICY_NAME]
row += [int(total_iterations), bool(cpu_validation), bool(random_init)]
# Parse data for each input data size, and other settings;;
for size, val_size in size_dict.items():
for realloc, val_realloc in val_size.items():
for reinit, val_reinit in val_realloc.items():
for block_size, val_block_size in val_reinit.items():
# Process each iteration;
block_size_1d, block_size_2d = block_size.split(",")
row[-6] = int(block_size_1d)
row[-5] = int(block_size_2d)
row[-4] = block_size_1d + ",8" # block_size_1d + "," + block_size_2d]
for curr_iteration in val_block_size:
num_iter = curr_iteration["iteration"]
gpu_result = curr_iteration["gpu_result"]
total_time_sec = curr_iteration["total_time_sec"]
overhead_sec = curr_iteration["overhead_sec"]
computation_sec = curr_iteration["computation_sec"]
# Process phases;
phases_time = []
if phases:
phases_time = [p["time_sec"] for p in curr_iteration["phases"] if p["name"] in phases]
if not phases_names:
phases_names = [p["name"] for p in curr_iteration["phases"] if p["name"] in phases]
# Add a new row;
if (num_iter >= skip_iter):
rows += [row + [int(size), bool(realloc), bool(reinit), num_iter - skip_iter, gpu_result, total_time_sec, overhead_sec, computation_sec] + phases_time]
columns = ["benchmark", "exec_policy", "new_stream_policy", "parent_stream_policy",
"dependency_policy", "force_prefetch", "block_size_1d", "block_size_2d", "block_size_str",
"total_iterations", "cpu_validation", "random_init", "size", "realloc", "reinit",
"num_iter", "gpu_result", "total_time_sec", "overhead_sec", "computation_sec"] + (phases_names if phases else [])
data = pd.DataFrame(rows, columns=columns).sort_values(by=columns[:14], ignore_index=True)
# Clean columns with 0 computation time;
if remove_time_zero:
data = data[data["computation_sec"] > 0].reset_index(drop=True)
# Compute speedups;
compute_speedup(data, ["benchmark", "new_stream_policy", "parent_stream_policy",
"dependency_policy", "force_prefetch", "block_size_1d", "block_size_2d",
"total_iterations", "cpu_validation", "random_init", "size", "realloc", "reinit"])
# Clean columns with infinite speedup;
if remove_inf:
data = data[data["computation_speedup"] != np.inf].reset_index(drop=True)
return data
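# Usage sketch (folder, benchmark and phase names are placeholders):
#   data = load_data("2020_06_20_10_30", skip_iter=3, benchmark="b1",
#                    phases=["phase_a", "phase_b"])
# loads one result folder, drops the warm-up iterations and keeps two phase timings.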
def load_data_cuda(input_date: str, skip_iter=0, remove_inf=True, remove_time_zero=True, add_prefetch_as_policy=True) -> pd.DataFrame:
"""
Load the benchmark results located in the input sub-folder
:param input_date: name of the folder where results are located, as a subfolder of DEFAULT_RES_DIR
:param skip_iter: skip the first iterations for each benchmark, as they are considered warmup
:param remove_inf: if True, remove rows with infinite speedup
:param remove_time_zero: if True, remove rows with 0 computation time;
:param add_prefetch_as_policy: if True, consider prefetching as part of the policy, to compute speedups w.r.t. sync with no prefetching
:return: a DataFrame containing the results
"""
input_path = os.path.join(DEFAULT_RES_DIR, input_date)
# Load results as pd.DataFrames;
data_tmp = []
for f in os.listdir(input_path):
# Parse filename;
try:
benchmark, exec_policy, size, block_size_1d, block_size_2d, force_prefetch, total_iterations, num_blocks = os.path.splitext(f)[0].split("_")[7:]
force_prefetch = force_prefetch == "True"
except ValueError:
benchmark, exec_policy, size, block_size_1d, block_size_2d, total_iterations, num_blocks, force_prefetch = os.path.splitext(f)[0].split("_")[7:] + [False]
tmp_data = pd.read_csv(os.path.join(input_path, f))
# Skip first lines;
tmp_data = tmp_data.iloc[skip_iter:, :]
# Add other information;
tmp_data["benchmark"] = benchmark
tmp_data["exec_policy"] = exec_policy
tmp_data["force_prefetch"] = bool(force_prefetch)
tmp_data["size"] = int(size)
tmp_data["block_size_1d"] = int(block_size_1d)
tmp_data["block_size_2d"] = int(block_size_2d)
tmp_data["block_size_str"] = block_size_1d + ",8" # block_size_1d + "," + block_size_2d
tmp_data["total_iterations"] = int(total_iterations)
data_tmp += [tmp_data]
data = | pd.concat(data_tmp) | pandas.concat |
import pandas as pd
from scipy import stats
import numpy as np
import re
from mne.utils import warn
import nilearn
def glm_to_tidy(info, statistic, design_matrix, wide=True, order=None):
"""
Export GLM regression or contrast results in tidy format.
Creates a long pandas data frame from regression results or contrast
as computed by run_glm or compute_contrast.
Parameters
----------
info : MNE.Info
Instance of MNE info.
statistic : nilearn data,
Either dict of nilearn.stats.regression.RegressionResults as returned
by run_glm, or nilearn.stats.contrasts.Contrast as returned by
compute_contrast.
design_matrix : DataFrame
As specified in Nilearn.
wide : Bool
Should the returned dataframe be in wide format. If False, then the
returned data will be in long format.
order : list
Order that the channels should be returned with.
Returns
-------
df : Tidy data frame,
Data from statistic object in tidy data form.
"""
if isinstance(statistic, dict) and \
isinstance(statistic[list(statistic.keys())[0]],
nilearn.glm.regression.RegressionResults):
df = _tidy_RegressionResults(info, statistic, design_matrix)
elif isinstance(statistic, nilearn.glm.contrasts.Contrast):
df = _tidy_Contrast(info, statistic, design_matrix)
else:
raise TypeError(
'Unknown statistic type. Expected dict of RegressionResults '
f'or Contrast type. Received {type(statistic)}')
if wide:
df = _tidy_long_to_wide(df, expand_output=True)
if order is not None:
df['old_index'] = df.index
df = df.set_index('ch_name')
df = df.loc[order, :]
df['ch_name'] = df.index
df.index = df['old_index']
df.drop(columns='old_index', inplace=True)
df.rename_axis(None, inplace=True)
return df
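# Usage sketch (objects are assumptions, for illustration only): with an MNE
# Info, the dict of RegressionResults returned by nilearn's run_glm, and the
# design matrix used for the fit,
#   df_wide = glm_to_tidy(raw.info, glm_estimates, design_matrix)
# returns one row per channel with the regression variables spread into columns.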
def _tidy_Contrast(data, glm_est, design_matrix):
df = pd.DataFrame()
for idx, ch in enumerate(data.ch_names):
df = pd.concat([
df,
pd.DataFrame({'ch_name': ch,
'ContrastType': glm_est.contrast_type,
'variable': "effect",
'value': glm_est.effect[0][idx]},
index=[0]),
pd.DataFrame({'ch_name': ch,
'ContrastType': glm_est.contrast_type,
'variable': "p_value",
'value': glm_est.p_value()[idx]},
index=[1]),
pd.DataFrame({'ch_name': ch,
'ContrastType': glm_est.contrast_type,
'variable': "stat",
'value': glm_est.stat()[idx]},
index=[2]),
pd.DataFrame({'ch_name': ch,
'ContrastType': glm_est.contrast_type,
'variable': "z_score",
'value': glm_est.z_score()[idx]},
index=[3]),
], ignore_index=True)
return df
def _tidy_RegressionResults(data, glm_est, design_matrix):
if not (data.ch_names == list(glm_est.keys())):
warn("MNE data structure does not match regression results")
theta_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
t_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
df_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
p_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
mse_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
se_estimates = np.zeros((len(glm_est), len(design_matrix.columns)))
for idx, name in enumerate(glm_est.keys()):
theta_estimates[idx, :] = glm_est[name].theta.T
df_estimates[idx, :] = glm_est[name].df_model
mse_estimates[idx, :] = glm_est[name].MSE[0]
for cond_idx, cond in enumerate(design_matrix.columns):
t_estimates[idx, cond_idx] = glm_est[name].t(
column=cond_idx)
p_estimates[idx, cond_idx] = 2 * stats.t.cdf(
-1.0 * np.abs(t_estimates[idx, cond_idx]),
df=df_estimates[idx, cond_idx])
se_estimates[idx, cond_idx] = np.sqrt(np.diag(
glm_est[name].vcov()))[cond_idx]
list_vals = [0] * ((len(data.ch_names) *
len(design_matrix.columns) * 6))
idx = 0
for ch_idx, ch in enumerate(data.ch_names):
for cond_idx, cond in enumerate(design_matrix.columns):
list_vals[0 + idx] = {'ch_name': ch, 'Condition': cond,
'variable': "theta",
'value': theta_estimates[ch_idx][cond_idx]}
list_vals[1 + idx] = {'ch_name': ch, 'Condition': cond,
'variable': "t",
'value': t_estimates[ch_idx][cond_idx]}
list_vals[2 + idx] = {'ch_name': ch, 'Condition': cond,
'variable': "df",
'value': df_estimates[ch_idx][cond_idx]}
list_vals[3 + idx] = {'ch_name': ch, 'Condition': cond,
'variable': "p_value",
'value': p_estimates[ch_idx][cond_idx]}
list_vals[4 + idx] = {'ch_name': ch, 'Condition': cond,
'variable': "mse",
'value': mse_estimates[ch_idx][cond_idx]}
list_vals[5 + idx] = {'ch_name': ch, 'Condition': cond,
'variable': "se",
'value': se_estimates[ch_idx][cond_idx]}
idx += 6
dict_vals, i = {}, 0
for entry in list_vals:
dict_vals[i] = {"ch_name": entry['ch_name'],
"Condition": entry['Condition'],
"variable": entry['variable'],
"value": entry['value']}
i = i + 1
df = | pd.DataFrame.from_dict(dict_vals, "index") | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
import cv2, glob
import numpy as np
import pandas as pd
from os import path
from math import isnan
from sklearn.metrics.pairwise import euclidean_distances
from JPP_precision import load_JPP_ply
from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files
from Modules.features_labels import make_labels
from Modules.coordinate_conversion import project_point_cloud
def make_ground_truth(test_filename):
n_joints = 19
ground_truth = np.ones((n_joints, 2))
label_img = cv2.imread("%s.png" % test_filename)[:, :, :3][:, :, ::-1]
label_array = make_labels(label_img).reshape(label_img.shape[:2])
parts2joints_map = np.array((0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, 11, 18, 18, 18, 18, 12, 13, 14, 15, 16, 17, 18))
for j in range(n_joints):
ground_truth[j, :] = np.mean(np.array(np.where(parts2joints_map[label_array] == j)), axis=1)
return ground_truth[:-1, :]
def JPP_precision():
args = get_args()
discr_setting_type = args.discr_setting_type
num_train_images = args.n_train_images
data_path = args.data_path
jpp_path = data_path + "Main/JointPositionPrediction/"
jpp_gt_path = jpp_path + "GroundTruth/"
jpp_out_path = jpp_path + "Output/"
eval_path = jpp_path + "Evaluation/"
test_path = args.test_path
n_test_images = args.n_test_images
device = "Kinect" if "SyntheticImages" in test_path else "Xtion"
target_joint_names = ["Head", "neck", "Chest", "Waist",
"rShoulder", "lShoulder", "rElbow", "lElbow", "rWrist", "lWrist", "rHand", "lHand",
"rKnee", "lKnee", "rAnkle", "lAnkle", "rFoot", "lFoot"]
n_joints = len(target_joint_names)
test_filenames = enum_test_files(data_path, args.test_path, n_test_images)
setting_str = "_" + str(num_train_images) + ("_%s" % discr_setting_type if discr_setting_type else "")
average_error_path = eval_path + "JPP_average_error_px" + setting_str + ".csv"
sum_prediction_error = np.zeros((n_joints+1,))
for test_filename in test_filenames:
test_filename_id = "/".join(test_filename.split("/")[-2:])
print(test_filename_id)
test_JPP_path = jpp_out_path + test_filename_id + setting_str + "_JPP.ply"
test_gt_path = jpp_gt_path + test_filename_id + "_px_gt.csv"
error_path = eval_path + test_filename_id + setting_str + "_JPP_error_px.csv"
if path.exists(test_gt_path):
gt_joint_positions = np.array( | pd.read_csv(test_gt_path, header=None) | pandas.read_csv |
import argparse
import csv
import gzip
import re
import numpy as np
import zarr
from scipy import sparse
from zarr import Blosc
import pandas as pd
import logging
COMPRESSOR = Blosc(cname="lz4", clevel=5, shuffle=Blosc.SHUFFLE, blocksize=0)
# the number of rows in a chunk for expression counts
CHUNK_ROW_SIZE = 10000
CHUNK_COL_SIZE = 10000
logging.basicConfig(level=logging.INFO)
def init_zarr(sample_id, path, file_format, schema_version):
"""Initializes the zarr output.
Args:
sample_id (str): sample or cell id
path (str): path to the zarr output
file_format (str): zarr file format [DirectoryStore, ZipStore]
schema_version (str): version string of this output to allow for parsing of future changes
Returns:
root (zarr.hierarchy.Group): initialized zarr group
"""
store = None
if file_format == "DirectoryStore":
store = zarr.DirectoryStore(path)
if file_format == "ZipStore":
store = zarr.ZipStore(path, mode="w")
# create the root group
root = zarr.group(store, overwrite=True)
# root.attrs['README'] = "The schema adopted in this zarr store may undergo changes in the future"
root.attrs["sample_id"] = sample_id
root.attrs["optimus_output_schema_version"] = schema_version
# Create the expression_matrix group
# root.create_group("expression_matrix", overwrite=True);
return root
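# Usage sketch (the sample id, output path and schema version below are placeholders, not
# values produced by the pipeline): create a DirectoryStore-backed root group and log its attrs.
def _example_init_zarr():
    root = init_zarr("sample-001", "/tmp/sample-001.zarr", "DirectoryStore", "1.0.0")
    logging.info("created zarr root with attrs: %s", dict(root.attrs))
    return root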
def add_gene_metrics(data_group, input_path, gene_ids, verbose=False):
"""Converts the gene metrics from the Optimus pipeline to zarr file
Args:
data_group (zarr.hierarchy.Group): datagroup object for the zarr
input_path (str): file containing gene metrics name and values
gene_ids (list): list of gene ids
verbose (bool): whether to output verbose messages for debugging purposes
"""
# read the gene metrics names and values
if input_path.endswith(".gz"):
with gzip.open(input_path, "rt") as f:
gene_metrics = [row for row in csv.reader(f)]
else:
with open(input_path, "r") as f:
gene_metrics = [row for row in csv.reader(f)]
# metric names we use [1:] to remove the empty string
if len(gene_metrics[0][1:]):
data_group.create_dataset(
"gene_metadata_numeric_name",
shape=(len(gene_metrics[0][1:]),),
compressor=COMPRESSOR,
dtype="<U80",
chunks=(len(gene_metrics[0][1:]),),
data=list(gene_metrics[0][1:]),
)
else:
logging.info(
'Not adding "gene_metadata_numeric_name" to zarr output: must have at least one metric'
)
if verbose:
logging.info("# gene numeric metadata", len(gene_metrics[0][1:]))
# Gene metric values, the row and column sizes
gene_ids_location = {
gene_id: index for index, gene_id in enumerate(gene_ids)
}
# ignore the first line with the metric names in text
ncols = 0
gene_id_to_metric_values = {}
for row in gene_metrics:
# only consider genes that are in the count matrix
if not row[0] in gene_ids_location:
continue
row_values = []
for value_string in row[1:]:
            # some standard deviation values are missing when a gene has only a single read match
try:
value = np.float32(value_string)
except ValueError:
value = np.nan
row_values.append(value)
gene_id_to_metric_values[row[0]] = row_values
# note that all of these lengths are assumed to be equal and this check is already done in the pipeline
ncols = len(row_values)
# now insert the metrics of the cells that are in count matrix, i.e., the global variable "cell_ids"
gene_metric_values = []
for gene_id in gene_ids:
if gene_id in gene_id_to_metric_values:
gene_metric_values.append(gene_id_to_metric_values[gene_id])
else:
# if no metrics for a cell present in the count matrix then fill them with np.nans
gene_metric_values.append([np.nan] * ncols)
nrows = len(gene_ids)
if verbose:
logging.info("# of genes: {}".format(nrows))
logging.info("# of gene metadate metrics: {}".format(ncols))
# now insert the dataset that has the numeric values for the qc metrics for the genes
if nrows and ncols:
data_group.create_dataset(
"gene_metadata_numeric",
shape=(nrows, ncols),
compressor=COMPRESSOR,
dtype=np.float32,
chunks=(nrows, ncols),
data=gene_metric_values,
)
else:
logging.info(
'Not adding "gene_metadata_numeric" to zarr output: either the #genes or # cell ids is 0'
)
def add_cell_metrics(
data_group, metrics_file, cell_ids, emptydrops_file, verbose=False,
):
"""Converts cell metrics from the Optimus pipeline to zarr file
Args:
data_group (zarr.hierarchy.Group): datagroup object for the zarr
input_path (str): file containing gene metrics name and values
cell_ids (list): list of cell ids
verbose (bool): whether to output verbose messages for debugging purposes
emptydrops_path (str): emptydrops csv file
"""
# Read the csv input files
metrics_df = pd.read_csv(metrics_file, dtype=str)
emptydrops_df = | pd.read_csv(emptydrops_file, dtype=str) | pandas.read_csv |
import datetime
import pandas as pd
import numpy as np
import re
import os
def remove_blanks(df, col_name):
ctr = 0
working_df = pd.DataFrame(df)
# remove any blanks from the run
try:
while True:
value = working_df.at[ctr, col_name].lower()
if re.search("^blank\d*.*$", value) or re.search("^0$", value):
working_df.drop(labels=ctr, inplace=True)
ctr += 1
except ValueError:
pass
except KeyError:
pass
working_df = working_df.reset_index(drop=True)
print(" Done!\n")
return working_df
def remove_pools(df, col_name):
working_df = pd.DataFrame(df)
col_lst = list(working_df.columns)
size = working_df.index
new_row = []
for i in range(len(size)):
cell_val = str(working_df.iloc[i][col_lst.index(col_name)]).split("/")
cell_val = cell_val[0]
currentrow = working_df.iloc[i]
currentrow = currentrow.values.tolist()
if not ("pool" in cell_val.lower().strip() or "panel" in cell_val.lower().strip()):
new_row.append(currentrow)
working_df = pd.DataFrame(new_row, columns=col_lst)
return working_df
def merge_dataframes(df1=None, df2=None, df1_drop=None, df_final_drop=None, join_lst=None, join_type=None):
# df1 from qc table, may have duplicate hsns. Remove common columns between
# the two dataframes. df2 from results table
join_dict = {}
for col in join_lst:
join_dict[col] = 'str'
df1 = df1.astype(join_dict)
df2 = df2.astype(join_dict)
df1.drop(labels=df1_drop, axis=1, inplace=True)
#df1.drop_duplicates(subset=['hsn'], inplace=True)
df_final = df2.merge(df1, how=join_type, on=join_lst)
df_final.drop(labels=df_final_drop, axis=1, inplace=True)
return df_final
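# Sketch of the merge contract (the frames and column names here are hypothetical): columns in
# df1_drop are removed from df1 before joining on join_lst, and df_final_drop afterwards.
def _example_merge_dataframes(qc_df, results_df):
    return merge_dataframes(df1=qc_df, df2=results_df,
                            df1_drop=["run_date"], df_final_drop=["tmp_id"],
                            join_lst=["hsn"], join_type="left")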
def format_hsn_col(df=None, hsn_colname=None, hsn_only=False):
df = remove_pools(df, hsn_colname)
df = remove_blanks(df, hsn_colname)
if hsn_only:
df.columns = [hsn_colname]
df[hsn_colname] = df.apply(lambda row: extract_hsn(row), axis=1)
df[hsn_colname] = df.apply(lambda row: str(row[hsn_colname]), axis=1)
df = df.rename(columns= {hsn_colname:'hsn'})
df.drop_duplicates(subset='hsn', inplace=True, ignore_index=True)
return df
def add_cols(obj=None, df=None, col_lst=None, col_func_map=None):
# iterate through all columns that should be in final df
for k in col_lst:
# if the column appears in the function mapping,
if k in col_func_map.keys():
# get the pointer to the func/value associated with the column
v = col_func_map[k]
try:
# try to get additional value to run apply function with
val = v[1]
try:
val = getattr(obj, v[1])
# try to catch v[1] as an object variable
df[k] = df.apply(lambda row: globals()[v[0]](row, val), axis=1)
except Exception:
# use v[1] as a constant argument to the function
df[k] = df.apply(lambda row: globals()[v[0]](row, v[1]), axis=1)
# no additional variables to supply to apply function
except IndexError:
# try using the value as a function
try:
df[k] = df.apply(lambda row: globals()[v[0]](row), axis=1)
# try using the value as a variable
except Exception:
val = getattr(obj, v[0])
df[k] = val
# if column not in mapping, insert empty column with appropriate
# name into the dataframe
else:
# try to insert the column
try:
df.insert(0, k, None)
# ValueError raised if column already exists in dataframe
except ValueError:
pass
return df
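# Illustration of the col_func_map contract consumed by add_cols (the mapping below is an
# assumption for demonstration, not the production configuration): each value names a
# module-level function, with an optional second element that is either an attribute of
# `obj` or a constant forwarded to that function.
_EXAMPLE_COL_FUNC_MAP = {
    "doc": ["format_date", "doc"],   # -> format_date(row, "doc")
    "sex": ["format_sex"],           # -> format_sex(row)
    "retrieved": ["get_today"],      # -> get_today(row)
}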
def format_date(row, colName):
if (isinstance(row[colName], pd.Timestamp)) or\
(isinstance(row[colName], datetime.datetime)):
if (not pd.isna(row[colName])):
return row[colName].strftime("%m/%d/%Y")
else:
return np.nan
else:
return np.nan
def get_today(row):
return datetime.datetime.today().strftime("%Y-%m-%d")
def get_pos(id):
pos_dict = {"A":1, "B":2, "C":3, "D":4, "E":5, "F":6, "G":7, "H":8}
pos = (int(id[-1])*8 - 8) + pos_dict[id[0]]
return pos
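# Quick sanity checks for the well-to-position mapping (assumes IDs like "A1".."H9"; note
# that id[-1] only reads one digit, so two-digit columns such as "A12" are out of scope here).
def _example_get_pos():
    assert get_pos("A1") == 1   # column 1 fills positions 1-8
    assert get_pos("H1") == 8
    assert get_pos("B3") == 18  # column 3 starts at 17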
def parse_seq_id(row, arg):
try:
seq_id = str(row["Sequence name"]).split("/")
except:
seq_id = str(row['seqName']).split("/")
# if split didn't find matches, it is dealing with folder, should
# be split by ".", also has different indexes for values
if len(seq_id) == 1:
# WF 3
seq_id = str(row["seqName"]).split(".")
if arg == "hsn":
return int(seq_id[0][0:7])
elif arg == "m_num":
return int(seq_id[1][-2:])
elif arg == "pos":
return int(seq_id[4][-2:])
elif arg == "run_num":
return int(seq_id[3])
elif arg == "date":
return seq_id[2]
else:
raise ValueError("Bad argument to parse_seq_id --> folder")
else:
# WF_4, WF_5
if arg == "hsn":
if len(seq_id[0]) > 9:
return int(seq_id[0])
else:
return int(seq_id[0][0:7])
elif arg == "m_num":
return int(seq_id[1][4:6])
elif arg == "pos":
return int(seq_id[2][-2:])
elif arg == "run_num":
return int(seq_id[1][-2:])
elif arg == "date":
return seq_id[1][7:17]
else:
raise ValueError("Bad argument to parse_seq_id --> file")
def extract_hsn(row):
hsn = str(row["Sample ID"])
if len(hsn) == 7:
return hsn
return hsn[:-2]
def format_str_cols(df):
df.columns = [str(col) for col in list(df.columns)]
return df
def format_sex(row, ber=False):
col = "sex"
if ber:
col = 'Patient_Gender'
if pd.isna(row[col]) or str(row[col]).upper() == "" or str(row[col]).upper() == "UNKNOWN" or str(row[col]).upper() == "U":
return "Unknown"
elif str(row[col]).upper() == "M":
return "Male"
elif str(row[col]).upper() == "F":
return "Female"
else:
return str(row[col]).capitalize()
def format_facility(row, facility_replace_dict):
if pd.isna(row['facility']):
return None
elif row['facility'] == "":
return None
else:
facility = str(row['facility']).lower()
for key in facility_replace_dict.keys():
facility = facility.replace(key, facility_replace_dict[key])
return facility.lower()
def parse_category(row, parse_category_dict):
facility = str(row['facility']).lower()
for key in parse_category_dict.keys():
if re.search(key, facility):
return parse_category_dict[key]
return None
def format_race(row):
if pd.isna(row['race']) or row['race'] == "U":
return "Unknown"
elif row['race'] == "":
return "Unknown"
elif str(row['race']).upper() == "W":
return "White"
else:
return str(row['race'])
def format_source(row):
source = str(row['source']).lower()
if len(source) > 2:
if source == "nasopharyngeal":
return "NP"
elif source == "sputum/saliva":
return "SV"
else:
return "OT"
else:
return row['source']
def add_cols_by_name(df, lst):
curr_cols = list(df.columns)
for col in lst:
if not (col in curr_cols):
df.insert(0, col, np.nan)
return df
def format_f_name(row):
if pd.isna(row['name']):
return None
elif row['name'].strip() == "":
return None
else:
full_name = str(row["name"])
names = full_name.split()
f_name = names[0].capitalize()
f_name = f_name.replace("'","''")
return f_name
def format_l_name(row, lst):
if pd.isna(row['name']):
return None
elif row['name'].strip() == "":
return None
else:
full_name = str(row["name"])
names = full_name.split()
for item in lst:
if item == names[-1].lower():
return names[-2].capitalize() + ", " + names[-1].upper()
l_name = names[-1].capitalize()
l_name = l_name.replace("'", "''")
return l_name
def drop_cols(df, lst):
curr_cols = list(df.columns)
for col in curr_cols:
if not (col in lst):
df = df.drop(columns = col)
return df
def get_age(row):
try:
born = datetime.datetime.strptime(row["dob"], "%m/%d/%Y").date()
except Exception:
born = row["dob"].to_pydatetime().date()
try:
tested = row['doc'].to_pydatetime().date()
except Exception:
tested = datetime.datetime.strptime(row['doc'], "%m/%d/%Y").date()
if | pd.isnull(born) | pandas.isnull |
# -*- coding: utf-8 -*-
import requests
import json
import pandas as pd
from io import StringIO
import numpy as np
import time
#
timezones={}
#function = 'TIME_SERIES_INTRADAY'
apii = 'https://www.alphavantage.co/query?function={function}&symbol={symbol}&interval={interval}&outputsize=full&datatype=csv&apikey='
apid = 'https://www.alphavantage.co/query?function={function}&symbol={symbol}&outputsize=full&datatype=csv&apikey='
#https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=ASML&interval=1min&outputsize=compact&datatype=csv&time_period=0&apikey=
sector = 'https://www.alphavantage.co/query?function=SECTOR&datatype=csv&apikey='
s_type = ['close','high','low']#,'open']
ma_types = [0,1,2,3,4,5,6,7,8]
#Moving average type By default, matype=0. INT 0 = SMA, 1 = EMA, 2 = Weighted Moving Average (WMA), 3 = Double Exponential Moving Average (DEMA), 4 = Triple Exponential Moving Average (TEMA), 5 = Triangular Moving Average (TRIMA), 6 = T3 Moving Average, 7 = Kaufman Adaptive Moving Average (KAMA), 8 = MESA Adaptive Moving Average (MAMA).
indicator_dict = {
'sma':'https://www.alphavantage.co/query?function=SMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'ema':'https://www.alphavantage.co/query?function=EMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'tema':'https://www.alphavantage.co/query?function=TEMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'macd':'https://www.alphavantage.co/query?function=MACD&symbol={symbol}&interval={interval}&series_type=close&fastperiod=12&slowperiod=26&signalperiod=9&datatype=csv&apikey=',
'macdext':'https://www.alphavantage.co/query?function=MACDEXT&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&signalperiod={signalperiod}&fastmatype={fastmatype}&slowmatype={slowmatype}&signalmatype={signalmatype}&datatype=csv&apikey=',
'stoch':'https://www.alphavantage.co/query?function=STOCH&symbol={symbol}&interval={interval}&fastkperiod={fastkperiod}&slowkperiod={slowkperiod}&slowdperiod={slowdperiod}&slowkmatype={slowkmatype}&slowdmatype={slowdmatype}&datatype=csv&apikey=',
'stochf':'https://www.alphavantage.co/query?function=STOCHF&symbol={symbol}&interval={interval}&fastkperiod={fastkperiod}&fastdperiod={fastdperiod}&fastdmatype={fastdmatype}&datatype=csv&apikey=',
'rsi':'https://www.alphavantage.co/query?function=RSI&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'stochrsi':'https://www.alphavantage.co/query?function=STOCHRSI&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&fastkperiod={fastkperiod}&fastdperiod={fastdperiod}&fastdmatype={fastdmatype}&datatype=csv&apikey=',
'willr':'https://www.alphavantage.co/query?function=WILLR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'adx':'https://www.alphavantage.co/query?function=ADX&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'adxr':'https://www.alphavantage.co/query?function=ADXR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'apo':'https://www.alphavantage.co/query?function=APO&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&matype={matype}&datatype=csv&apikey=',
'ppo':'https://www.alphavantage.co/query?function=PPO&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&matype={matype}&datatype=csv&apikey=',
'mom':'https://www.alphavantage.co/query?function=MOM&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'bop':'https://www.alphavantage.co/query?function=BOP&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'cci':'https://www.alphavantage.co/query?function=CCI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'cmo':'https://www.alphavantage.co/query?function=CMO&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'roc':'https://www.alphavantage.co/query?function=ROC&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'rocr':'https://www.alphavantage.co/query?function=ROCR&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'aroon':'https://www.alphavantage.co/query?function=AROON&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'aroonosc':'https://www.alphavantage.co/query?function=AROONOSC&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'mfi':'https://www.alphavantage.co/query?function=MFI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'trix':'https://www.alphavantage.co/query?function=TRIX&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'ultosc':'https://www.alphavantage.co/query?function=ULTOSC&symbol={symbol}&interval={interval}&timeperiod1={timeperiod1}&timeperiod2={timeperiod2}&timeperiod3={timeperiod3}&datatype=csv&apikey=',
'dx':'https://www.alphavantage.co/query?function=DX&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'minus_di':'https://www.alphavantage.co/query?function=MINUS_DI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'plus_di':'https://www.alphavantage.co/query?function=PLUS_DI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'minus_dm':'https://www.alphavantage.co/query?function=MINUS_DM&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'plus_dm':'https://www.alphavantage.co/query?function=PLUS_DM&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'bbands':'https://www.alphavantage.co/query?function=BBANDS&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&nbdevup={nbdevup}&nbdevdn={nbdevdn}&matype={matype}&datatype=csv&apikey=',
'midpoint':'https://www.alphavantage.co/query?function=MIDPOINT&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'midprice':'https://www.alphavantage.co/query?function=MIDPRICE&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'sar':'https://www.alphavantage.co/query?function=SAR&symbol={symbol}&interval={interval}&acceleration={acceleration}&maximum={maximum}&datatype=csv&apikey=',
'trange':'https://www.alphavantage.co/query?function=TRANGE&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'atr':'https://www.alphavantage.co/query?function=ATR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'natr':'https://www.alphavantage.co/query?function=NATR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'ad':'https://www.alphavantage.co/query?function=AD&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'adosc':'https://www.alphavantage.co/query?function=ADOSC&symbol={symbol}&interval={interval}&fastperiod={fastperiod}&slowperiod={slowperiod}&datatype=csv&apikey=',
'obv':'https://www.alphavantage.co/query?function=OBV&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'ht_trendline':'https://www.alphavantage.co/query?function=HT_TRENDLINE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
    'ht_sine':'https://www.alphavantage.co/query?function=HT_SINE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_trendmode':'https://www.alphavantage.co/query?function=HT_TRENDMODE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_dcperiod':'https://www.alphavantage.co/query?function=HT_DCPERIOD&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_dcphase':'https://www.alphavantage.co/query?function=HT_DCPHASE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_dcphasor':'https://www.alphavantage.co/query?function=HT_DCPHASOR&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey='
}
def moving_a(ma,symbol,interval):
api = indicator_dict[ma]
ma_range = [5,10,15,20,35,50,65,100,125,200,250]
#125
out_df = pd.DataFrame()
first = True
for t in ma_range:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t,series_type=s))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
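# Usage sketch ("MSFT"/"daily" are placeholder arguments; a valid API key still has to be
# appended to the URL templates above, and every request sleeps ~12 s to respect rate limits).
def _example_moving_averages(symbol="MSFT", interval="daily"):
    sma_df = moving_a("sma", symbol, interval)
    ema_df = moving_a("ema", symbol, interval)
    return pd.merge(sma_df, ema_df, on="time", how="inner")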
def macdext_get(macd,symbol, interval):#,types=False,time_period=False):
out_df = pd.DataFrame()
macd_range = [[5,10,3],[10,20,7],[12,26,9],[15,35,11]]
api = indicator_dict[macd]
macd_ma = 1
first=True
for i in macd_range:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,series_type=s,fastperiod=i[0],slowperiod=i[1],signalperiod=i[2],fastmatype=ma_types[1],slowmatype=ma_types[1],signalmatype=ma_types[1]))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def stoch_get(stoch,symbol,interval):
slowd = 3
slowk = 3
fastk = 5
fastd = 3
stoch_ma = 1
#EMA
api = indicator_dict[stoch]
if stoch == 'stoch':
indicator = requests.get(api.format(symbol=symbol,interval=interval,fastkperiod=fastk,slowkperiod=slowk,slowdperiod=slowd,slowkmatype=stoch_ma,slowdmatype=stoch_ma))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
indi_df = pd.read_csv(fixed)
return indi_df
elif stoch == 'stochf':
indicator = requests.get(api.format(symbol=symbol,interval=interval,fastkperiod=fastk,fastdperiod=fastd,fastdmatype=stoch_ma))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
indi_df = pd.read_csv(fixed)
return indi_df
def rsi_get(rsi,symbol,interval):
out_df = pd.DataFrame()
rsi_period = [7,11,14,21]
api = indicator_dict[rsi]
first = True
for t in rsi_period:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t,series_type=s))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def stochrsi_get (indicator,symbol,interval):
api = indicator_dict[indicator]
fastk = 5
fastd = 3
fastma = 1
stype = 'close'
rsi_period = [7,11,14,21]
first = True
for t in rsi_period:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t,series_type=s,fastkperiod=fastk,fastdperiod=fastd,fastdmatype=fastma))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def adx_get(indicator,symbol,interval):
api = indicator_dict[indicator]
adx_period = [7,11,14,21]
first = True
for t in adx_period:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def cci_get(indicator,symbol,interval):
api = indicator_dict[indicator]
cci_range = [5,10,15,20,35,50,65,85,100,125,200,250]
first = True
#annual/time period cycle high to high divided by three is the official time period
for t in cci_range:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def aroon_get(indicator,symbol,interval):
api= indicator_dict[indicator]
aroon_range = [5,10,15,20,35,50,65,85,100,125,200,250]
#period since last highest high and lowest low
first = True
for t in aroon_range:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def bbands_get(indicator,symbol,interval):
api= indicator_dict[indicator]
bb_range = [5,10,15,20,35,50,65,100,125,200,250]
ndup = 2
nddn = 2
bband_ma = [0,1,4]
first = True
for t in bb_range:
for m in bband_ma:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t,series_type=s,nbdevup=ndup,nbdevdn=nddn,matype=m))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def adosc_get(indicator,symbol,interval):
api= indicator_dict[indicator]
fastperiod = 3
slowperiod = 10
first = True
#2,7?
indicator = requests.get(api.format(symbol=symbol,interval=interval,fastperiod=fastperiod,slowperiod=slowperiod))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
out_df = pd.read_csv(fixed)
return out_df
def simple_indicator(indicator,symbol,interval):
api = indicator_dict[indicator]
indicator = requests.get(api.format(symbol=symbol,interval=interval))
time.sleep(11.8)
fixed = StringIO(indicator.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
indi_df = pd.read_csv(fixed)
return indi_df
indicator_run = {
'sma':moving_a,
'ema':moving_a,
'tema':moving_a,
'macd':simple_indicator,
'macdext':macdext_get,
'stoch':stoch_get,
'stochf':stoch_get,
'rsi':rsi_get,
'stochrsi':stochrsi_get,
'willr':'notassigned',
'adx':adx_get,
'adxr':adx_get,
'apo':'notassigned',
'ppo':'notassigned',
'mom':'notassigned',
'bop':simple_indicator,
'cci':cci_get,
'cmo':'notassigned',
'roc':'notassigned',
'rocr':'notassigned',
'aroon':aroon_get,
'aroonosc':aroon_get,
'mfi':'notassigned',
'trix':'notassigned',
'ultosc':'notassigned',
'dx':'notassigned',
'minus_di':'notassigned',
'plus_di':'notassigned',
'minus_dm':'notassigned',
'plus_dm':'notassigned',
'bbands':bbands_get,
'midpoint':'notassigned',
'midprice':'notassigned',
'sar':'notassigned',
'trange':simple_indicator,
'atr':'notassigned',
'natr':'notassigned',
'ad':simple_indicator,
'adosc':adosc_get,
'obv':simple_indicator,
'ht_trendline':'notassigned',
'ht_sine':'notassigned',
'ht_trendmode':'notassigned',
'ht_dcperiod':'notassigned',
'ht_dcphase':'notassigned',
'ht_dcphasor':'notassigned'
}
def get_data (symbol,interval):
if interval in ['1min','5min','15min','30min','60min']:
intra_csv = requests.get(apii.format(function = 'TIME_SERIES_INTRADAY',symbol=symbol,interval=interval))
time.sleep(11.8)
#response 200 = got it
fixed = StringIO(intra_csv.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
intra_df = pd.read_csv(fixed)
indicator_df = get_indicators(symbol,interval)
out_df = pd.merge(intra_df,indicator_df,on='time',how='inner')
return out_df
elif interval == 'daily':
daily_csv = requests.get(apid.format(function = 'TIME_SERIES_DAILY',symbol=symbol))
time.sleep(11.8)
#response 200 = got it
indicator_df = get_indicators(symbol,interval)
fixed = StringIO(daily_csv.content.decode('utf-8'))
#pandas.read_csv needs a filepath for strings use StringIO from IO to convert Str to filepath
d_df = pd.read_csv(fixed)
print(d_df)
print(indicator_df)
out_df = | pd.merge(d_df,indicator_df,left_index=True,right_index=True,how='inner') | pandas.merge |
import datetime
import datetime as dt
import json
import re
from itertools import count
import datazimmer as dz
import numpy as np
import pandas as pd
from aswan import get_soup
from tqdm import tqdm
class Condition(dz.CompositeTypeBase):
lungs = bool
heart = bool
blood_pressure = bool
diabetes = bool
obesity = bool
class CovidVictim(dz.AbstractEntity):
    serial = dz.Index & int
age = int
estimated_date = dt.datetime
is_male = bool
raw_conditions = str
condition = Condition
# TODO: this should be inherited from elsewhere
positive_rate = float
total_vaccinations = int
people_vaccinated = int
people_fully_vaccinated = int
total_boosters = int
hun_url = dz.SourceUrl("https://koronavirus.gov.hu/elhunytak")
wd_url = dz.SourceUrl("https://www.worldometers.info/coronavirus/country/hungary/")
owid_url = dz.SourceUrl("https://covid.ourworldindata.org")
OWID_SRC = f"{owid_url}/data/owid-covid-data.csv"
def get_hun_victim_df():
dfs = []
for p in tqdm(count()):
soup = get_soup(f"{hun_url}?page={p}")
elem = soup.find(class_="views-table")
try:
dfs.append(pd.read_html(str(elem))[0])
except ValueError:
break
return (
pd.concat(dfs, ignore_index=True)
.astype({"Kor": int})
.assign(is_male=lambda df: (df["Nem"].str.lower().str[0] == "f"))
.drop("Nem", axis=1)
.rename(columns={"Kor": CovidVictim.age, "Sorszám": CovidVictim.serial})
)
def get_count_df(patient_df):
soup = get_soup(wd_url)
js_str = soup.find("script", text=re.compile("'graph-deaths-daily', .*")).contents[
0
]
daily_df = (
pd.DataFrame(
{
k: json.loads(re.compile(rf"{k}: (\[.*\])").findall(js_str)[0])
for k in ["data", "categories"]
}
)
.assign(date=lambda df: pd.to_datetime(df["categories"]))
.fillna(0)
.sort_values("date")
.loc[lambda df: df["data"].cumsum() < patient_df.shape[0], :]
)
mismatch = patient_df.shape[0] - daily_df["data"].sum()
pad_dic = {"data": [mismatch], "date": pd.to_datetime(datetime.date.today())}
return pd.concat([daily_df, | pd.DataFrame(pad_dic) | pandas.DataFrame |
from xgboost import XGBRegressor
import pandas as pd
import argparse
from sklearn import metrics
import scipy
import pickle
from pathlib import Path
import numpy as np
'''
python3 XGBoostRegressor.py --dataset_type raw_c > raw_c_results.txt
python3 XGBoostRegressor.py --dataset_type raw_d > raw_d_results.txt
python3 XGBoostRegressor.py --dataset_type raw > raw_results.txt
'''
# extract type of feature data from arguments
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_type', type=str,
required=True, help='type of data')
args = parser.parse_args()
# read base data
X_train, y_train, X_test, y_test = None, None, None, None
Xc_train, Xc_test = None, None
Xd_train, Xd_test = None, None
# Get labels (same across all datasets)
test_diff_df = pd.read_csv("diff_expr/C12TestDiff.tsv",
delimiter='\t', header=None)
train_diff_df = pd.read_csv(
"diff_expr/C12TrainDiff.tsv", delimiter='\t', header=None)
valid_diff_df = pd.read_csv(
"diff_expr/C12ValidDiff.tsv", delimiter='\t', header=None)
y_test = test_diff_df
y_train = pd.concat([train_diff_df, valid_diff_df])
# Get features
if args.dataset_type == "raw_c" or args.dataset_type == "raw":
c1_test_df = pd.read_csv("embeddings/C1TestEmbeddings.csv", header=None)
c1_train_df = pd.read_csv("embeddings/C1TrainEmbeddings.csv", header=None)
c1_valid_df = pd.read_csv("embeddings/C1ValidEmbeddings.csv", header=None)
c2_test_df = pd.read_csv("embeddings/C2TestEmbeddings.csv", header=None)
c2_train_df = pd.read_csv("embeddings/C2TrainEmbeddings.csv", header=None)
c2_valid_df = pd.read_csv("embeddings/C2ValidEmbeddings.csv", header=None)
c12_train_df = pd.concat([c1_train_df, c2_train_df], axis=1)
c12_valid_df = pd.concat([c1_valid_df, c2_valid_df], axis=1)
# Reset index prevents errors from combining Xd_train and Xc_train.
# Unless you specify drop=True, it creates a new column that stores indices
# We don't want this, because it essentially adds another feature that's just id
Xc_train = pd.concat([c12_train_df, c12_valid_df]).reset_index(drop=True)
Xc_test = pd.concat([c1_test_df, c2_test_df], axis=1)
if args.dataset_type == "raw_d" or args.dataset_type == "raw":
diff_train = pd.read_csv("embeddings/DiffTrainEmbeddings.csv", header=None)
diff_valid = pd.read_csv("embeddings/DiffValidEmbeddings.csv", header=None)
# Reset index prevents errors from combining Xd_train and Xc_train.
# Unless you specify drop=True, it creates a new column that stores indices
# We don't want this, because it essentially adds another feature that's just id
Xd_train = pd.concat([diff_train, diff_valid]).reset_index(drop=True)
Xd_test = | pd.read_csv("embeddings/DiffTestEmbeddings.csv", header=None) | pandas.read_csv |
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
def read_hydrophobicity_scale(seq, isNew=False):
seq_dataFrame = pd.DataFrame({"oneLetterCode":list(seq)})
HFscales = pd.read_table("~/opt/small_script/Whole_residue_HFscales.txt")
if not isNew:
# Octanol Scale
# new and old difference is at HIS.
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS+" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
else:
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS0" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
HFscales_with_oneLetterCode = HFscales.assign(oneLetterCode=HFscales.AA.str.upper().map(code)).dropna()
data = seq_dataFrame.merge(HFscales_with_oneLetterCode, on="oneLetterCode", how="left")
return data
def create_zim(seqFile, isNew=False):
a = seqFile
seq = getFromTerminal("cat " + a).rstrip()
data = read_hydrophobicity_scale(seq, isNew=isNew)
z = data["DGwoct"].values
np.savetxt("zim", z, fmt="%.2f")
def expand_grid(dictionary):
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
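# Small illustration of expand_grid (the values are arbitrary): it returns the Cartesian
# product of the dictionary values as a DataFrame with one column per key (4 rows here).
def _example_expand_grid():
    return expand_grid({"temp": [300, 350], "force": [0.0, 0.1]})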
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
tmp[21] = new_chain
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
# add 40 to the x
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
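# Usage sketch (file names are placeholders): write a copy of a PDB shifted 40 A along x and
# relabelled as chain B, e.g. to build a two-copy system next to the original coordinates.
def _example_duplicate_pdb():
    duplicate_pdb("2xov.pdb", "2xov_chainB.pdb", offset_x=40, new_chain="B")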
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
constacts = dis < DISTANCE_CUTOFF
constacts = constacts*native_contacts # remove non native contacts
return np.sum(constacts, axis=1).astype("float")
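# Minimal sketch tying the two helpers together (the coordinates are random placeholders):
# per-residue localQ is the fraction of each residue's native contacts present in a frame,
# so identical coordinates recover a fraction of 1 wherever native contacts exist.
def _example_localQ_fraction():
    native_coords = np.random.rand(100, 3) * 50
    native_table = compute_native_contacts(native_coords)
    native_counts = np.sum(native_table, axis=1).astype("float")
    frame_counts = compute_contacts(native_coords, native_table)
    return np.divide(frame_counts, native_counts,
                     out=np.zeros_like(frame_counts), where=native_counts != 0)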
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
dis = []
all_res = []
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
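# End-to-end sketch (paths follow the hard-coded 2xov conventions above): build the native
# contact table once, then write localQ.<run>.csv for one dump file in the current directory.
def _example_compute_localQ_run(run_index=0):
    native_table = compute_localQ_init()
    compute_localQ(native_table, pre=".", ii=run_index)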
def readPMF_basic(pre):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
print(line_number)
print(line_number/552)
    # each frame in the dump is 552 lines: 543 atoms plus the 9 LAMMPS header lines
    n = 552
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
b = np.array([0, 0, 1])
angle = a[2]/length(a) # in form of cos theta
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def structure_prediction_run(protein):
print(protein)
protocol_list = ["awsemer", "frag", "er"]
do = os.system
cd = os.chdir
cd(protein)
# run = "frag"
for protocol in protocol_list:
do("rm -r " + protocol)
do("mkdir -p " + protocol)
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
# do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
fileName = protein + "_multi.in"
backboneFile = "fix_backbone_coeff_" + protocol
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line.replace("fix_backbone_coeff_er", backboneFile)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
cd("..")
# do("")
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = set()
for atom in one:
residue, *_ = atom.split()
# print(residue)
all_residues.add(int(residue))
for test in range(int(i), int(i)+int(n)):
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def read_complete_temper_2(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False, disReal=False, dis_h56=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False):
all_data_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if dis_h56:
tmp = pd.read_csv(location+f"distance_h56_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp1 = pd.read_csv(location+f"distance_h12_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp2 = pd.read_csv(location+f"distance_h34_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
tmp1.columns = tmp1.columns.str.strip()
tmp2.columns = tmp2.columns.str.strip()
wham = pd.concat([wham, tmp, tmp1, tmp2],axis=1)
if average_z:
z = | pd.read_csv(location+f"z_complete_{i}.dat") | pandas.read_csv |
"""Climodat Daily Data Estimator.
python daily_estimator.py YYYY MM DD
RUN_NOON.sh - processes the current date, this skips any calendar day sites
RUN_NOON.sh - processes yesterday, running all sites
RUN_2AM.sh - processes yesterday, which should run all sites
"""
from __future__ import print_function
import sys
import datetime
import pandas as pd
from pandas.io.sql import read_sql
import numpy as np
from pyiem import iemre
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, ncopen
from pyiem.datatypes import temperature, distance
from pyiem.reference import TRACE_VALUE, state_names
def load_table(state, date):
"""Update the station table"""
nt = NetworkTable("%sCLIMATE" % (state, ))
rows = []
istoday = (date == datetime.date.today())
for sid in nt.sts:
# handled by compute_0000
if sid[2:] == '0000' or sid[2] == 'C':
continue
if istoday and not nt.sts[sid]['temp24_hour'] in range(3, 12):
# print('skipping %s as is_today' % (sid, ))
continue
i, j = iemre.find_ij(nt.sts[sid]['lon'], nt.sts[sid]['lat'])
nt.sts[sid]['gridi'] = i
nt.sts[sid]['gridj'] = j
rows.append(
{'station': sid, 'gridi': i, 'gridj': j,
'temp24_hour': nt.sts[sid]['temp24_hour'],
'precip24_hour': nt.sts[sid]['precip24_hour'],
'tracks': nt.sts[sid]['attributes'].get(
'TRACKS_STATION', '|').split("|")[0]}
)
if not rows:
return
df = pd.DataFrame(rows)
df.set_index('station', inplace=True)
for key in ['high', 'low', 'precip', 'snow', 'snowd']:
df[key] = None
return df
def estimate_precip(df, ts):
"""Estimate precipitation based on IEMRE"""
idx = iemre.daily_offset(ts)
nc = ncopen(iemre.get_daily_ncname(ts.year), 'r', timeout=300)
grid12 = distance(nc.variables['p01d_12z'][idx, :, :],
'MM').value("IN").filled(0)
grid00 = distance(nc.variables['p01d'][idx, :, :],
"MM").value("IN").filled(0)
nc.close()
for sid, row in df.iterrows():
if not pd.isnull(row['precip']):
continue
if row['precip24_hour'] in [0, 22, 23]:
precip = grid00[row['gridj'], row['gridi']]
else:
precip = grid12[row['gridj'], row['gridi']]
# denote trace
if precip > 0 and precip < 0.01:
df.at[sid, 'precip'] = TRACE_VALUE
elif precip < 0:
df.at[sid, 'precip'] = 0
elif np.isnan(precip) or np.ma.is_masked(precip):
df.at[sid, 'precip'] = 0
else:
df.at[sid, 'precip'] = "%.2f" % (precip,)
def estimate_snow(df, ts):
"""Estimate the Snow based on COOP reports"""
idx = iemre.daily_offset(ts)
nc = ncopen(iemre.get_daily_ncname(ts.year), 'r', timeout=300)
snowgrid12 = distance(nc.variables['snow_12z'][idx, :, :],
'MM').value('IN').filled(0)
snowdgrid12 = distance(nc.variables['snowd_12z'][idx, :, :],
'MM').value('IN').filled(0)
nc.close()
for sid, row in df.iterrows():
if | pd.isnull(row['snow']) | pandas.isnull |
import numpy as np
import pandas as pd
from .DensityFunctions import BaseDensityCalc
def raw_delta_calc(times):
'''
Given an array of times, this function calculates the deltas between them.
Arguments
---------
- times: array:
This is an array of times that will be used to calculate the deltas.
Returns
---------
    - out: array:
        This is an array of deltas in seconds (the input times are assumed to be
        numpy datetime64[ns] values, hence the 1e-9 scaling).
'''
out = (times[1:] - times[:-1])*1e-9
out = out.astype(float)
return out
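# Tiny illustration (the timestamps are arbitrary): with datetime64[ns] inputs the deltas come
# out in seconds because of the 1e-9 scaling above, e.g. array([30., 90.]) for this trace.
def _example_raw_delta_calc():
    times = pd.to_datetime(["2021-05-05 10:00:00",
                            "2021-05-05 10:00:30",
                            "2021-05-05 10:02:00"]).values
    return raw_delta_calc(times)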
def single_location_delta(input_df, single_location,
columns={'time': 'time', 'location': 'location'}, recall_value=5,
return_as_list = False):
'''
This function takes the ```input_df``` and calculates the raw time delta between the single_location location time
and the time of the ```recall_value``` number of locations immediately before the single_location.
This does not separate on subject. Please pass data from a single subject into this function.
Arguments
---------
- input_df: pandas dataframe:
This is a dataframe that contains columns relating to the subject, time and location of sensor trigger.
- single_location: string:
This is the location value that you wish to calculate the delta to.
- columns: dictionary:
This is the dictionary with the column names in ```input_df``` for each of the values of data that we need
in our calculations.
This dictionary should be of the form:
```
{'time': column containing the times of sensor triggers,
'location': column containing the locations of the sensor triggers}
```
- recall_value: integer:
This is the number of previous locations to the single_location trigger
- return_as_list: bool:
This option allows the user to return a list of the dates and data if ```True```. This is
used internally by other functions.
Returns
---------
- out: dictionary:
This has the Timestamps of the dates as keys (for example: Timestamp('2021-05-05 00:00:00')) and the
arrays of deltas as values. The arrays of deltas are of shape ```(Nt, recall_value)``` where Nt is the
number of visits to ```single_location``` on a given day. If there are no ```single_location``` visits
found in the data, then an empty dictionary will be returned.
'''
time_column = columns['time']
location_column = columns['location']
# format the incoming data to ensure assumptions about structure are met
input_df[time_column] = pd.to_datetime(input_df[time_column], utc=True)
input_df = input_df.sort_values(time_column)
# find the indices of the data that match with the location we want to find the delta to
single_location_indices = np.where(input_df[location_column] == single_location)[0].reshape(-1, 1)
# making sure that the recall value is not more than the number of sensor triggers before the
# first single_location sensor trigger
if len(single_location_indices) == 0:
if return_as_list: return [], []
else: return {}
single_location_indices = single_location_indices[np.argmax(recall_value < single_location_indices):]
# indices of the sensor triggers that we need in our calculations
recall_indices = np.hstack([single_location_indices - i for i in range(recall_value + 1)])
# the times of the sensor triggers
recall_times = input_df[time_column].values[recall_indices]
# the delta between the times for each of the previous sensors to recall_value
recall_delta = (recall_times[:, 0, None] - recall_times[:, 1:]) * 1e-9
# the times of the single_location triggers
single_location_times = input_df[time_column].iloc[single_location_indices.reshape(-1, )]
# dates of the single_location triggers
single_location_dates = single_location_times.dt.date
# out dictionary
out = {}
if return_as_list:
date_list = []
data_list = []
for nd, date in enumerate(single_location_dates.unique()):
date_list.append(date)
data_to_add = recall_delta[single_location_dates.values == date].astype(float)
data_list.append(data_to_add)
return pd.to_datetime(date_list), data_list
else:
# creating the output dictionary
for date in single_location_dates.unique():
# saving the delta values for this date to the dictionary
out[pd.to_datetime(date)] = recall_delta[single_location_dates.values == date].astype(float)
return out
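# Usage sketch with a tiny synthetic trace (locations and times are invented): computes the
# deltas from each "kitchen" trigger back to the two sensor firings immediately before it.
def _example_single_location_delta():
    events = pd.DataFrame({
        "time": pd.to_datetime(["2021-05-05 08:00", "2021-05-05 08:05",
                                "2021-05-05 08:07", "2021-05-05 08:30"]),
        "location": ["hallway", "lounge", "kitchen", "kitchen"],
    })
    return single_location_delta(events, "kitchen", recall_value=2)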
class TimeDeltaDensity(BaseDensityCalc):
'''
This function allows the user to calculate reverse percentiles on some data, given another
dataset.
'''
def __init__(self, save_baseline_array=True, sample=False, sample_size=10000,
seed=None, verbose=True):
BaseDensityCalc.__init__(self, save_baseline_array=save_baseline_array,
sample=sample, sample_size=sample_size, seed=seed, verbose=verbose)
return
def rp_single_location_delta(input_df, single_location, baseline_length_days = 7, baseline_offset_days = 0,
columns={'time': 'time', 'location': 'location'}, recall_value=5):
'''
This function takes the ```input_df``` and calculates the reverse percentage time delta between the ```single_location``` location time
and the time of the ```recall_value``` number of locations immediately before the ```single_location```. The baseline
for the reverse percentage calculation is defined by ```baseline_length_days``` and ```baseline_offset_days```.
For example:
With ```baseline_length_days = 7``` and ```baseline_offset_days = 1```, the rp deltas on the day
```pd.Timestamp('2021-06-29')``` are calculated using the deltas from
```pd.Timestamp('2021-06-21 00:00:00')``` to ```pd.Timestamp('2021-06-28 00:00:00')```.
This does not separate on subject. Please pass data from a single subject into this function.
NOTE: The reverse percentage is calculated based on all of the deltas coming into a location!
This means that the delta is agnostic to the "from" location.
Arguments
---------
- input_df: pandas dataframe:
This is a dataframe that contains columns relating to the time and location of sensor trigger.
- single_location: string:
This is the location value that you wish to calculate the delta to.
- baseline_length_days: integer:
This is the length of the baseline in days that will be used. This value is used when finding
the ```baseline_length_days``` complete days of ```single_location``` data to use as a baseline.
- baseline_offset_days: integer:
This is the offset to the baseline period. ```0``` corresponds to a time period ending the morning of the
current date being calculated on.
- columns: dictionary:
This is the dictionary with the column names in ```input_df``` for each of the values of data that we need
in our calculations.
This dictionary should be of the form:
```
{'time': column containing the times of sensor triggers,
'location': column containing the locations of the sensor triggers}
```
- recall_value: integer:
This is the number of locations immediately preceding each single_location trigger to include when computing the deltas.
Returns
---------
- out: dictionary:
This has the Timestamps of the dates as keys (for example: Timestamp('2021-05-05 00:00:00')) and the
arrays of deltas as values. The arrays of deltas are of shape ```(Nt, recall_value)``` where Nt is the
number of visits to ```single_location``` on a given day.
'''
# column names
time_column = columns['time']
location_column = columns['location']
out = {}
# format the incoming data to ensure assumptions about structure are met
input_df[time_column] = pd.to_datetime(input_df[time_column], utc=True)
input_df = input_df.sort_values(time_column)
# getting the single location raw delta
date_list, data_list = single_location_delta(input_df, single_location, columns, recall_value, return_as_list=True)
# for each date
for nd, date in enumerate(date_list):
date = | pd.to_datetime(date) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Web Map Tile Service time dimension demonstration
-------------------------------------------------
This example further demonstrates WMTS support within cartopy. Optional
keyword arguments can be supplied to the OGC WMTS 'gettile' method. This
allows for the specification of the 'time' dimension for a WMTS layer
which supports it.
There are 10000+ WMS services out there. Here are some compiled lists:
http://www.skylab-mobilesystems.com/en/wms_serverlist.html
http://directory.spatineo.com
http://directory.spatineo.com/service/42691/
# See using open street map:
#http://scitools.org.uk/cartopy/docs/v0.15/examples/tube_stations.html
# Planet Labs
https://www.planet.com/docs/reference/tile-services/
# Google tiles:
https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
The example shows satellite imagery retrieved from NASA's Global Imagery
Browse Services for 5th Feb 2016. A true color MODIS image is shown on
the left, with the MODIS false color 'snow RGB' shown on the right.
"""
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
#import matplotlib.ticker as mticker
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
import cartopy.crs as ccrs
import cartopy.feature as cfeature
#from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from owslib.wmts import WebMapTileService
import geopandas as gpd
import pandas as pd
from pandas.plotting import table
import numpy as np
import shapely.geometry
import shapely.wkt
import urllib.parse
import json
import os
#plt.style.use('seaborn-white')
plt.rcParams['font.size'] = 14
def add_wmts_gibs_basemap(ax, date='2016-02-05'):
"""http://gibs.earthdata.nasa.gov/"""
URL = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi'
wmts = WebMapTileService(URL)
# Layers for MODIS true color and snow RGB
# NOTE: what other tiles available?: TONS!
#https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products#expand-ReferenceLayers9Layers
#layer = 'MODIS_Terra_SurfaceReflectance_Bands143'
#layer = 'MODIS_Terra_CorrectedReflectance_Bands367'
#layer = 'ASTER_GDEM_Greyscale_Shaded_Relief' #better zoomed in
layer = 'SRTM_Color_Index'
#layer = 'BlueMarble_ShadedRelief' #static
#layer = 'BlueMarble_NextGeneration'
#layer = 'BlueMarble_ShadedRelief_Bathymetry'
#layer = 'Reference_Labels'
#layer = 'Reference_Features'
ax.add_wmts(wmts, layer, wmts_kwargs={'time': date}) # alpha=0.5
#NOTE: can access attributes:
#wmts[layer].title
return wmts
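# --- Illustrative usage sketch (not part of the original script) ---
# Assumes a GeoAxes created with a cartopy projection; the date string is a placeholder.
def _example_gibs_basemap(date='2016-02-05'):
    ax = plt.axes(projection=ccrs.PlateCarree())
    return add_wmts_gibs_basemap(ax, date=date)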
def add_xyz_tile(ax, url, zoom=6):
""" Grab a map tile from the web """
from cartopy.io.img_tiles import GoogleTiles
# Not sure about how projections are handled...
#url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}.jpg'
#url = 'http://tile.stamen.com/watercolor/{z}/{x}/{y}.png'
#url = 'https://s3.amazonaws.com/elevation-tiles-prod/normal/{z}/{x}/{y}.png'
tiler = GoogleTiles(url=url)
ax.add_image(tiler, zoom)
def query_asf(snwe, sat='1A'):
'''
takes list of [south, north, west, east]
'''
miny, maxy, minx, maxx = snwe
roi = shapely.geometry.box(minx, miny, maxx, maxy)
#test.__geo_interface__ #geojson?
polygonWKT = roi.to_wkt()
polygon = urllib.parse.quote_plus(polygonWKT) #for URL
base = 'https://api.daac.asf.alaska.edu/services/search/param?'
#Note: 'intersects' gets entire global track! ALSO, better to use 'requests' library?
poly = 'intersectsWith={}'.format(polygon)
plat = 'platform=Sentinel-{}'.format(sat) #1B
proc = 'processingLevel=SLC'
beam = 'beamMode=IW'
#relativeOrbit=$ORBIT
out = 'output=json > query{}.json'.format(sat)
#out = 'output=kml > query.kml'
querystr = '\\&'.join([base, poly, plat, beam, proc, out]) #escape backslash
#poly= polygonWKT
#url = '\&'.join([base, poly, plat, beam, proc, out]) #
os.system('curl ' + querystr)
print(querystr)
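# --- Illustrative call (not part of the original script) ---
# Uses the same SNWE box as main() below; with sat='1A' the JSON result lands in query1A.json
# (see the 'output=' format string above).
def _example_query_asf():
    query_asf([-11.75, -8.75, 32.5, 35.5], sat='1A')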
# query ASF catalog and get json back
def load_asf_json(jsonfile):
''' Convert JSON metadata from asf query to dataframe '''
with open(jsonfile) as f:
meta = json.load(f)[0] #list of scene dictionaries
df = pd.DataFrame(meta)
polygons = df.stringFootprint.apply(shapely.wkt.loads)
gf = gpd.GeoDataFrame(df,
crs={'init': 'epsg:4326'},
geometry=polygons)
#gf['date'] = pd.to_datetime(df.sceneDate, format='%Y-%m-%d %H:%M:%S')
#gf.to_file(outfile) #saves as shapefile
return gf
def main():
# Plot setup
plot_CRS = ccrs.Mercator()
geodetic_CRS = ccrs.Geodetic()
x0, y0 = plot_CRS.transform_point(30, -16, geodetic_CRS)
x1, y1 = plot_CRS.transform_point(40, -6, geodetic_CRS)
fig,ax = plt.subplots(figsize=(8,8), dpi=100,
subplot_kw=dict(projection=plot_CRS))
ax.set_xlim((x0, x1))
ax.set_ylim((y0, y1))
#wmts = add_wmts_gibs_basemap(ax)
#url = 'http://tile.stamen.com/watercolor/{z}/{x}/{y}.png'
#url = 'http://tile.stamen.com/terrain/{z}/{x}/{y}.png'
#url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}.jpg'
#url = 'https://s3.amazonaws.com/elevation-tiles-prod/normal/{z}/{x}/{y}.png'
#add_xyz_tile(ax, url, zoom=7) #higher number higher resultion #can't pass alpha argument...
#ax.stock_img()
# New 0.15 background method...
os.environ["CARTOPY_USER_BACKGROUNDS"] = '/Users/scott/Data/cartopy_data'
#ax.background_img("GrayEarth")
ax.background_img("ne_shaded", "low")
#ax.coastlines(resolution='50m', color='black')
# http://scitools.org.uk/cartopy/docs/v0.15/matplotlib/feature_interface.html
#https://gis.stackexchange.com/questions/88209/python-mapping-in-matplotlib-cartopy-color-one-country
#borders = cfeature.BORDERS #low res
borders = cfeature.NaturalEarthFeature(scale='10m',
category='cultural',
name='admin_0_boundary_lines_land')
ax.add_feature(borders, facecolor='none', edgecolor='k')
# Label Basemap
'''
txt = plt.text(-69, -20.5, 'MODIS' , fontsize=18,
color='wheat', transform=geodetic_CRS)
txt.set_path_effects([PathEffects.withStroke(linewidth=5,
foreground='black')])
'''
# Add Rungwe
ax.plot(-33.668, -9.135, 'k^', transform=geodetic_CRS)
# Add deformation footprint
# Add ROI gjson
# Go directly from SNWE box: -11.75 -8.75 32.5 35.5
snwe = [-11.75, -8.75, 32.5, 35.5]
#query_asf(snwe, '1A') #downloads query.json
#query_asf(snwe, '1B') #downloads query.json
# Some basic coverage analysis
gf = load_asf_json('query.json')
orbits = gf.relativeOrbit.unique()
#gf.to_file('query.geojson', driver='GeoJSON') #automatically rendered on github!
#gf.iloc[0] #everything available
#gf.plot()
gf.groupby('relativeOrbit').fileName.count()
gf.groupby('relativeOrbit').sceneDate.describe()
gf.groupby('relativeOrbit').sceneDate.min()
gf.groupby('relativeOrbit').sceneDate.max()
# create a summary dataframe, insert as figure metadata
#dfS = pd.DataFrame(index=gf.relativeOrbit.unique(), columns=['Count','Start','Stop'])
#dfS['Count'] = gf.groupby('relativeOrbit').fileName.count()
#dfS['Start'] = gf.groupby('relativeOrbit').sceneDate.min()
#dfS['Stop'] = gf.groupby('relativeOrbit').sceneDate.max()
# Above has weird dtypes...
# Add to plot! as a custom legend - see below for separate figure
#table(ax, dfS, loc='upper right', zorder=10)
# Add all scene footprints to the plot!
'''
ax.add_geometries(gf.geometry.values, ccrs.PlateCarree(),
facecolor='none',
edgecolor='k',
linewidth=1,
linestyle='-')
'''
# To unclutter, show cascaded union for each track in different colors
#colors = ['c','m','b','y']
colors = plt.cm.jet(np.linspace(0,1,orbits.size))
#colors = plt.get_cmap('jet', orbits.size) #not iterable
for orbit,color in zip(orbits, colors):
df = gf.query('relativeOrbit == @orbit')
poly = df.geometry.cascaded_union
if df.flightDirection.iloc[0] == 'ASCENDING':
linestyle = '--'
#xpos, ypos = poly.bounds[0], poly.bounds[3] #upper left
xpos,ypos = poly.centroid.x, poly.bounds[3]
else:
linestyle = '-'
#xpos, ypos = poly.bounds[2], poly.bounds[1] #lower right
xpos,ypos = poly.centroid.x, poly.bounds[1]
ax.add_geometries([poly],
ccrs.PlateCarree(),
facecolor='none',
edgecolor=color,
linewidth=2,
linestyle=linestyle)
ax.text(xpos, ypos, orbit, color=color, fontsize=16, fontweight='bold', transform=geodetic_CRS)
# Add some text labels
#fs = 16
#ax.text(-69.6, -20.6, 'A149', color='b', fontsize=fs, fontweight='bold', transform=geodetic_CRS)
#ax.text(-67.0, -20.25, 'A76', color='y', fontsize=fs, fontweight='bold', transform=geodetic_CRS)
#ax.text(-67.8, -24.9, 'D83', color='m', fontsize=fs, fontweight='bold', transform=geodetic_CRS)
#ax.text(-69.6, -24.6, 'D156', color='c', fontsize=fs, fontweight='bold', transform=geodetic_CRS)
# Warning, mixing PlateCarree and Mercator here... might not work
# for large regions
gl = ax.gridlines(ccrs.PlateCarree(), draw_labels=True,
linewidth=0.5, color='gray', alpha=0.5, linestyle='-')
gl.xlabels_top = False
gl.ylabels_left = False
#gl.xlines = False
#gl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
#plt.show()
plt.title('Sentinel-1 Coverage SEGMENT Project')
plt.savefig('map_coverage.pdf', bbox_inches='tight')
# Second figure for timeline
'''
df = pd.DataFrame(gf.relativeOrbit.astype('int'))
#df['platform'] = gf.platform
df['sceneDate'] = pd.to_datetime(gf.sceneDate)
df['dateStr'] = df.sceneDate.apply(lambda x: x.strftime('%Y-%m-%d'))
df['code'] = df.relativeOrbit.astype('category').cat.codes
dfS = pd.DataFrame(index=df.relativeOrbit.unique())
dfS['Count'] = df.groupby('relativeOrbit').dateStr.count()
dfS['Start'] = df.groupby('relativeOrbit').dateStr.min()
dfS['Stop'] = df.groupby('relativeOrbit').dateStr.max()
dfS.sort_index(inplace=True, ascending=False)
dfS.index.name = 'Orbit'
plot_timeline_table(df, dfS)
'''
#plot_timeline_table_new()
def plot_timeline(df):
fig,ax = plt.subplots(figsize=(8,4))
plt.scatter(df.sceneDate.values, df.code.values, c=df.code.values, cmap='viridis', s=40)
plt.yticks(df.code.unique(), df.relativeOrbit.unique())
plt.axvline('2016-04-22', color='gray', linestyle='dashed', label='Sentinel-1B launch')
ax.xaxis.set_minor_locator(MonthLocator())
ax.xaxis.set_major_locator(YearLocator())
plt.legend()
plt.ylabel('Orbit Number')
fig.autofmt_xdate()
plt.title('Sentinel-1 Coverage at Uturuncu')
plt.savefig('timeline.pdf', bbox_inches='tight')
def plot_timeline_table(df, dfS):
plt.rcParams['font.size'] = 14
fig,ax = plt.subplots(figsize=(11,8.5))
plt.scatter(df.sceneDate.values, df.code.values, c=df.code.values, cmap='viridis', s=40)
plt.yticks(df.code.unique(), df.relativeOrbit.unique())
plt.axvline('2016-04-22', color='gray', linestyle='dashed', label='Sentinel-1B launch')
# Add to plot! as a custom legend
table(ax, dfS, loc='top', zorder=10, fontsize=12,
#colWidths=[0.2, 0.2, 0.2, 0.2],
bbox=[0.1, 0.7, 0.8, 0.275] )#[left, bottom, width, height])
ax.xaxis.set_minor_locator(MonthLocator())
ax.xaxis.set_major_locator(YearLocator())
plt.legend(loc='lower right')
plt.ylim(-1,6)
plt.ylabel('Orbit Number')
fig.autofmt_xdate()
plt.title('Sentinel-1 Coverage SEGMENT Project')
plt.savefig('timeline_with_table.pdf', bbox_inches='tight')
def print_acquisitions(df, orbit=156):
''' Write acquisitions to csv file '''
dates = pd.to_datetime(df.query('relativeOrbit == @orbit').dateStr)
tmp = | pd.DataFrame(dates) | pandas.DataFrame |
from sklearn.inspection import permutation_importance
from eli5.permutation_importance import get_score_importances
from sklearn.metrics import (
roc_curve,
confusion_matrix,
accuracy_score,
matthews_corrcoef,
roc_auc_score,
log_loss,
mean_squared_error,
mean_absolute_error,
r2_score,
make_scorer
)
import pandas as pd
import numpy as np
import configparser
import pickle
def roc_cutoff(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
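# Youden's J statistic: pick the threshold that maximises (TPR - FPR)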
cutoff = thresholds[np.argmax(tpr - fpr)]
return cutoff
def root_mean_squared_error(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
def evaluate_clf(y_true, y_pred, cutoff):
pred_label = (y_pred >= cutoff) * 1
tn, fp, fn, tp = confusion_matrix(y_true, pred_label).ravel()
accuracy = accuracy_score(y_true, pred_label)
balanced_accuracy = (tp / (tp + fn) + tn / (tn + fp)) / 2
mcc = matthews_corrcoef(y_true, pred_label)
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
auc = roc_auc_score(y_true, y_pred)
metrics = {
'auc': [auc],
'acc': [accuracy],
'sen': [sensitivity],
'spe': [specificity],
'bac': [balanced_accuracy],
'mcc': [mcc],
'cutoff': [cutoff]
}
return metrics
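# --- Quick sanity-check example (illustrative, made-up labels/scores; not part of the original module) ---
def _example_evaluate_clf():
    y_true = np.array([0, 1, 1, 0, 1])
    y_pred = np.array([0.1, 0.8, 0.6, 0.4, 0.9])
    return evaluate_clf(y_true, y_pred, cutoff=roc_cutoff(y_true, y_pred))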
def evaluate_reg(y_true, y_pred):
mae = mean_absolute_error(y_true, y_pred)
mse = mean_squared_error(y_true, y_pred)
rmse = root_mean_squared_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)
metrics = {'r2': [r2], 'mae': [mae], 'rmse': [rmse], 'mse': [mse]}
return metrics
def get_permutation_importance(task, model, X, y, colmumns, n_repeats):
model_type = type(model.get_model()).__name__[:3]
if model_type == 'LGB' or model_type == 'XGB':
if task == 'classification':
scoring = make_scorer(log_loss)
elif task == 'regression':
scoring = make_scorer(root_mean_squared_error)
imps = permutation_importance(
model,
X,
y,
scoring=scoring,
n_repeats=n_repeats,
n_jobs=-1,
)['importances_mean']
elif model_type[:2] == 'NN':
if task == 'classification':
def scoring(X, y_true):
return log_loss(y_true, model.predict(X))
elif task == 'regression':
def scoring(X, y_true):
return root_mean_squared_error(y_true, model.predict(X))
score_decreases = get_score_importances(
scoring,
X,
y,
n_iter=n_repeats,
)[1]
imps = np.mean(score_decreases, axis=0)
df_imps = | pd.DataFrame(imps, columns=['permutation_importance']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_pricing_calloption [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_pricing_calloption&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-call-option-value).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import interpolate
from tqdm import trange
from arpym.statistics import meancov_sp
from arpym.pricing import call_option_value, ytm_shadowrates
from arpym.tools import add_logo, histogram_sp
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-parameters)
tau_hor = 100 # time to horizon (in days)
j_ = 1000 # number of scenarios
k_strk = 1407 # strike of the options on the S&P500 (in dollars)
t_end = np.datetime64('2013-08-31') # expiry date of the options
y = 0.02 # yield curve (assumed flat and constant)
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-implementation-step00): Import data
# +
path = '../../../databases/temporary-databases/'
db_proj = pd.read_csv(path+'db_calloption_proj.csv', index_col=0)
m_moneyness = np.array([float(col[col.find('m=')+2:col.find(' tau=')])
for col in db_proj.columns[1:]])
m_moneyness = np.unique(m_moneyness)
tau_implvol = np.array([float(col[col.find(' tau=')+5:])
for col in db_proj.columns[1:]])
tau_implvol = np.unique(tau_implvol)
db_projdates = pd.read_csv(path + 'db_calloption_proj_dates.csv', header=0,
parse_dates=True)
t_m = np.array(pd.to_datetime(db_projdates.values.reshape(-1)),
dtype='datetime64[D]')
m_ = t_m.shape[0]-1
deltat_m = np.busday_count(t_m[0], t_m[1])
if tau_hor > m_:
print(" Projection doesn't have data until given horizon!!!" +
" Horizon lowered to ", m_)
tau_hor = m_
# number of monitoring times
m_ = tau_hor
t_m = t_m[:m_+1]
i_ = db_proj.shape[1]
x_proj = db_proj.values.reshape(j_, -1, i_)
x_proj = x_proj[:, :m_+1, :]
x_tnow = x_proj[0, 0, :]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-implementation-step01): Pricing at the horizon
# +
v_call_thor = np.zeros((j_, m_+1))
log_sigma_atm = np.zeros((j_, m_+1))
s_thor = np.zeros((j_, m_+1))
points = list(zip(*[grid.flatten() for grid in
np.meshgrid(*[tau_implvol, m_moneyness])]))
for m in trange(m_+1,desc='Day'):
tau = np.busday_count(t_m[m], t_end)/252
if tau < tau_implvol[0]:
tau = tau_implvol[0]
for j in range(j_):
# compute shadow yield
x_y = ytm_shadowrates(np.array([y]))
x_y = np.atleast_1d(x_y)
# compute call option value
v_call_thor[j, m] = \
call_option_value(x_proj[j, m, 0], x_y, tau,
x_proj[j, m, 1:], m_moneyness, tau_implvol,
k_strk, t_end, t_m[m])
# compute log-implied volatility at the moneyness
log_sigma_atm[j, m] = \
interpolate.LinearNDInterpolator(points,
x_proj[j, m, 1:])(*np.r_[tau, 0])
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-implementation-step02): Scenario-probability expectations and standard deviations
# +
mu_v = np.zeros(m_+1)
sig_v = np.zeros(m_+1)
for m in range(len(t_m)):
mu_v[m], sig1 = meancov_sp(v_call_thor[:, m].reshape(-1, 1))
sig_v[m] = np.sqrt(sig1)
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-implementation-step03): Save databases
# +
output = {'j_': pd.Series(j_),
'k_strike': | pd.Series(k_strk) | pandas.Series |
"""Core function for pyinfraformat."""
import logging
import os
from datetime import datetime
from gc import collect
from numbers import Integral
import pandas as pd
from ..exceptions import FileExtensionMissingError
logger = logging.getLogger("pyinfraformat")
__all__ = ["Holes"]
class Holes:
"""Container for multiple infraformat hole information."""
def __init__(self, holes=None, lowmemory=False):
"""Container for multiple infraformat hole information.
Parameters
----------
holes : list
list of infraformat hole information
lowmemory : bool, optional
Create Pandas DataFrame one by one minimizing memory use.
"""
if holes is None:
holes = []
self.holes = list(holes) if not isinstance(holes, Hole) else [holes]
self._lowmemory = lowmemory
self.n = None
def __str__(self):
msg = "Infraformat Holes -object:\n Total of {n} holes".format(n=len(self.holes))
value_counts = self.value_counts()
if self.holes:
max_length = max([len(str(values)) for values in value_counts.values()]) + 1
counts = "\n".join(
" - {key:.<10}{value:.>6}".format(
key="{} ".format(key),
value=("{:>" + "{}".format(max_length) + "}").format(value),
)
for key, value in value_counts.items()
)
msg = "\n".join((msg, counts))
return msg
def __repr__(self):
return self.__str__()
def __getitem__(self, index):
if isinstance(index, Integral):
return self.holes[index]
return Holes(self.holes[index])
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.holes):
result = self.holes[self.n]
self.n += 1
return result
else:
raise StopIteration
def __add__(self, other):
if isinstance(other, Holes):
return Holes(self.holes + other.holes)
if isinstance(other, Hole):
return Holes(self.holes + [other])
raise ValueError("Only Holes or Hole -objects can be added.")
def __len__(self):
return len(self.holes)
def append(self, hole):
"""Append Hole object to holes."""
if isinstance(hole, Hole):
self.holes += [hole]
else:
raise ValueError("Only Hole -object can be appended.")
def extend(self, holes):
"""Extend with Holes -object."""
if isinstance(holes, Holes):
self.holes += holes
else:
raise ValueError("Only Holes -object can be extended.")
def filter_holes(self, *, bbox=None, hole_type=None, start=None, end=None, fmt=None, **kwargs):
"""Filter holes.
Parameters
----------
bbox : tuple
left, right, bottom, top
hole_type : str
start : str
Date string. Recommended format is yyyy-mm-dd.
Value is passed to `pandas.to_datetime`.
end : str
Date string. Recommended format is yyyy-mm-dd.
Value is passed to `pandas.to_datetime`.
fmt : str
Custom date string format for `start` and `end`.
Value is passed for `datetime.strptime`.
See https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior
Returns
-------
list
Filtered holes.
Examples
--------
filtered_holes = holes_object.filter_holes(
bbox=(24,25,60,61)
)
filtered_holes = holes_object.filter_holes(
hole_type=["PO"]
)
filtered_holes = holes_object.filter_holes(
start="2015-05-15", end="2016-08-06"
)
filtered_holes = holes_object.filter_holes(
start="05/15/15", end="08/06/16", fmt="%x"
)
Return types are from:
_filter_coordinates(bbox, **kwargs)
_filter_type(hole_type, **kwargs)
_filter_date(start=None, end=None, fmt=None, **kwargs)
"""
filtered_holes = self.holes
if bbox is not None:
filtered_holes = self._filter_coordinates(filtered_holes, bbox, **kwargs)
if hole_type is not None:
filtered_holes = self._filter_type(filtered_holes, hole_type, **kwargs)
if start is not None or end is not None:
filtered_holes = self._filter_date(filtered_holes, start, end, fmt=fmt, **kwargs)
return filtered_holes
def _filter_coordinates(self, holes, bbox):
"""Filter object by coordinates."""
xmin, xmax, ymin, ymax = bbox
filtered_holes = []
for hole in holes:
if not (
hasattr(hole.header, "XY")
and "X" in hole.header.XY.keys()
and "Y" in hole.header.XY.keys()
):
continue
if (
hole.header.XY["X"] >= xmin
and hole.header.XY["X"] <= xmax
and hole.header.XY["Y"] >= ymin
and hole.header.XY["Y"] <= ymax
):
filtered_holes.append(hole)
return Holes(filtered_holes)
def _filter_type(self, holes, hole_type):
"""Filter object by survey abbreviation (type)."""
filtered_holes = []
if isinstance(hole_type, str):
hole_type = [hole_type]
for hole in holes:
if (
hasattr(hole.header, "TT")
and ("Survey abbreviation" in hole.header.TT)
and any(item == hole.header.TT["Survey abbreviation"] for item in hole_type)
):
filtered_holes.append(hole)
return Holes(filtered_holes)
def _filter_date(self, holes, start=None, end=None, fmt=None):
"""Filter object by datetime."""
if isinstance(start, str) and fmt is None:
start = pd.to_datetime(start)
elif isinstance(start, str) and fmt is not None:
start = datetime.strptime(start, fmt)
if isinstance(end, str) and fmt is None:
end = | pd.to_datetime(end) | pandas.to_datetime |
from covid19_util import *
import datetime
import time
import geonamescache
from io import StringIO
import ipywidgets
from matplotlib import dates as mdates
from matplotlib import colors as mcolors
import pandas as pd
import requests
import scipy.optimize
import scipy.stats
from multi_checkbox_widget import multi_checkbox_widget
class Covid19Processing:
def __init__(self, show_result=True):
self.dataframes = {}
gc = geonamescache.GeonamesCache()
gc_data = list(gc.get_countries().values())
gc_states = gc.get_us_states()
for state in gc_states:
state_data = gc_states[state]
if not state_data["name"].endswith(", US"):
state_data["name"] += ", US"
gc_data += [state_data]
self.country_metadata = {}
populations = pd.read_csv("populations.csv", names=["country", "population"], index_col=0, header=0)
for country in populations.index:
if country in normalized_names:
populations.loc[normalized_names[country]] = populations.loc[country]
self.countries_to_plot = ["Brazil", "China", "Japan", "South Korea", "United States",
"India", "Italy", "Germany", "Russia", "Netherlands", "Spain", "World"]
for country_data in gc_data:
name = country_data["name"]
if name in normalized_names:
name = normalized_names[name]
population = populations.loc[name].population
if "continentcode" in country_data:
continent = continent_codes[country_data["continentcode"]]
else:
continent = "North America"
self.country_metadata[name] = {
"population": population,
"continent": continent
}
for metric in data_urls.keys():
url = base_url + data_urls[metric] # Combine URL parts
r = requests.get(url) # Retrieve from URL
self.dataframes[metric] = pd.read_csv(StringIO(r.text), sep=",") # Convert into Pandas dataframe
if show_result:
# Display the first lines
display(Markdown("### Raw confirmed cases data, per region/state"))
with pd.option_context("display.max_rows", 10, "display.max_columns", 14):
display(self.dataframes["confirmed"])
def process(self, rows=20, debug=False):
# Clean up
for metric in data_urls.keys():
if "states" not in metric:
is_country = True
by = "_by_country"
else:
is_country = False
by = "_by_state"
if is_country:
by_country = self.dataframes[metric].groupby("Country/Region").sum() # Group by country
dates = by_country.columns[2:] # Drop Lat/Long
else:
by_country = self.dataframes[metric].groupby("Province_State").sum()
dates = by_country.columns[11:] # Skip various clutter columns
metric = metric.split("_", 1)[0]
# Convert to columns to matplotlib dates
by_country = by_country.loc[:, dates]
dates = pd.to_datetime(dates)
by_country.columns = dates
if metric == "confirmed":
# Early China data points
early_china_data = {
"1/17/20": 45,
"1/18/20": 62,
"1/20/20": 218
}
if is_country:
# Insert data points
for d, n in early_china_data.items():
by_country.loc["China", pd.to_datetime(d)] = n
# Retain chronological column order
by_country = by_country.reindex(list(sorted(by_country.columns)), axis=1)
by_country = by_country.fillna(0)
# Correct an odd blip in the Japanese data.
# From 2/5 to 2/7, the Johns Hopkins data for Japan goes 22, 45, 25.
# I assume that the 45 is incorrect. Replace with 23.5, halfway between the values for 2/5 and 2/7
by_country.loc["Japan", pd.to_datetime("2/06/20")] = 23.5
# Correct a typo in US data, see https://github.com/CSSEGISandData/COVID-19/issues/2167
if by_country.loc["US", pd.to_datetime("4/13/20")] == 682619:
by_country.loc["US", pd.to_datetime("4/13/20")] -= 102000
# Change some weird country names to more commonly used ones
by_country = by_country.rename(index=normalized_names)
if not is_country:
by_country.index = [x+", US"for x in by_country.index]
by_country.sort_index(inplace=True)
# Store processed results for metric
self.dataframes[metric + by] = by_country.fillna(0).astype(int)
all_countries = self.dataframes["confirmed_by_country"].index
# Add in recovered and active
for by in ["_by_country", "_by_state"]:
if is_country:
print("Simulating active and recovered cases...")
for x in ["recovered", "active"]:
self.dataframes[x + by] = | pd.DataFrame(columns=self.dataframes["confirmed"+by].columns) | pandas.DataFrame |
import seaborn as sns
import matplotlib.pyplot as plt
import json
import pandas as pd
import numpy as np
import csv
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warnings.simplefilter('ignore', ConvergenceWarning)
lower = 0
upper = 1
font = {'family': 'monospace',
'weight': 'bold',
'size': 20}
PLOT_MARGIN = 0.25
HEIGHT = 1920
WIDTH = 1200
'''
filename: .json file with correlation data
csv_filenames: list of .csv files with price data
plottable: 2D array with topmost and lowermost dependent parameters
'''
def scatter_reg(filename: str, csv_filenames: list[str], plottable: list):
with open(filename, "r", encoding="utf-8") as file:
data = json.load(file)
pl_list = []
for p_item in plottable:
p_dict = {}
for pl in p_item:
p_dict[pl] = []
for pl in p_item:
in_v = [i['independent_parameters']
for i in data if i['dependent'] == pl]
in_v = [list(i.keys()) for i in in_v]
in_v = np.array(in_v).flatten().tolist()
in_v = [w.lstrip('parameter').strip() for w in in_v]
p_dict[pl] = in_v
pl_list.append(p_dict)
from_files_data = [pd.read_csv(f, dtype=np.float32) for f in csv_filenames]
dataframe = pd.concat(from_files_data, axis=1)
plottable_df = []
for pl_v in pl_list:
df_l_temp = []
for df in pl_v:
unduplicated = list(set(pl_v[df]))
dep = dataframe[df]
indep = dataframe[unduplicated]
fin_df = pd.concat([dep, indep], axis=1)
df_l_temp.append(fin_df)
plottable_df.append(df_l_temp)
_, axes = plt.subplots(2, 3, figsize=(19, 11))
for index_1, _ in enumerate(plottable):
s_in = 0
for p_df in plottable_df[index_1]:
p_df = p_df.loc[:, ~p_df.columns.duplicated()]
y_max, y_min = np.amax(p_df.iloc[:, 0]), np.amin(p_df.iloc[:, 0])
x_max, x_min = np.amax(p_df.values), np.amin(p_df.values)
for _, d in enumerate(p_df):
if p_df.iloc[:, 0].name != d:
if len(set(p_df[d])) > 1:
s = sns.regplot(ax=axes[index_1, s_in], y=p_df.iloc[:, 0].name, x=d,
data=p_df, ci=None, truncate=False, robust=True, line_kws={'linewidth': 0.6}, scatter_kws={'alpha': 0.5})
y_delta = 0.2
x_delta = 1.4
s.set_ylim((y_min - y_delta, y_max + y_delta))
s.set_xlim((x_min - x_delta, x_max + x_delta))
name = p_df.iloc[:, 0].name
name = (name[:40] + '..') if len(name) > 44 else name
try:
int(name.split(",")[0])
name = "".join(name.split(",")[1:])
except:
name = name
axes[index_1, s_in].set_title(
name, fontdict={'fontsize': 21})
s.set(xlabel=" ")
s.set(ylabel=" ")
s_in += 1
png_fname = filename.split(".")[0] + ".png"
plt.tight_layout()
plt.savefig(f"plots/{png_fname}", format="png", bbox_inches='tight', dpi=300)
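# --- Illustrative call (not part of the original module) ---
# File names and parameter groups are placeholders; `plottable` holds two groups of up to
# three dependent parameters, matching the 2x3 axes grid created above.
def _example_scatter_reg():
    scatter_reg(
        'correlations.json',
        ['prices_2020.csv', 'prices_2021.csv'],
        [['param_a', 'param_b', 'param_c'], ['param_x', 'param_y', 'param_z']],
    )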
'''
filename: .json file with correlation data
csv_filenames: list of .csv files with independent parameters
name_list: list of dependent parameters
'''
def scatter(filename: str, csv_filenames: list[str], name_list: list[str]):
with open(filename, "r", encoding="utf-8") as file:
data = json.load(file)
from_files_data = [ | pd.read_csv(f, dtype=np.float32) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
"""
1. Uses the most recent publication of each researcher as input to generate user profiles.
2. The pretrained word2vec model window_5.model.bin and candidate_paper.csv are available via a Google Drive link;
you can download the files and change the paths in this script so that it runs successfully.
3. Results are saved in rank_result_rm/rank_result_mr_own_corpus.csv
"""
import sys
from gensim.models.keyedvectors import KeyedVectors
import numpy as np
import pandas as pd
# load pre-train model on my own corpus
model = '/Users/sherry/Downloads/window_5/window_5.model.bin'
w2v_model = KeyedVectors.load_word2vec_format(model, binary=True)
# read all candidate papers' info; the file contains two columns: paper ID and paper content
candidate_paper_df = | pd.read_csv('/Users/sherry/Downloads/candidate_papers.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from linear_ce import LinearActionExtractor
from mlp_ce import MLPActionExtractor
from forest_ce import ForestActionExtractor
def exp_sens(N=50, dataset='h', K=4, time_limit=600):
np.random.seed(0)
print('# Sensitivity')
from utils import DatasetHelper
D = DatasetHelper(dataset=dataset, feature_prefix_index=False)
X_tr, X_ts, y_tr, y_ts = D.train_test_split()
print('* Dataset: ', D.dataset_name)
print('* Feature: ', X_tr.shape[1])
from sklearn.linear_model import LogisticRegression
mdl = LogisticRegression(penalty='l2', C=1.0, solver='liblinear')
mdl = mdl.fit(X_tr, y_tr)
ce = LinearActionExtractor(mdl, X_tr, Y=y_tr, feature_names=D.feature_names, feature_types=D.feature_types, feature_categories=D.feature_categories,
feature_constraints=D.feature_constraints, target_name=D.target_name, target_labels=D.target_labels)
print('* Model: LR')
print('* Test Score: ', mdl.score(X_ts, y_ts))
print()
denied = X_ts[mdl.predict(X_ts)==1]
alphas = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
dict_sens = {'Mahalanobis':{0.001:[], 0.01:[], 0.1:[], 1.0:[], 10.0:[], 100.0:[]}, '10-LOF':{0.001:[], 0.01:[], 0.1:[], 1.0:[], 10.0:[], 100.0:[]}, 'Time':{0.001:[], 0.01:[], 0.1:[], 1.0:[], 10.0:[], 100.0:[]}}
for n,x in enumerate(denied[:N]):
print('## {}-th Denied Individual '.format(n+1))
print('### Cost: DACE')
for alpha in alphas:
print('#### alpha = {}'.format(alpha))
a = ce.extract(x, K=K, cost_type='DACE', alpha=alpha, time_limit=time_limit)
if(a!=-1):
print(a)
for key in dict_sens.keys(): dict_sens[key][alpha].append(a.scores_[key])
else:
for key in dict_sens.keys(): dict_sens[key][alpha].append(-1)
for key in dict_sens.keys():
pd.DataFrame(dict_sens[key]).to_csv('./res/sens/{}_{}.csv'.format(D.dataset_name, key), index=False)
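# --- Illustrative invocation (not part of the original experiments; argument values are placeholders) ---
def _example_exp_sens():
    exp_sens(N=10, dataset='h', K=4, time_limit=300)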
def exp_compare(N=1, dataset='h', model='LR', K=4, time_limit=600):
np.random.seed(0)
print('# Comparison')
from utils import DatasetHelper
D = DatasetHelper(dataset=dataset, feature_prefix_index=False)
X_tr, X_ts, y_tr, y_ts = D.train_test_split()
print('* Dataset: ', D.dataset_name)
print('* Feature: ', X_tr.shape[1])
print('* Model: ', model)
if(model=='LR'):
from sklearn.linear_model import LogisticRegression
mdl = LogisticRegression(penalty='l2', C=1.0, solver='liblinear')
print('* C: ', mdl.C)
mdl = mdl.fit(X_tr, y_tr)
ce = LinearActionExtractor(mdl, X_tr, Y=y_tr, feature_names=D.feature_names, feature_types=D.feature_types, feature_categories=D.feature_categories,
feature_constraints=D.feature_constraints, target_name=D.target_name, target_labels=D.target_labels)
elif(model=='MLP'):
from sklearn.neural_network import MLPClassifier
mdl = MLPClassifier(hidden_layer_sizes=(200,), max_iter=500, activation='relu', alpha=0.0001)
print('* T: ', mdl.hidden_layer_sizes)
mdl = mdl.fit(X_tr, y_tr)
ce = MLPActionExtractor(mdl, X_tr, Y=y_tr, feature_names=D.feature_names, feature_types=D.feature_types, feature_categories=D.feature_categories,
feature_constraints=D.feature_constraints, target_name=D.target_name, target_labels=D.target_labels)
elif(model=='RF'):
from sklearn.ensemble import RandomForestClassifier
mdl = RandomForestClassifier(n_estimators=100, max_depth=8 if dataset=='o' else 4)
print('* T: ', mdl.n_estimators)
print('* depth: ', mdl.max_depth)
mdl = mdl.fit(X_tr, y_tr)
ce = ForestActionExtractor(mdl, X_tr, Y=y_tr, feature_names=D.feature_names, feature_types=D.feature_types, feature_categories=D.feature_categories,
feature_constraints=D.feature_constraints, target_name=D.target_name, target_labels=D.target_labels)
print('* Test Score: ', mdl.score(X_ts, y_ts))
print()
denied = X_ts[mdl.predict(X_ts)==1]
dict_comp = {'TLPS':{'Mahalanobis':[], '10-LOF':[], 'Time':[]}, 'MAD':{'Mahalanobis':[], '10-LOF':[], 'Time':[]}, 'PCC':{'Mahalanobis':[], '10-LOF':[], 'Time':[]}}
alphas = [0.01, 0.1, 1.0]
dict_dace = {0.01:{'Mahalanobis':[], '10-LOF':[], 'Time':[]}, 0.1:{'Mahalanobis':[], '10-LOF':[], 'Time':[]}, 1.0:{'Mahalanobis':[], '10-LOF':[], 'Time':[]}}
for n,x in enumerate(denied[:N]):
print('## {}-th Denied Individual '.format(n+1))
for cost in ['TLPS', 'MAD', 'PCC']:
print('### Cost: ', cost)
a = ce.extract(x, K=K, cost_type=cost, time_limit=time_limit)
if(a!=-1):
print(a)
for key in dict_comp[cost].keys(): dict_comp[cost][key].append(a.scores_[key])
else:
for key in dict_comp[cost].keys(): dict_comp[cost][key].append(-1)
print('### Cost: DACE')
for alpha in alphas:
print('#### alpha = {}'.format(alpha))
a = ce.extract(x, K=K, cost_type='DACE', alpha=alpha, time_limit=time_limit)
if(a!=-1):
print(a)
for key in dict_dace[alpha].keys(): dict_dace[alpha][key].append(a.scores_[key])
else:
for key in dict_dace[alpha].keys(): dict_dace[alpha][key].append(-1)
for key in dict_comp.keys():
| pd.DataFrame(dict_comp[key]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': | Categorical(['1', '1', '2']) | pandas.Categorical |
# -*- coding: utf-8 -*- {{{
#
# Your license here
# }}}
import sys
from datetime import datetime, timedelta
from dateutil import parser
import pandas as pd
import matplotlib.pyplot as plt
from os.path import dirname, abspath, join
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from fleet_factory import create_fleet
from service_factory import create_service
def integration_test(service_name, fleet_name, service_type='Traditional', **kwargs):
start_time = kwargs['start_time']
sim_step = dynamic_time_step(service_name, fleet_name)
kwargs['sim_step'] = sim_step
# Create test service
service = create_service(service_name, **kwargs)
if service is None:
raise ValueError('Could not create service with name ' + service_name)
grid_type = 1
if service_name == 'ArtificialInertia':
grid_type = 2
# Create test fleet
fleet = create_fleet(fleet_name, grid_type, **kwargs)
if fleet is None:
raise ValueError('Could not create fleet with name ' + fleet_name)
# Assign test fleet to test service to use
service.fleet = fleet
assigned_fleet_name = service.fleet.__class__.__name__
# Run test
if service_name == 'Regulation':
monthtimes = dict({
# 'January': ["2017-01-01 00:00:00", "2017-01-31 23:59:59"],
# 'February': ["2017-02-01 00:00:00", "2017-02-28 23:59:59"],
# 'March': ["2017-03-01 00:00:00", "2017-03-31 23:59:59"],
# 'April': ["2017-04-01 00:00:00", "2017-04-30 23:59:59"],
# 'May': ["2017-05-01 00:00:00", "2017-05-31 23:59:59"],
# 'June': ["2017-06-01 00:00:00", "2017-06-30 23:59:59"],
# 'July': ["2017-07-01 00:00:00", "2017-07-31 23:59:59"],
'August': ["2017-08-01 16:00:00", "2017-08-01 18:59:59"],
# 'September': ["2017-09-01 00:00:00", "2017-09-30 23:59:59"],
# 'October': ["2017-10-01 00:00:00", "2017-10-31 23:59:59"],
# 'November': ["2017-11-01 00:00:00", "2017-11-30 23:59:59"],
# 'December': ["2017-12-01 00:00:00", "2017-12-31 23:59:00"]
})
all_results = pd.DataFrame(columns=['performance_score', 'hourly_integrated_MW',
'mileage_ratio', 'Regulation_Market_Clearing_Price(RMCP)',
'Reg_Clearing_Price_Credit'])
for month in monthtimes.keys():
print('Starting ' + str(month) + ' ' + service_type + ' at ' + datetime.now().strftime('%H:%M:%S'))
fleet_response = service.request_loop(service_type=service_type,
start_time=parser.parse(monthtimes[month][0]),
end_time=parser.parse(monthtimes[month][1]),
sim_step=sim_step,
clearing_price_filename='historical-ancillary-service-data-2017.xls',
fleet_name=assigned_fleet_name)
month_results = pd.DataFrame.from_dict(fleet_response, orient='index')
all_results = pd.concat([all_results, month_results])
print(' Finished ' + str(month) + ' ' + service_type)
# Fix formatting of all_results dataframe to remove tuples
all_results[['Perf_score', 'Delay_score', 'Corr_score', 'Prec_score']] = all_results['performance_score'].apply(
pd.Series)
all_results[['MCP', 'REG_CCP', 'REG_PCP']] = all_results['Regulation_Market_Clearing_Price(RMCP)'].apply(
pd.Series)
all_results[['Reg_Clr_Pr_Credit', 'Reg_RMCCP_Credit', 'Reg_RMPCP_Credit']] = all_results[
'Reg_Clearing_Price_Credit'].apply(pd.Series)
all_results.drop(
columns=['performance_score', 'Regulation_Market_Clearing_Price(RMCP)', 'Reg_Clearing_Price_Credit'],
inplace=True)
print('Writing result .csv')
file_dir = join(dirname(abspath(__file__)), 'services', 'reg_service', 'results', '')
all_results.to_csv(file_dir + datetime.now().strftime(
'%Y%m%d') + '_annual_hourlyresults_' + service_type + '_' + fleet_name + '.csv')
elif service_name == 'Reserve':
monthtimes = dict({
# 'January': ["2017-01-08 00:00:00", "2017-01-08 23:59:59"],
# 'February': ["2017-02-01 00:00:00", "2017-02-28 23:59:59"],
# 'March': ["2017-03-01 00:00:00", "2017-03-31 23:59:59"],
# 'April': ["2017-04-01 00:00:00", "2017-04-30 23:59:59"],
# 'May': ["2017-05-01 00:00:00", "2017-05-31 23:59:59"],
# 'June': ["2017-06-01 00:00:00", "2017-06-30 23:59:59"],
'June': ["2017-06-07 00:00:00", "2017-06-07 23:59:59"],
# 'June': ["2017-06-07 00:00:00", "2017-06-08 23:59:59"],
# 'July': ["2017-07-01 00:00:00", "2017-07-31 23:59:59"],
# 'August': ["2017-08-01 00:00:00", "2017-08-31 23:59:59"],
# 'September': ["2017-09-01 00:00:00", "2017-09-30 23:59:59"],
# 'October': ["2017-10-01 00:00:00", "2017-10-31 23:59:59"],
# 'November': ["2017-11-01 00:00:00", "2017-11-30 23:59:59"],
# 'December': ["2017-12-01 00:00:00", "2017-12-31 23:59:00"]
})
all_results = pd.DataFrame(columns=['Event_Start_Time', 'Event_End_Time',
'Response_to_Request_Ratio', 'Response_MeetReqOrMax_Index_number',
'Event_Duration_mins', 'Response_After10minToEndOr30min_To_First10min_Ratio',
'Requested_MW', 'Responded_MW_at_10minOrEnd',
'Responded_MW_After10minToEndOr30min', 'Shortfall_Ratio',
'Response_0min_Min_MW', 'Response_10minOrEnd_Max_MW',
'Response_After10minToEnd_MW', 'Avg_Ramp_Rate', 'Best_Ramp_Rate',
'SRMCP_DollarsperMWh_DuringEvent',
'SRMCP_DollarsperMWh_SinceLastEvent',
'Service_Value_NotInclShortfall_dollars',
'Service_Value_InclShortfall_dollars',
'Period_from_Last_Event_Hours',
'Period_from_Last_Event_Days'])
if 'battery' in assigned_fleet_name.lower():
annual_signals = pd.DataFrame(columns=['Date_Time', 'Request', 'Response', 'SoC'])
else:
annual_signals = pd.DataFrame(columns=['Date_Time', 'Request', 'Response'])
previous_event_end = pd.Timestamp('05/01/2017 00:00:00')
for month in monthtimes.keys():
print('Starting ' + str(month) + ' at ' + datetime.now().strftime('%H:%M:%S'))
start_time = parser.parse(monthtimes[month][0])
fleet_response = service.request_loop(start_time=start_time,
end_time=parser.parse(monthtimes[month][1]),
sim_step=sim_step,
clearing_price_filename=start_time.strftime('%Y%m') + '.csv',
previous_event_end=previous_event_end,
four_scenario_testing=False,
fleet_name=assigned_fleet_name)
try:
previous_event_end = fleet_response[0].Event_End_Time[-1]
except:
# If the dataframe in fleet_response[0] has no entries, then attempting
# to index the Event_End_Time column will throw an error. This allows
# for skipping past that error.
pass
all_results = pd.concat([all_results, fleet_response[0]], sort=True)
annual_signals = | pd.concat([annual_signals, fleet_response[1]], sort=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
# import collections
from collections import Counter
import pickle
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress, zscore
import pandas as pd
import numpy as np
import datetime as dt
import pandas as pd
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes, CollectLoadPars
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
from prep_postchar import postChar
import EIS_export
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
import logging
_logger = logging.getLogger(__name__)
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
folder.mkdir(exist_ok=True, parents=True)
return folder
def filter_cols(_df, n):
if any(["startswith" in i for i in n]):
_lst = [i for i in _df.columns if i.startswith(n[-1])]
else:
_lst = [i for i in _df.columns if n[-1] in i]
return _lst
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(
Path("Preparation-Thesis/SiO2_projects/SiO2_Me_ECdepth+LC")
)
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep=True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(
tuples_index=[tuple(i) for i in output_df[names].values]
)
if isinstance(columns, list):
output_df = output_df.assign(
tuples_columns=[tuple(i) for i in output_df[columns].values]
) # hashable
output_df = output_df.pivot(
index="tuples_index", columns="tuples_columns", values=values
)
output_df.columns = pd.MultiIndex.from_tuples(
output_df.columns, names=columns
) # reduced
else:
output_df = output_df.pivot(
index="tuples_index", columns=columns, values=values
)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
return output_df
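# --- Illustrative usage sketch (not part of the original module; column names are hypothetical) ---
def _example_multiIndex_pivot(df):
    return multiIndex_pivot(df, index=['SampleID', 'Gas'], columns=['pH', 'Loading'], values='value')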
def get_float_cols(df):
return [key for key, val in df.dtypes.to_dict().items() if "float64" in str(val)]
def cm2inch(value):
return value / 2.54
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = func(*args, **kwargs)
# Do something after
return value
return wrapper_decorator
def read_load_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
DF_diff.columns
return DF_diff
except Exception as e:
print("reading error", e)
return pd.DataFrame()
else:
print("read error not existing", _pklpath)
return | pd.DataFrame() | pandas.DataFrame |
"""Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy <EMAIL>
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import json
import os
import time
from types import SimpleNamespace
import fire
import pandas
import pandas as pd
import torch
from torch.utils.data import DataLoader
from sfzyzb.data import Data
from sfzyzb.evaluate import evaluate
from sfzyzb.model import BertForClassification, RnnForSentencePairClassification, LogisticRegression
from sfzyzb.utils import load_torch_model
LABELS = ['0', '1']
MODEL_MAP = {
'bert': BertForClassification,
'rnn': RnnForSentencePairClassification,
'lr': LogisticRegression
}
class Sentence_Abstract(object):
def __init__(self,model_config='sfzyzb/config/bert_config-l.json'):
# 0. Load config
with open(model_config) as fin:
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
self.device = torch.device('cuda:0')
# device = torch.device('cpu')
else:
self.device = torch.device('cpu')
# 1. Load data
self.data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
# 2. Load model
self.model = MODEL_MAP[config.model_type](config)
self.model = load_torch_model(
self.model, model_path=os.path.join(config.model_path, 'model.bin'))
self.model.to(self.device)
self.config = config
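    # Typical usage (illustrative; the input path is a placeholder and must point to a JSON-lines
    # file whose rows contain a 'text' list of {'sentence': ...} items, as parsed in get_abstract below):
    #   sa = Sentence_Abstract()
    #   sa.get_abstract('data/input.jsonl')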
def get_abstract(self, in_file):
# 0. preprocess file
tag_sents = []
para_id = 0
convert_file ="data/{}-2.csv".format(time.time())
with open(in_file, 'r', encoding='utf-8') as fin:
for line in fin:
sents = json.loads(line.strip())
text = sents['text']
sentences = [item['sentence'] for item in text]
for sent in sentences:
tag_sents.append((para_id, sent))
para_id += 1
df = pandas.DataFrame(tag_sents, columns=['para', 'content'])
df.to_csv(convert_file, columns=['para', 'content'], index=False)
test_set = self.data.load_file(convert_file, train=False)
data_loader_test = DataLoader(
test_set, batch_size=self.config.batch_size, shuffle=False)
# 3. Evaluate
answer_list = evaluate(self.model, data_loader_test, self.device)
# 4. Write answers to file
# df = pd.read_csv("data/para_content_test.csv")
idcontent_list = list(df.itertuples(index=False))
filter_list = [k for k, v in zip(idcontent_list, answer_list) if v]
df = | pd.DataFrame(filter_list, columns=['para', 'content']) | pandas.DataFrame |