prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90) |
---|---|---|
"""Pre-process data for the visualisation website."""
import pandas as pd
import logging
import sys
UK_CASES_PATH = 'data/uk_cases.csv'
OUTPUT_PATH = 'docs/assets/data/site_data.csv'
# RtCproj_PATH = '../data/RtCproj.csv'
uk_cases = pd.read_csv(UK_CASES_PATH)
# RtCproj = pd.read_csv(RtCproj_PATH)
df = uk_cases.set_index(['Country', 'Area name']).stack().to_frame().reset_index().rename(columns={
'Area name': 'area',
'level_2': 'Date',
0: 'cases_new'
})
df['cases_new_smoothed'] = df['cases_new'].rolling(7, center=True).mean()
df['Date'] =
|
pd.to_datetime(df['Date'])
|
pandas.to_datetime
|
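The completion above parses the stacked date column with `pd.to_datetime`. A minimal sketch of the same pattern on a small synthetic frame (the inline data below is illustrative, not the original `uk_cases.csv`):

```python
import pandas as pd

# Synthetic wide-format case counts: one row per area, one column per date.
uk_cases = pd.DataFrame(
    {"Country": ["England"], "Area name": ["Camden"],
     "2020-03-01": [3], "2020-03-02": [5], "2020-03-03": [4]}
)

# Stack the date columns into long format, as in the snippet above.
df = (
    uk_cases.set_index(["Country", "Area name"])
    .stack()
    .to_frame()
    .reset_index()
    .rename(columns={"Area name": "area", "level_2": "Date", 0: "cases_new"})
)

df["Date"] = pd.to_datetime(df["Date"])  # the completed line
df["cases_new_smoothed"] = df["cases_new"].rolling(7, center=True).mean()
print(df.dtypes)
```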
import csv
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import time
import goslate
import numpy as np
import cld3
from googletrans import Translator
###############DataFrames:#########################
dfn = pd.read_csv("/root/.encrypted/.pythonSai/ira_tweets_csv_hashed.csv", sep=",", header=None, usecols=[0,1,2,3,6,12,14,15,16,19,20,27,28], chunksize=2000, names=["tweetid", "userid", "user_display_name", "user_screen_name", "user_profile_url", "tweet_text", "tweet_client_name", "in_reply_to_tweetid", "in_reply_to_userid", "retweet_userid", "retweet_tweetid", "hashtags", "urls"])
dfi = pd.read_csv("/root/.encrypted/.pythonSai/kCoreBots/kCoreBotsList.csv", sep=",", header=None, chunksize=2000)
df_lst =
|
pd.DataFrame(columns=["tweetid", "userid", "user_display_name", "user_screen_name", "user_profile_url", "tweet_text", "tweet_client_name", "in_reply_to_tweetid", "in_reply_to_userid", "retweet_userid", "retweet_tweetid", "hashtags", "urls"])
|
pandas.DataFrame
|
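The completion builds an empty accumulator frame with a fixed column list; each chunk yielded by `read_csv(..., chunksize=...)` can then be filtered and appended to it. A minimal sketch of that accumulator pattern with synthetic in-memory data (the hashed tweet dump and the k-core bot list paths are not reproduced here):

```python
import io
import pandas as pd

cols = ["tweetid", "userid", "tweet_text"]

# Stand-in for the chunked read of the large tweet CSV.
raw = "1,100,hello\n2,200,world\n3,100,again\n"
reader = pd.read_csv(io.StringIO(raw), header=None, names=cols, chunksize=2)

# Accumulator with a fixed schema, as in the completed line.
df_lst = pd.DataFrame(columns=cols)

bot_ids = {100}  # stand-in for the k-core bot list
for chunk in reader:
    keep = chunk[chunk["userid"].isin(bot_ids)]
    df_lst = pd.concat([df_lst, keep], ignore_index=True)

print(df_lst)
```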
from io import StringIO
from pathlib import Path
import pytest
import pandas as pd
from pandas import DataFrame, read_json
import pandas._testing as tm
from pandas.io.json._json import JsonReader
@pytest.fixture
def lines_json_df():
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
return df.to_json(lines=True, orient="records")
def test_read_jsonl():
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars():
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_to_jsonl():
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)
@pytest.mark.parametrize("chunksize", [1, 1.0])
def test_readjson_chunks(lines_json_df, chunksize):
# Basic test that read_json(chunks=True) gives the same result as
# read_json(chunks=False)
# GH17048: memory usage when lines=True
unchunked = read_json(StringIO(lines_json_df), lines=True)
reader = read_json(StringIO(lines_json_df), lines=True, chunksize=chunksize)
chunked =
|
pd.concat(reader)
|
pandas.concat
|
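With `lines=True` and a `chunksize`, `read_json` returns a `JsonReader` that yields DataFrames; the completion stitches the chunks back together with `pd.concat`, and the test then checks that the result equals the unchunked read. A small standalone sketch of that behaviour:

```python
from io import StringIO
import pandas as pd

jsonl = '{"A": 1, "B": 4}\n{"A": 2, "B": 5}\n{"A": 3, "B": 6}\n'

unchunked = pd.read_json(StringIO(jsonl), lines=True)

reader = pd.read_json(StringIO(jsonl), lines=True, chunksize=1)
chunked = pd.concat(reader)  # the completed line

pd.testing.assert_frame_equal(chunked, unchunked)  # chunked == unchunked
```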
"""based on https://github.com/quintel/etdataset-
public/tree/master/curves/demand/households/space_heating by Quintel"""
from pathlib import Path
import pandas as pd
from .smoothing import ProfileSmoother
class HouseholdProfileModel:
"""Class to describe a heating model of a house"""
@property
def u_value(self):
return 1 / self.r_value
@property
def concrete_mass(self):
return self.surface_area * self.wall_thickness * self.p_concrete
@property
def heat_capacity(self):
return self.concrete_mass * self.c_concrete / 3.6e6
@property
def exchange_delta(self):
return self.u_value * self.surface_area / 1000
@classmethod
def from_defaults(cls, house_type, insulation_type, **kwargs):
"""Quintel default values"""
# load properties
file = Path(__file__).parent / 'data/house_properties.csv'
properties = pd.read_csv(file, index_col=[0, 1])
# subset correct house and insulation profile
properties = properties.loc[(house_type, insulation_type)]
# load thermostat values
file = Path(__file__).parent / 'data/thermostat_values.csv'
thermostat = pd.read_csv(file, usecols=[insulation_type], squeeze=True)
# convert to dictionary
config = properties.to_dict()
# append other config props
config['thermostat'] = thermostat.to_dict()
config['house_type'] = house_type
config['insulation_type'] = insulation_type
# append kwargs
config.update(kwargs)
return cls(**config)
def __init__(self, behaviour, surface_area, thermostat, r_value,
wall_thickness, window_area, house_type, insulation_type,
**kwargs):
"""kwargs are passed to smoother"""
# set constants
self.p_concrete = 2400
self.c_concrete = 880
self.behaviour = behaviour
self.surface_area = surface_area
self.window_area = window_area
self.thermostat = thermostat
self.inside_temperature = self.thermostat[0]
self.r_value = r_value
self.wall_thickness = wall_thickness
self.house_type = house_type
self.insulation_type = insulation_type
self.__smoother = ProfileSmoother(**kwargs)
def check_profile(self, profile):
# check profile length
if len(profile) != 8760:
raise ValueError(f'"{profile.name}" must contain 8760 values')
return profile
def make_heat_demand_profile(self, temperature, irradiation):
"""heat demand profile"""
if len(temperature) != 8760:
raise ValueError('temperature must contain 8760 values')
if len(irradiation) != 8760:
raise ValueError('irradiation must contain 8760 values')
# merge datapoints
profile = pd.concat([temperature, irradiation], axis=1)
profile.columns = ['temperature', 'irradiance']
# make periodindex
start = f'01-01-01 00:00'
profile.index = pd.period_range(start=start, periods=8760, freq='H')
# set hour columns
profile['hour'] = profile.index.hour
# calculate heat demand
func = self._calculate_heat_demand
profile = profile.apply(lambda cols: func(*cols), axis=1)
# smooth profile
func = self.__smoother.calculate_smoothed_demand
profile = func(profile.values, self.insulation_type)
# name profile
name = f'weather/insulation_{self.house_type}_{self.insulation_type}'
profile = pd.Series(profile, name=name, dtype='float64')
# factor profile
profile = profile / profile.sum() / 3600
return profile
def _calculate_heat_demand(self, outside_temperature,
solar_irradiation, hour_of_the_day):
thermostat_temperature = self.thermostat[hour_of_the_day]
# How much energy is needed from heating to bridge the temperature gap?
if self.inside_temperature < thermostat_temperature:
needed_heating_demand = (
(thermostat_temperature - self.inside_temperature) *
self.heat_capacity
)
else:
needed_heating_demand = 0.0
# Updating the inside temperature
if self.inside_temperature < thermostat_temperature:
self.inside_temperature = thermostat_temperature
# How big is the difference between the temperature inside and outside?
temperature_difference = self.inside_temperature - outside_temperature
# How much heat is leaking away in this hour?
energy_leaking = (
self.exchange_delta * temperature_difference
)
# How much energy is added by irradiation?
energy_added_by_irradiation = solar_irradiation * self.window_area
# What is the inside temperature after the leaking?
self.inside_temperature = (
self.inside_temperature -
(energy_leaking - energy_added_by_irradiation) /
self.heat_capacity
)
return needed_heating_demand
class HouseholdsModel:
"""Class to create household profiles"""
@classmethod
def from_defaults(cls):
"""Quintel default values"""
# load properties
cols = ['house_type', 'insulation_level']
file = Path(__file__).parent / 'data/house_properties.csv'
properties = pd.read_csv(file, usecols=cols)
house_types = properties.house_type.unique()
insulation_types = properties.insulation_level.unique()
return cls(house_types, insulation_types)
def __init__(self, house_types, insulation_types):
# set arguments
self.house_types = house_types
self.insulation_types = insulation_types
def make_heat_demand_profile(self, house_type, insulation_type,
temperature, irradiance, **kwargs):
"""kwargs are passed to smoother"""
model = HouseholdProfileModel.from_defaults(house_type, insulation_type, **kwargs)
profile = model.make_heat_demand_profile(temperature, irradiance)
return profile
def make_heat_demand_profiles(self, temperature, irradiance, **kwargs):
"""kwargs are passed to smoother"""
# reference props
houses = self.house_types
levels = self.insulation_types
# set up parameters
func = self.make_heat_demand_profile
config = {'temperature' : temperature, 'irradiance': irradiance}
# append kwargs
config.update(kwargs)
# make profiles
profiles = [func(h, l, **config) for h in houses for l in levels]
return
|
pd.concat(profiles, axis=1)
|
pandas.concat
|
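`make_heat_demand_profiles` collects one named `Series` per house/insulation combination and the completion joins them column-wise. A minimal sketch of combining named profiles with `pd.concat(..., axis=1)`; the profile values here are random placeholders rather than Quintel model output, and the house/insulation labels are examples:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)

# Stand-ins for the smoothed heat-demand profiles (8760 hourly values each).
profiles = [
    pd.Series(rng.random(8760), name=f"weather/insulation_{house}_{level}")
    for house in ("terraced_house", "apartment")
    for level in ("low", "medium", "high")
]

# Column-wise concatenation, as in the completed line.
result = pd.concat(profiles, axis=1)
print(result.shape)            # (8760, 6)
print(result.columns.tolist())
```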
from MovieRecommender import train_test_model
import pandas as pd
import numpy as np
import sys
from scipy.sparse import csr_matrix, load_npz
import pickle
from tabulate import tabulate
def get_movies_rated(data, user_id, train_data, movies):
data_matrix = data.loc[data.rating != 0]
users = list(np.sort(data_matrix.user_id.unique())) # Get unique users
items = list(np.sort(data_matrix.item_id.unique())) # Get unique movies
users_arr = np.array(users) # Array of user IDs from the ratings matrix
items_arr = np.array(items) # Array of movie IDs from the ratings matrix
# Returns index row of user id
user_ind = np.where(users_arr == user_id)[0][0]
# Get column indices of rated items
rating_ind = train_data[user_ind, :].nonzero()[1]
movie_codes = items_arr[rating_ind] # Get the movie ids for rated items
return movies.loc[movies['item_id'].isin(movie_codes),
'name'].reset_index(drop=True)
def predict_ratings(predictions, item_vecs, user_id):
item_vecs = predictions[1]
user_vec = predictions[0][user_id, :]
pred = user_vec.dot(item_vecs).toarray()[0].reshape(-1)
return pred
def similar_items(movies, model, movie_list, n_similar=20):
# Use implicit to get similar items.
movies.name = movies.name.str.strip()
item_id = movies.item_id.loc[movies.name.str.lower().
isin([s.lower() for s in movie_list])].iloc[0]
movie_names = []
similar = model.similar_items(item_id, n_similar)
# Print the names of similar movies
for item in similar:
idx, rating = item
movie_names.append(movies.name.loc[movies.item_id == idx+1].iloc[0])
similar = pd.DataFrame({"Similar Movies": movie_names[1:]})
return similar
def recommendations(data, train_data, movies, model,
sparse_user_item, user_id):
# Use the implicit recommender.
recommended = model.recommend(user_id, sparse_user_item)
movies_recom = []
# ratings_recom = []
# Get movie names from ids
for item in recommended:
idx, rating = item
movies_recom.append((movies.name.loc[movies.item_id == idx+1].iloc[0]))
# ratings_recom.append(rating)
# Create a dataframe of movie names and scores
# recommendations = pd.DataFrame({'Movies': movies_recom,
# 'Rating': ratings_recom})
movies_rated_by_users = get_movies_rated(data, user_id, train_data, movies)
minlen = min(len(movies_recom), len(movies_rated_by_users))
recommendations = pd.DataFrame({'Recommended Movies':
movies_recom[:minlen],
'Movies Rated':
movies_rated_by_users[:minlen]})
return recommendations
def main():
train_test_model.main()
movies = pd.read_pickle("./output/movies.pkl")
sparse_user_item = load_npz("./output/sparse_user_item.npz")
item_vecs = np.load('./output/item_vecs.npy')
user_vecs = np.load('./output/user_vecs.npy')
data =
|
pd.read_pickle("./output/ratings.pkl")
|
pandas.read_pickle
|
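The completion reloads a previously pickled ratings frame. A tiny round-trip sketch of `to_pickle`/`read_pickle`, using a temporary file instead of the original `./output/ratings.pkl`:

```python
import tempfile
from pathlib import Path

import pandas as pd

ratings = pd.DataFrame(
    {"user_id": [1, 1, 2], "item_id": [10, 20, 10], "rating": [4, 5, 3]}
)

with tempfile.TemporaryDirectory() as tmp:
    path = Path(tmp) / "ratings.pkl"
    ratings.to_pickle(path)

    data = pd.read_pickle(path)  # the completed call, with an illustrative path
    print(data.equals(ratings))  # True
```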
import datetime as date
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import Dash, dcc, html, Input, Output, State
from flask import Flask
from flask_session import Session
from flask import Flask, current_app, flash, jsonify, make_response, redirect, request, render_template, send_file, Blueprint, url_for, redirect
from functools import wraps
from distutils.log import error
from azure.cosmos import CosmosClient, PartitionKey
from googleapiclient.discovery import build
import logging
from ms_identity_web import IdentityWebPython
from ms_identity_web.adapters import FlaskContextAdapter
from ms_identity_web.configuration import AADConfig
from api_miners import key_vault, youtube, pubmed
from api_miners.pubmed import *
from views import pubs, education
from dash.dash_table.Format import Format, Group
#App Configurations
app = Flask(__name__)
Session(app) # init the serverside session for the app: this is required due to large cookie size
aad_configuration = AADConfig.parse_json('aadconfig.json')
SESSION_TYPE = "filesystem"
SESSION_STATE = None
key_dict = key_vault.get_key_dict()
endpoint = key_dict['AZURE_ENDPOINT']
azure_key = key_dict['AZURE_KEY']
secret_api_key = key_dict['SERPAPI_KEY']
#CosmosDB Connection
client = CosmosClient(endpoint, azure_key)
database_name = 'ohdsi-impact-engine'
container = pubmed.init_cosmos(key_dict, 'pubmed')
container_ignore = pubmed.init_cosmos(key_dict, 'pubmed_ignore')
#Azure Authentication Configurations
secure_client_credential=None
app.logger.level=logging.INFO # can set to DEBUG for verbose logs
if app.config.get('ENV') == 'production':
# The following is required to run on Azure App Service or any other host with reverse proxy:
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
# Use client credential from outside the config file, if available.
if secure_client_credential: aad_configuration.client.client_credential = secure_client_credential
AADConfig.sanity_check_configs(aad_configuration)
adapter = FlaskContextAdapter(app)
ms_identity_web = IdentityWebPython(aad_configuration, adapter)
#youtube.main()
#pubmed.main()
# app=dash.Dash()
#app.layout= pubs.build_pubs_dash()
# app.layout= education.build_education_dash()
#Dash Apps
import dash_bootstrap_components as dbc
external_stylesheets = [dbc.themes.BOOTSTRAP]
pubmedDashApp = dash.Dash(__name__, server=app, url_base_pathname='/pub_dashboard/', external_stylesheets=external_stylesheets)
pubmedDashApp.layout= pubs.build_pubs_dash
youtubeDashApp = dash.Dash(__name__, server=app, url_base_pathname='/educ_dashboard/', external_stylesheets=external_stylesheets)
youtubeDashApp.layout= education.build_education_dash
@app.route('/')
@app.route('/sign_in_status')
def index():
return render_template('home.html')
# return render_template('auth/status.html')
@app.route('/publication_dashboard/', methods = ['POST', 'GET'])
def dashboard():
# dashHtml = BeautifulSoup(pubmedDashApp.index(), 'html.parser')
return render_template("publication_dashboard.html")
# return jsonify({'htmlresponse': render_template('publication_dashboard.html', dashHtml = pubmedDashApp)})
@app.route('/pub_dashboard', methods = ['POST', 'GET'])
def dash_app_pub():
return pubmedDashApp.index()
@pubmedDashApp.callback(
Output(component_id='bar-container', component_property='children'),
[Input(component_id='datatable-interactivity', component_property="derived_virtual_data"),
Input(component_id='datatable-interactivity', component_property='derived_virtual_selected_rows'),
Input(component_id='datatable-interactivity', component_property='derived_virtual_selected_row_ids'),
Input(component_id='datatable-interactivity', component_property='selected_rows'),
Input(component_id='datatable-interactivity', component_property='derived_virtual_indices'),
Input(component_id='datatable-interactivity', component_property='derived_virtual_row_ids'),
Input(component_id='datatable-interactivity', component_property='active_cell'),
Input(component_id='datatable-interactivity', component_property='selected_cells')], prevent_initial_call=True
)
def update_bar(all_rows_data, slctd_row_indices, slct_rows_names, slctd_rows,
order_of_rows_indices, order_of_rows_names, actv_cell, slctd_cell):
dff = pd.DataFrame(all_rows_data)
df2=((dff.groupby('Publication Year')['PubMed ID']).count()).reset_index()
df2.columns=['Year','Count']
df3=((dff.groupby('Publication Year')['Citation Count']).sum()).reset_index()
df3['cumulative']= round(df3['Citation Count'].cumsum(), 0)
df3.columns=['Year','citations','Count']
fig = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig.add_trace(
go.Bar(
x=df2['Year'],
y=df2['Count'],
marker=dict(color = '#20425A'),
hovertemplate =
'<i>Publications in %{x}</i>: %{y:.0f}<extra></extra>',
showlegend = False
),
secondary_y=False,
)
fig.add_trace(
go.Line(
x=df3['Year'],
y=df3['Count'],
marker=dict(color = '#f6ac15'),
hovertemplate =
'<i>Citations in %{x}</i>: %{y} <extra></extra>',
),
secondary_y='Secondary'
)
# Add figure title
fig.update_layout(title_text="<b> OHDSI Publications & Cumulative Citations</b>", title_x=0.5, showlegend=False)
# Set x-axis title
fig.update_xaxes(title_text="Year")
# Set y-axes titles
fig.update_yaxes(
title_text="Number of Publications",
secondary_y=False)
fig.update_yaxes(
title_text="Number of Citations",
secondary_y=True)
return [
dcc.Graph(id = 'bar-chart',
figure = fig.update_layout(yaxis={'tickformat': '{:,}'}),
style={'width': '100%', 'padding-left': '50px'},
)
]
@pubmedDashApp.callback(
Output(component_id='line-container', component_property='children'),
[Input(component_id='datatable-interactivity', component_property="derived_virtual_data"),
Input(component_id='datatable-interactivity', component_property='derived_virtual_selected_rows'),
Input(component_id='datatable-interactivity', component_property='derived_virtual_selected_row_ids'),
Input(component_id='datatable-interactivity', component_property='selected_rows'),
Input(component_id='datatable-interactivity', component_property='derived_virtual_indices'),
Input(component_id='datatable-interactivity', component_property='derived_virtual_row_ids'),
Input(component_id='datatable-interactivity', component_property='active_cell'),
Input(component_id='datatable-interactivity', component_property='selected_cells')], prevent_initial_call=True
)
def update_line(all_rows_data, slctd_row_indices, slct_rows_names, slctd_rows,
order_of_rows_indices, order_of_rows_names, actv_cell, slctd_cell):
dff =
|
pd.DataFrame(all_rows_data)
|
pandas.DataFrame
|
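Dash passes `derived_virtual_data` to the callback as a list of row dicts, which the completion turns into a DataFrame before grouping. A standalone sketch of that conversion plus the year/count aggregation used in `update_bar`; the rows below are fabricated examples, not real PubMed records:

```python
import pandas as pd

# Roughly what a DataTable's derived_virtual_data looks like: a list of row dicts.
all_rows_data = [
    {"Publication Year": 2019, "PubMed ID": 111, "Citation Count": 10},
    {"Publication Year": 2019, "PubMed ID": 112, "Citation Count": 4},
    {"Publication Year": 2020, "PubMed ID": 113, "Citation Count": 7},
]

dff = pd.DataFrame(all_rows_data)  # the completed line

df2 = dff.groupby("Publication Year")["PubMed ID"].count().reset_index()
df2.columns = ["Year", "Count"]

df3 = dff.groupby("Publication Year")["Citation Count"].sum().reset_index()
df3["cumulative"] = df3["Citation Count"].cumsum().round(0)

print(df2)
print(df3)
```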
# CPTAC Images Join
import pandas as pd
import numpy as np
imglist = pd.read_csv('../CPTAC-LUAD-HEslide-filename-mapping_Jan2019.csv', header=0)
samplelist = pd.read_csv('../CPTAC_LUAD.csv', header=0)
imglist = imglist[['Slide_ID', 'FileName']]
samplelist = samplelist.join(imglist.set_index('Slide_ID'), how='inner', on='Slide_ID')
samplelist = samplelist.dropna(subset=['FileName'])
samplelist = samplelist[['Case_ID', 'Slide_ID', 'FileName']]
Labelfile = pd.read_csv('../luad-v2.0-sample-annotation.csv', header=0)
Labelfile = Labelfile.loc[Labelfile['Type'] == 'Tumor']
Labelfile = Labelfile[['Participant', 'STK11.mutation.status']]
Labelfile = Labelfile.rename(columns={'Participant': 'Case_ID', 'STK11.mutation.status': 'STK11'})
Labelfile = Labelfile.join(samplelist.set_index('Case_ID'), how='inner', on='Case_ID')
print(len(list(Labelfile.Case_ID.unique())))
Labelfile = Labelfile.drop('Case_ID', axis=1)
Labelfile = Labelfile.drop_duplicates()
Labelfile.to_csv('../CPTAC_Joint.csv', index=False, header=True)
# TCGA
import pandas as pd
import os
# Get all images in the root directory
def image_ids_in(root_dir, ignore=['.DS_Store', 'dict.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
dirname = id.split('-01Z')[0]
ids.append([id, dirname])
return ids
TCGAls = image_ids_in('../images/TCGA')
TCGAls = pd.DataFrame(TCGAls)
TCGAls = TCGAls.rename(columns={0: 'FileName', 1: 'Slide_ID'})  # columns from the list are integer labels, so rename by int key
TCGAls.to_csv('../TCGAls.csv', index=False, header=True)
import pandas as pd
import os
TCGAall = pd.read_csv('../TCGA_all.tsv', sep='\t', header=0)
TCGAstk = pd.read_csv('../TCGA_STK11_MUT.tsv', sep='\t', header=0)
TCGAim = pd.read_csv('../TCGAls.csv', header=0)
TCGAall = TCGAall[['case_id', 'submitter_id']]
TCGAstk = TCGAstk[['submitter_id', 'STK11']]
TCGAall = TCGAall.join(TCGAstk.set_index('submitter_id'), how='left', on='submitter_id')
TCGAall = TCGAall.fillna(0)
TCGAall = TCGAall.drop('case_id', axis=1)
TCGAall = TCGAall.rename(columns={'submitter_id': 'Slide_ID'})
TCGAall = TCGAall.join(TCGAim.set_index('Slide_ID'), how='inner', on='Slide_ID')
TCGAall = TCGAall.drop_duplicates()
print(len(list(TCGAall.Slide_ID.unique())))
lll = []
for idx, row in TCGAall.iterrows():
lll.append(row['Slide_ID']+'-'+row['FileName'].split('-')[-2])
TCGAall['Slide_ID'] = lll
TCGAall.to_csv('TCGA_Joint.csv', index=False, header=True)
LUADlabel =
|
pd.concat([Labelfile, TCGAall])
|
pandas.concat
|
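The completion stacks the CPTAC and TCGA label tables row-wise. A minimal sketch of row-wise `pd.concat` on two frames that share the `STK11`/`Slide_ID`/`FileName` schema; the values are invented for illustration:

```python
import pandas as pd

Labelfile = pd.DataFrame(
    {"STK11": [1, 0], "Slide_ID": ["C3L-0001-21", "C3L-0002-21"],
     "FileName": ["a.svs", "b.svs"]}
)
TCGAall = pd.DataFrame(
    {"STK11": [0, 1], "Slide_ID": ["TCGA-05-4244-01", "TCGA-05-4245-01"],
     "FileName": ["c.svs", "d.svs"]}
)

# Row-wise concatenation, as in the completed line.
LUADlabel = pd.concat([Labelfile, TCGAall], ignore_index=True)
print(LUADlabel)
```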
"""
<NAME>
python 3.7
:> to plot the variables that are selected
check/run following before running this file:
- filepaths_to_use.sh (returns the paths of variables that are available)
- data_scope.py (creates a dataframe with hierarchy of data structure)
- data_processing/df_data_selected.csv (.csv output of data_scope.py)
The plotting of SSP585 and Historical will be done based on the raw files stored in the repository
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc4
import re
import seaborn as sns
import cftime
import os
from functions import Unit_Conversions, time_dim_dates,index_and_dates_slicing
import datetime as dt
#1- Hack to fix missing PROJ4 env var
import os
#import conda
"""
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
#-1 Hack end
from mpl_toolkits.basemap import Basemap
"""
# Reading the dataframe of the selected files
# -------------------------------------------
#web_path = '/project/projectdirs/m2467/www/bharat/'
web_path = '/global/homes/b/bharat/results/web/'
in_path = '/global/homes/b/bharat/results/data_processing/'
cmip6_filepath_head = '/global/cfs/cdirs/m3522/cmip6/CMIP6/'
#cmip6_filepath_head = '/global/homes/b/bharat/cmip6_data/CMIP6/'
df_files = pd.read_csv(in_path + 'df_data_selected.csv')
cori_scratch = '/global/cscratch1/sd/bharat/' # where the anomalies per slave rank are saved
hierarcy_str= [ 'activity_id','institution_id','source_id','experiment_id',
'member_id','table_id','variable_id','grid_label','version','filenames']
# Source_id or Model Names:
# -------------------------
source_ids = np.unique(df_files['source_id'])
# Experiments
# -----------
experiments_ids = np.unique(df_files['experiment_id'])
# Variable names:
# --------------
variable_ids = np.unique(df_files['variable_id'])
#Create a DataFrame to store the essential information of the model and variables
col_names = [ 'source_id','experiment_id','member_id','variable_id',
'grid_label','version','time_units','time_calendar',
'lat','lon','var_units','area_units']
df_model_info = pd.DataFrame(columns = col_names)
#def create_df_info (s = source_run,e = exp, m=member, v=variable_run, g=grid_run, ver = ver_run, tu=time.units, tc=time.calendar,lt = lat.size, ln = lon.size, vu=var.units, au =area.units, ignore_index = True):
def create_df_info (i=np.nan, s = np.nan,e = np.nan, m=np.nan, v=np.nan,
g=np.nan, ver_h = np.nan, ver_s = np.nan, tu=np.nan,
tc=np.nan,lt = np.nan ,ln = np.nan, vu=np.nan, au =np.nan):
d = {'source_id' : pd.Series([s],index=[i]),
'experiment_id': pd.Series([e],index=[i]),
'member_id' : pd.Series([m],index=[i]),
'variable_id' : pd.Series([v],index = [i]),
'grid_label' : pd.Series([g],index = [i]),
'version_historial': pd.Series([ver_h],index = [i]),
'version_ssp587': pd.Series([ver_s],index = [i]),
'time_units' : pd.Series([tu],index = [i]),
'time_calendar': pd.Series ([tc],index = [i]),
'lat' : pd.Series([lt], index = [i]),
'lon' : pd.Series([ln],index= [i]),
'var_units' : pd.Series([vu],index = [i]),
'area_units' : pd.Series([au],index = [i])}
df = pd.DataFrame(d)
return df
# -----------------------
#creating a copy of the df_files
temp = df_files.copy(deep = True)
#creating the filters based on model and variable
# model for this run
source_run = 'CanESM5'
variable_run = 'ra'
# The Models that have gpp for historical and ssp585 experiments:
source_selected = ['CanESM5','IPSL-CM6A-LR','CNRM-ESM2-1','BCC-CSM2-MR','CNRM-CM6-1']
source_selected = ['CESM2','CanESM5','IPSL-CM6A-LR','CNRM-ESM2-1','BCC-CSM2-MR','CNRM-CM6-1','EC-Earth3-Veg','UKESM1-0-LL']
source_selected = ['CESM2','CanESM5','IPSL-CM6A-LR','CNRM-ESM2-1','BCC-CSM2-MR','CNRM-CM6-1'] # for GPP // No areacella in : 'EC-Earth3-Veg','UKESM1-0-LL'
#source_selected = ['CESM2','CanESM5','IPSL-CM6A-LR','CNRM-ESM2-1'] # for NBP // no NBP in BCC //No areacella in : 'EC-Earth3-Veg','UKESM1-0-LL'
# Select which model you want to run:
# ===================================
#source_selected = ['CNRM-CM6-1' ]
#source_selected = ['BCC-CSM2-MR' ]
#source_selected = ['CNRM-ESM2-1' ]
#source_selected = ['IPSL-CM6A-LR' ]
#source_selected = ['CanESM5' ]
source_selected = ['CESM2' ]
# The abbreviations of the models that will be analyzed:
source_code = { 'cesm' : 'CESM2',
'can' : 'CanESM5',
'ipsl' : 'IPSL-CM6A-LR',
'bcc' : 'BCC-CSM2-MR',
'cnrn-e': 'CNRM-ESM2-1',
'cnrn-c': 'CNRM-CM6-1' }
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--sources' ,'-src' , help = "Which model(s) to analyse?" , type= str, default= 'all' )
parser.add_argument('--variable' ,'-var' , help = "variable? gpp/npp/nep/nbp,,,," , type= str, default= 'gpp' )
args = parser.parse_args()
# The inputs:
src = str (args.sources)
variable_run= str (args.variable)
# Model(s) to analyze:
# --------------------
source_selected = []
if len(src.split('-')) >1:
source_selected = src.split('-')
elif src in ['all', 'a']:
source_selected = list(source_code.values() )
elif len(src.split('-')) == 1:
if src in source_code.keys():
source_selected = [source_code[src]]
else:
print (" Enter a valid source id")
#running : run plot_ts_variables_cont_f.py -src cesm -var ra
# Creating a list of unique colors for multiple models:
# ---------------------------------------------------
NUM_COLORS = len(source_selected)
LINE_STYLES = ['solid', 'dashed', 'dashdot', 'dotted']
NUM_STYLES = len(LINE_STYLES)
sns.reset_orig() # get default matplotlib styles back
clrs = sns.color_palette('husl', n_colors=NUM_COLORS)
# To save the ts of every source in a dict for plotting
# -----------------------------------------------------
ts_yr_source_var_member = {}
ts_rm5yr_source_var_member = {}
ts_av_rm5yr_source_var_member = {}
# Saving a dictionary of common members per source id
# ----------------------------------------------------
var_ar_gC = {} # to store the variable array
for s_idx, source_run in enumerate(source_selected):
var_ar_gC [source_run] = {}
member_ids_common_source = {}
for s_idx, source_run in enumerate(source_selected):
filters = (temp['source_id'] == source_run) & (temp['variable_id'] == variable_run)
filters_area = (temp['source_id'] == source_run) & (temp['variable_id'] == 'areacella')
filters_lf = (temp['source_id'] == source_run) & (temp['variable_id'] == 'sftlf')
ts_yr_source_var_member[source_run] = {}
ts_rm5yr_source_var_member[source_run] = {}
ts_av_rm5yr_source_var_member[source_run] = {}
#passing the filters to the dataframe
df_tmp = temp[filters]
df_tmp_area = temp[filters_area]
df_tmp_lf = temp[filters_lf]
# grid of the filtered dataframe
grid_run = np.unique(df_tmp['grid_label'])[0]
#list of member ids
filters_exp = {} #dic to store filters of experiments
member_ids_exp = {} #dic to store member ids of experiments
filters_mem = {} #dic to store filters per experiment
#checking and using the latest version
vers = {}
df_ver_tmp = {}
filters_ver = {}
ix = 1 #initiating the idx for the dataframe for info
for exp in experiments_ids:
vers [exp] = {} # initiating a version dic per member
df_ver_tmp [exp] = {}
filters_ver[exp] = {}
filters_mem[exp] = (df_tmp['experiment_id'] == exp )
member_ids_exp[exp] = np.unique(df_tmp[filters_mem[exp]]['member_id'])
member_ids_common = np.intersect1d(member_ids_exp['historical'] , member_ids_exp['ssp585'])
# Checking for the latest version of the members
for member in member_ids_common:
for exp in experiments_ids:
df_ver_tmp [exp][member] = df_tmp[filters_mem[exp]]
filters_ver [exp][member] = (df_tmp['experiment_id'] == exp ) & (df_tmp['member_id'] == member )
vers_tmp = np.unique(df_tmp[filters_ver[exp][member]]['version']) # all versions in str
vers_tmp_int = np.array([int(v[1:]) for v in vers_tmp]) # all versions in int
tmp_idx = np.where(vers_tmp_int == np.max(vers_tmp_int))# index of max number version
vers[exp][member] = vers_tmp[tmp_idx[0][0]] # newest for this run
# Saving the common members to a dict for plotting purposes
if (source_run == 'CESM2') and (variable_run == 'tasmax'): member_ids_common = ['r1i1p1f1']
member_ids_common_source[source_run] = member_ids_common
for member in member_ids_common_source[source_run]:
var_ar_gC [source_run][member] = {}
#Check if the files are in chunks of time
num_chunk_time = {}
for exp in experiments_ids:
if (source_run == 'CESM2') and (variable_run == 'tasmax'):
num_chunk_time[exp] = 1
else:
num_chunk_time[exp] = len(df_tmp[filters_ver[exp][member]][df_tmp['version'] == vers[exp][member]])
print ("Number of chunks of time in of model %s under experiment '%s' are: %d"%(source_run ,exp, num_chunk_time[exp]))
# Creating a dictionary for storing the nc data
nc_data = {}
for exp in experiments_ids:
nc_data [exp] = {}
filepath_areacella = {}
filepath_sftlf = {}
for member in member_ids_common:
# Pointing to the selective files that i need for this plot
print ("Source ID :%s, Member ID :%s"%(source_run,member))
filepaths_cont = {}
for exp in experiments_ids:
filepaths_cont[exp] = []
member_id_tmp = member
nc_data [exp][member_id_tmp] = {} # reading members separately per experiment
print ("==============================================================")
# This is when the versions are saved over multiple time chunks
if num_chunk_time[exp] >= 1:
if (source_run == 'CESM2') and (variable_run == 'tasmax'):
pass
else:
filepath_ar = np.array(df_ver_tmp[exp][member][filters_ver[exp][member]][df_ver_tmp[exp][member]['version'] == vers[exp][member]])
for chunk_idx in range(num_chunk_time[exp]):
if (source_run == 'CESM2') and (variable_run == 'tasmax') and (exp == 'historical'):
filepaths_cont[exp] = ["/global/cscratch1/sd/bharat/add_cmip6_data/CESM2/extra_cmip6_data/tasmax_Amon_CESM2_historical_r1i1p1f1_gn_185001-201412.nc"]
elif (source_run == 'CESM2') and (variable_run == 'tasmax') and (exp == 'ssp585'):
filepaths_cont[exp] = ["/global/cscratch1/sd/bharat/add_cmip6_data/CESM2/extra_cmip6_data/tasmax_Amon_CESM2_ssp585_r1i1p1f1_gn_201501-210012.nc"]
else:
filepaths_cont[exp].append (cmip6_filepath_head + "/".join(filepath_ar[chunk_idx]))
if source_run == 'BCC-CSM2-MR':
filepath_area = "/global/homes/b/bharat/extra_cmip6_data/areacella_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc"
filepath_lf = "/global/homes/b/bharat/extra_cmip6_data/sftlf_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc"
else:
filters_area = (temp['variable_id'] == 'areacella') & (temp['source_id'] == source_run)
filters_lf = (temp['variable_id'] == 'sftlf') & (temp['source_id'] == source_run)
filepath_area = cmip6_filepath_head + "/".join(np.array(temp[filters_area].iloc[-1]))
filepath_lf = cmip6_filepath_head + "/".join(np.array(temp[filters_lf].iloc[-1]))
# Check chunk_idx nc_data[exp][member_id_tmp][chunk_idx]
for exp in experiments_ids:
for chunk_idx in range(num_chunk_time[exp]):
nc_data[exp][member_id_tmp][chunk_idx] = nc4.Dataset(filepaths_cont[exp][chunk_idx])
#nc_data[member_id_tmp] = nc4.MFDataset([filepaths_cont['historical'], filepaths_cont['ssp585']])
var = nc_data['historical'][member_id_tmp][0].variables[variable_run]
lat = nc_data['historical'][member_id_tmp][0].variables['lat']
lon = nc_data['historical'][member_id_tmp][0].variables['lon']
time = nc_data['historical'][member_id_tmp][0].variables['time']
lat_bounds = nc_data['historical'][member_id_tmp][0].variables[lat.bounds]
lon_bounds = nc_data['historical'][member_id_tmp][0].variables[lon.bounds]
print ("Time Size: ", time.size, "no. of lats: ", lat.size, "no. of lons: ", lon.size)
# Concatenating the variables under consideration
# Since the time can start on 1850 or 2015, so it is important to use cftime and read the units along with it
# The same calculation is applied to time_bounds
var_data = nc_data['historical'][member_id_tmp][0].variables[variable_run][...]
time_datetime = cftime.num2date (times = nc_data['historical'][member_id_tmp][0].variables['time'][...],
units = nc_data['historical'][member_id_tmp][0].variables['time'].units,
calendar = nc_data['historical'][member_id_tmp][0].variables['time'].calendar )
time_bounds_datetime = cftime.num2date ( times = nc_data['historical'][member_id_tmp][0].variables[time.bounds][...],
units = nc_data['historical'][member_id_tmp][0].variables['time'].units,
calendar= nc_data['historical'][member_id_tmp][0].variables['time'].calendar )
# concatenating the variables under consideration
# the aim is to make one variable for the whole time duration from 1850 -- 2100
for exp in experiments_ids:
for chunk_idx in range(num_chunk_time[exp]):
if (exp == 'historical') and (chunk_idx == 0):
continue
print (exp)
if (source_run == 'CESM2') and (variable_run == 'tasmax') and (exp=='ssp585'):
cesm2_tasmax_bias = 6.433
var_data = np.concatenate( (var_data,
nc_data[exp][member_id_tmp][chunk_idx].variables[variable_run][...] - cesm2_tasmax_bias ),
axis =0) # units: kg m-2 s-1
else:
var_data = np.concatenate( (var_data,
nc_data[exp][member_id_tmp][chunk_idx].variables[variable_run][...]),
axis =0) # units: kg m-2 s-1
time_datetime = np.concatenate( (time_datetime,
cftime.num2date(times = nc_data[exp][member_id_tmp][chunk_idx].variables['time'][...],
units = nc_data[exp][member_id_tmp][chunk_idx].variables['time'].units,
calendar = nc_data[exp][member_id_tmp][chunk_idx].variables['time'].calendar)),
axis = 0)
try:
time_bounds_datetime = np.concatenate( (time_bounds_datetime,
cftime.num2date(times = nc_data[exp][member_id_tmp][chunk_idx].variables[time.bounds][...],
units = nc_data[exp][member_id_tmp][chunk_idx].variables['time'].units,
calendar = nc_data[exp][member_id_tmp][chunk_idx].variables['time'].calendar))
,axis = 0)
except:
time_bounds_datetime = np.concatenate( (time_bounds_datetime,
cftime.num2date(times = nc_data[exp][member_id_tmp][chunk_idx].variables['time_bnds'][...],
units = nc_data[exp][member_id_tmp][chunk_idx].variables['time'].units,
calendar = nc_data[exp][member_id_tmp][chunk_idx].variables['time'].calendar))
,axis = 0)
print (exp)
# Masking the values again to avoid errors arising due to masking
var_data = np.ma.masked_equal(var_data, var.missing_value)
# saving datetime time dates
# now the units are the same for the time bounds as "TIME_units"
time_bounds = nc_data['historical'][member_id_tmp][0].variables[time.bounds]
TIME_units = 'days since 1850-01-01 00:00:00'
time_floats = cftime.date2num (dates = time_datetime,
units = TIME_units,
calendar = nc_data['historical'][member_id_tmp][0].variables['time'].calendar)
time_bounds_floats = cftime.date2num (dates = time_bounds_datetime,
units = TIME_units,
calendar = nc_data['historical'][member_id_tmp][0].variables['time'].calendar)
try:
area = nc4.Dataset(filepath_areacella['historical']).variables['areacella']
lf = nc4.Dataset(filepath_sftlf['historical']).variables['sftlf']
except:
if source_run == 'BCC-CSM2-MR':
area = nc4.Dataset("/global/homes/b/bharat/extra_cmip6_data/areacella_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc").variables['areacella']
lf = nc4.Dataset("/global/homes/b/bharat/extra_cmip6_data/sftlf_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc").variables['sftlf']
else:
area = nc4.Dataset(filepath_area).variables["areacella"]
lf = nc4.Dataset(filepath_lf).variables["sftlf"]
#convert "kg m-2 s-1" to "gC"
if lf.units == '%':
lf = lf[...]/100 # converting the land fraction percentage to the fraction.
area_act = area[...] * lf #area_act (m2) is the effective or actual area of that pixels
if variable_run in ['gpp','npp','nep','nbp','fFireAll','ra','rh', 'fHarvest', 'fDeforestToAtmos', 'fLulccAtmLut','cTotFireLut']:
time_days = [int(time_bounds_floats[i][1]-time_bounds_floats[i][0]) for i in range(time_bounds_floats.shape[0])]
time_sec = np.array(time_days)*24*3600
vol_m2s = time_sec[:,np.newaxis,np.newaxis] * area_act # units vol: m^2*s
if variable_run in ['fLulccAtmLut', 'cTotFireLut']:
var_gC = vol_m2s * np.sum(var_data,axis=1) * 1000 # gC/mon for due to Primary and Secondary
else:
var_gC = vol_m2s * var_data * 1000 # gC/mon
var_ar_gC [source_run] [member] ['var_gC'] = var_gC
var_ar_gC [source_run] [member] ['lat'] = lat[...]
var_ar_gC [source_run] [member] ['lon'] = lon[...]
var_ar_gC [source_run] [member] ['lat_bounds'] = lat_bounds[...]
var_ar_gC [source_run] [member] ['lon_bounds'] = lon_bounds[...]
var_gC_global_mon_tot = np.ma.array([np.ma.sum(var_gC[i,:,:]) for i in range(var_data.shape[0])]) #g/mon
var_gC_global_yr_tot = np.ma.array([np.ma.sum(var_gC_global_mon_tot[i*12:(i*12)+12]) for i in range(len(var_gC_global_mon_tot)//12)]) #g/y
pd_var_gC_global_yr_tot = pd.Series(var_gC_global_yr_tot) #g/y
var_gC_global_rm5yr_tot = pd_var_gC_global_yr_tot.rolling(window=5,center = False).mean()# 5 year rolling mean
ts_yr_source_var_member[source_run][member] = var_gC_global_yr_tot #g/y
ts_rm5yr_source_var_member[source_run][member] = var_gC_global_rm5yr_tot #g/y
df_tmp = create_df_info( s = source_run,e = exp, m=member, v=variable_run,
g=grid_run, ver_h = vers['historical'],ver_s = vers['ssp585'],
tu=time.units, tc=time.calendar,lt = lat.size,
ln = lon.size, vu=var.units, au =area.units,i=ix)
df_model_info = df_model_info.append(df_tmp, sort = True)
ix = ix +1
# Calculations in case of climate drivers :
# -----------------------------------------
if variable_run in ['pr','mrso','mrsos']:
# Converted variable in the desired units of [mm day-1]
if variable_run == 'pr' : des_units = 'mm day-1'# Desired units of precipitation
if variable_run == 'mrso' : des_units = 'mm' # Desired units of soil moisture
if variable_run == 'mrsos' : des_units = 'mm' # Desired units of soil moisture
con_factor = Unit_Conversions (From=var.units, To= des_units)[0] # Conversion Factor
con_var = var_data * con_factor
con_var_units = Unit_Conversions (From=var.units, To= des_units)[1] # Converted Units
# Area weighted averages
con_var_global_mon_awm = np.ma.array([np.ma.average(con_var[i,:,:],
weights = area_act ) for i in range(con_var.shape[0])])
con_var_global_yr_tot_awm = np.ma.array([np.ma.average(np.ma.sum(con_var[i*12:(i*12+12),:,:],axis =0),
weights = area_act) for i in range ( con_var.shape[0]//12)])
con_var_global_yr_av_awm = np.ma.array([np.ma.average(np.ma.mean(con_var[i*12:(i*12+12),:,:],axis =0),
weights = area_act) for i in range ( con_var.shape[0]//12)])
# Calculating the moving averages
pd_con_var_global_yr_tot_awm = pd.Series (con_var_global_yr_tot_awm)
con_var_global_rm5yr_tot_awm = pd_con_var_global_yr_tot_awm.rolling(window=5,center = False).mean()# 5 year rolling mean
pd_con_var_global_yr_av_awm = pd.Series (con_var_global_yr_av_awm)
con_var_global_rm5yr_av_awm = pd_con_var_global_yr_av_awm.rolling(window=5,center = False).mean()# 5 year rolling mean
ts_yr_source_var_member[source_run][member] = con_var_global_yr_tot_awm # con units
ts_rm5yr_source_var_member[source_run][member] = con_var_global_rm5yr_tot_awm # con units
ts_av_rm5yr_source_var_member[source_run][member] = con_var_global_rm5yr_av_awm # con units
if variable_run in ['tas','tasmax','tasmin']:
# Converted variable in the desired units of [C]
if variable_run in ['tas','tasmax','tasmin'] : des_units = 'C'# Desired units of temperature
con_factor = Unit_Conversions (From=var.units, To= des_units)[0] # Conversion Factor
con_var = var_data + con_factor
con_var_units = Unit_Conversions (From=var.units, To= des_units)[1] # Converted Units
# Area weighted averages
con_var_global_mon_awm = np.ma.array([np.ma.average(con_var[i,:,:],
weights = area_act ) for i in range(con_var.shape[0])])
con_var_global_yr_tot_awm = np.ma.array([np.ma.average(np.ma.sum(con_var[i*12:(i*12+12),:,:],axis =0),
weights = area_act) for i in range ( con_var.shape[0]//12)])
con_var_global_yr_av_awm = np.ma.array([np.ma.average(np.ma.mean(con_var[i*12:(i*12+12),:,:],axis =0),
weights = area_act) for i in range ( con_var.shape[0]//12)])
# Calculating the moving averages
pd_con_var_global_yr_tot_awm = pd.Series (con_var_global_yr_tot_awm)
con_var_global_rm5yr_tot_awm = pd_con_var_global_yr_tot_awm.rolling(window=5,center = False).mean()# 5 year rolling mean
pd_con_var_global_yr_av_awm = pd.Series (con_var_global_yr_av_awm)
con_var_global_rm5yr_av_awm = pd_con_var_global_yr_av_awm.rolling(window=5,center = False).mean()# 5 year rolling mean
ts_yr_source_var_member[source_run][member] = con_var_global_yr_tot_awm # con units
ts_rm5yr_source_var_member[source_run][member] = con_var_global_rm5yr_tot_awm # con units
ts_av_rm5yr_source_var_member[source_run][member] = con_var_global_rm5yr_av_awm # con units
if variable_run in ['gpp','npp','nep','nbp','fFireAll','ra','rh', 'fHarvest', 'fDeforestToAtmos', 'fLulccAtmLut','cTotFireLut']:
# Plotting Total Global Yearly GPP/NPP/NEP/NBP
# --------------------------------------------
time_x = np.arange(1850,2101)
fig_1 = plt.figure(tight_layout = True, dpi = 400)
for member in member_ids_common:
if (variable_run == 'nep') and (source_run == 'CESM2'):
plt.plot (time_x , -ts_yr_source_var_member[source_run][member]/(10**15) , label = member, linewidth = .3)
else:
plt.plot (time_x , ts_yr_source_var_member[source_run][member]/(10**15) , label = member, linewidth = .3)
plt.title ("%s - %s: Total Global Yearly"%(source_run, variable_run))
plt.ylabel ("PgC/year")
plt.xlabel ("Time")
plt.grid (True, linestyle='--',linewidth = .5)
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=7,fontsize=6)
try: os.remove (web_path + "TS_%s_%s_%s_gC_tot_global_yr.pdf"%(source_run, member, variable_run))
except: print("The fig1 does not already exist")
fig_1 .savefig (web_path + "TS_%s_%s_%s_gC_tot_global_yr.pdf"%(source_run, member, variable_run))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/TimeSeries/"%(source_run,member, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig_1 .savefig (path_save + "TS_%s_%s_%s_gC_tot_global_yr.pdf"%(source_run, member, variable_run))
plt.close (fig_1)
# Plotting 5yr Running mean Total Global Yearly GPP/NPP/NEP/NBP
# -------------------------------------------------------------
fig_2 = plt.figure(tight_layout = True, dpi = 400)
for member in member_ids_common:
if (variable_run == 'nep') and (source_run == 'CESM2'):
plt.plot (time_x , -ts_rm5yr_source_var_member[source_run][member]/(10**15), label = member, linewidth = .3)
else:
plt.plot (time_x , ts_rm5yr_source_var_member[source_run][member]/(10**15), label = member, linewidth = .3)
plt.title ("%s - %s: 5 year Moving Average Total Global Yearly"%(source_run, variable_run))
plt.ylabel ("PgC/year")
plt.xlabel ("Time")
plt.grid (True, linestyle='--',linewidth = .5)
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=7,fontsize=6)
try: os.remove (web_path + "%s_%s_gC_tot_global_rm5yr.pdf"%(source_run,variable_run))
except: print("The fig2 does not already exist")
fig_2 .savefig (web_path + "%s_%s_%s_gC_tot_global_rm5yr.pdf"%(source_run,member, variable_run))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/TimeSeries/"%(source_run,member, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig_2 .savefig (path_save + "%s_%s_%s_gC_tot_global_rm5yr.pdf"%(source_run,member, variable_run))
plt.close (fig_2)
if variable_run in ['pr','mrso','mrsos','tas','tasmax']:
# Climate Drivers
time_x = np.arange(1850,2101)
# Plotting yr Running mean Total Global AWM Yearly
# -------------------------------------------------------------
fig_11 = plt.figure(tight_layout = True, dpi = 400)
for member in member_ids_common:
plt.plot (time_x , ts_yr_source_var_member[source_run][member] , label = member, linewidth = .3)
plt.title ("%s - %s: Total Global AWM Yearly"%(source_run, variable_run))
plt.ylabel (con_var_units)
plt.xlabel ("Time")
plt.grid (True, linestyle='--',linewidth = .5)
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=7,fontsize=6)
try: os.remove (web_path + "%s_%s_tot_awm_global_yr.pdf"%(source_run,variable_run))
except: print("The fig1 does not already exist")
fig_11 .savefig (web_path + "%s_%s_%s_tot_awm_global_yr.pdf"%(source_run,member, variable_run))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/TimeSeries/"%(source_run,member, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig_11 .savefig (path_save + "%s_%s_%s_tot_awm_global_yr.pdf"%(source_run,member, variable_run))
plt.close (fig_11)
# Plotting 5yr Running mean Total Global AWM Yearly
# -------------------------------------------------------------
fig_12 = plt.figure(tight_layout = True, dpi = 400)
for member in member_ids_common:
plt.plot (time_x , ts_rm5yr_source_var_member[source_run][member], label = member, linewidth = .3)
plt.title ("%s - %s: 5 year Moving Average Total Global AWM"%(source_run, variable_run))
plt.ylabel (con_var_units)
plt.xlabel ("Time")
plt.grid (True, linestyle='--',linewidth = .5)
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=7,fontsize=6)
try: os.remove (web_path + "%s_%s_tot_global_awm_rm5yr.pdf"%(source_run,variable_run))
except: print("The fig2 does not already exist")
fig_12 .savefig (web_path + "%s_%s_%s_tot_global_awm_rm5yr.pdf"%(source_run, member, variable_run))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/TimeSeries/"%(source_run,member, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig_12 .savefig (path_save + "%s_%s_%s_tot_global_awm_rm5yr.pdf"%(source_run, member, variable_run))
plt.close (fig_12)
# Plotting yr Running mean Average Global AWM Yearly
# -------------------------------------------------------------
fig_13 = plt.figure(tight_layout = True, dpi = 400)
for member in member_ids_common:
plt.plot (time_x , ts_av_rm5yr_source_var_member[source_run][member] , label = member, linewidth = .3)
plt.title ("%s - %s: Average Global AWM Yearly"%(source_run, variable_run))
plt.ylabel (con_var_units)
plt.xlabel ("Time")
plt.grid (True, linestyle='--',linewidth = .5)
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=7,fontsize=6)
try: os.remove (web_path + "%s_%s_av_awm_global_yr.pdf"%(source_run,variable_run))
except: print("The fig13 does not already exist")
fig_13 .savefig (web_path + "%s_%s_%s_av_awm_global_yr.pdf"%(source_run, member, variable_run))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/TimeSeries/"%(source_run,member, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig_13 .savefig (path_save + "%s_%s_%s_av_awm_global_yr.pdf"%(source_run, member, variable_run))
plt.close (fig_13)
if variable_run in ['gpp','npp','nep','nbp','fFireAll','ra','rh', 'fHarvest', 'fDeforestToAtmos', 'fLulccAtmLut','cTotFireLut']:
fig_3,ax = plt.subplots(nrows=1,ncols=1,tight_layout = True, dpi = 400)
plt.title ("Multi-Model %s: Total Global Yearly"%(variable_run.upper()))
plt.ylabel ("PgC/year")
plt.xlabel ("Time")
plt.grid (True, linestyle='--',linewidth = .5)
for s_idx, source_run in enumerate(source_selected):
if (variable_run == 'nep') and (source_run == 'CESM2'):
mm_mean = ax.plot (time_x , -np.array(pd.DataFrame(ts_yr_source_var_member[source_run]).mean(axis = 1))/(10**15) , label = source_run, linewidth = 1)
else:
mm_mean = ax.plot (time_x , np.array(pd.DataFrame(ts_yr_source_var_member[source_run]).mean(axis = 1))/(10**15) , label = source_run, linewidth = 1)
mm_mean[0] .set_color(clrs[s_idx])
for m_idx,member in enumerate(member_ids_common_source[source_run]):
if (variable_run == 'nep') and (source_run == 'CESM2'):
lines = ax.plot (time_x , -ts_yr_source_var_member[source_run][member]/(10**15) , label = member, linewidth = .3)
else:
lines = ax.plot (time_x , ts_yr_source_var_member[source_run][member]/(10**15) , label = member, linewidth = .3)
lines[0].set_color(clrs[s_idx])
lines[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES])
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=7,fontsize=6)
try: os.remove (web_path + "MultiModel_%s_gC_tot_global_yr.pdf"%(variable_run))
except: print("The fig3 does not already exist")
fig_3.savefig (web_path + "MultiModel_%s_gC_tot_global_yr.pdf"%(variable_run))
fig_4,ax = plt.subplots(nrows=1,ncols=1,tight_layout = True, dpi = 400, figsize=(9,5))
plt.title ("Multi-Model %s: 5 Year Moving Average Total Global "%(variable_run.upper()))
#plt.ylabel ("PgC/year")
plt.ylabel ("Total NBP (PgC/year)", fontsize = 14)
plt.xlabel ("Time", fontsize = 14)
plt.xticks(fontsize = 12) #
plt.grid (True, linestyle='--',linewidth = .5)
for s_idx, source_run in enumerate(source_selected):
if (variable_run == 'nep') and (source_run == 'CESM2'):
mm_mean = ax.plot (time_x , -np.array(pd.DataFrame(ts_rm5yr_source_var_member[source_run]).mean(axis = 1))/(10**15) , label = source_run, linewidth = 1)
else:
mm_mean = ax.plot (time_x , np.array(pd.DataFrame(ts_rm5yr_source_var_member[source_run]).mean(axis = 1))/(10**15) , label = source_run, linewidth = 1)
mm_mean[0] .set_color(clrs[s_idx])
for m_idx,member in enumerate(member_ids_common_source[source_run]):
if (variable_run == 'nep') and (source_run == 'CESM2'):
lines = ax.plot (time_x , -ts_rm5yr_source_var_member[source_run][member]/(10**15) , linewidth = .3)
else:
lines = ax.plot (time_x , ts_rm5yr_source_var_member[source_run][member]/(10**15) , linewidth = .3)
lines[0].set_color(clrs[s_idx])
lines[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES])
plt.legend (loc='upper center', bbox_to_anchor=(0.5, -0.06),
fancybox=True, shadow=True, ncol=3,fontsize=10)
try: os.remove(web_path + "MultiModel_%s_gC_tot_global_rm5yr.pdf"%(variable_run))
except: print("The fig4 does not already exist")
fig_4.savefig (web_path + "MultiModel_%s_gC_tot_global_rm5yr.pdf"%(variable_run))
# Figure for the paper for NBP and CESM2
fig_411,ax = plt.subplots(nrows=1,ncols=1,tight_layout = True, dpi = 400, figsize=(9,5))
plt.title (f"Multi-Model {variable_run.upper()}: 5 Year Moving Average Total Global \n")
#plt.ylabel ("PgC/year")
plt.ylabel ("Total NBP (PgC/year)", fontsize = 14)
plt.xlabel ("Time", fontsize = 14)
plt.xticks(fontsize = 12) #
plt.yticks(fontsize = 12) #
plt.grid (True, linestyle='--',linewidth = .5)
for s_idx, source_run in enumerate(source_selected):
if (variable_run == 'nbp') and (source_run == 'CESM2'):
mm_mean = ax.plot (time_x , np.array(
|
pd.DataFrame(ts_rm5yr_source_var_member[source_run])
|
pandas.DataFrame
|
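In the multi-model figures, each model's member time series are stored in a dict; wrapping that dict in `pd.DataFrame` makes the members columns, so `.mean(axis=1)` gives the ensemble mean that the completed line feeds to `ax.plot`. A small sketch of that dict-of-series to ensemble-mean step, with synthetic values and example member IDs:

```python
import numpy as np
import pandas as pd

time_x = np.arange(1850, 2101)
rng = np.random.default_rng(1)

# Stand-in for ts_rm5yr_source_var_member[source_run]: member id -> yearly series (gC/yr).
ts_rm5yr_members = {
    "r1i1p1f1": pd.Series(rng.normal(3e15, 2e14, time_x.size)),
    "r2i1p1f1": pd.Series(rng.normal(3e15, 2e14, time_x.size)),
    "r3i1p1f1": pd.Series(rng.normal(3e15, 2e14, time_x.size)),
}

# Members become columns; the row-wise mean is the multi-member (ensemble) mean in PgC/yr.
ensemble_mean_PgC = pd.DataFrame(ts_rm5yr_members).mean(axis=1) / 1e15
print(ensemble_mean_PgC.head())
```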
from tkinter import *
import tkinter as tk
import os
import datetime as dt
from dateutil.relativedelta import relativedelta
import pytz
from openpyxl import load_workbook
import win32com.client
import win32api
import xlrd
from simple_salesforce import Salesforce
import pandas as pd
from openpyxl.styles import Font, Color, PatternFill
import win32timezone
xl = win32com.client.Dispatch("Excel.Application")
datenow = dt.datetime.today()
lastmonth = datenow - relativedelta(months=1)
root = tk.Tk()
root.title("Outlook-SFDC Sync")
# Add a grid
mainframe = Frame(root)
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
mainframe.pack(pady=25, padx=25)
# Create a Tkinter variable
tkvar = StringVar(root)
tkvar2 = StringVar(root)
# Dictionary with options
choices = {'Outlook Download', 'Outlook SFDC Upload',}
tkvar.set('Outlook Download') # set the default option
popupMenu = OptionMenu(mainframe, tkvar, *choices)
Label(mainframe, text="Please choose which script you want to run:").grid(row=1, column=1)
popupMenu.grid(row=2, column=1)
def ok():
script = tkvar.get()
if script == 'Outlook Download':
create_window()
elif script == 'Outlook SFDC Upload':
root.withdraw()
wb = xlrd.open_workbook('C:\\SFDC Outlook Synchronization\\SFDC_Admin\\SFDC_Admin.xlsx')
first_sheet = wb.sheet_by_name("Sheet1")
a1 = first_sheet.cell(0, 1).value
a2 = first_sheet.cell(1, 1).value
a3 = first_sheet.cell(2, 1).value
try:
sf = Salesforce(username=a1, password=a2, security_token=a3)
except Exception:
win32api.MessageBox(0,
"The script cannot run. You need to either 1) Update your Salesforce password (in cell B2) in the following file: C:\SFDC Outlook Synchronization\SFDC_Admin\SFDC_Admin.xlsx and save the file or 2) Check your Internet connectivity.",
"Error!",
0x00001000)
root.destroy()
quit()
for wbb in xl.Workbooks:
if wbb.Name == 'Outlook Sync.xlsx':
wbb.Close(True)
wb = load_workbook(filename="C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx", read_only=False,keep_vba=False)
ws = wb.get_sheet_by_name('Outlook Sync')
file = pd.ExcelFile('C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx')
df = file.parse('Outlook Sync')
#df.sort_values(['Upload Event to SFDC?'], ascending=[False], inplace=True)
g = 1
for index, row in df.iterrows():
try:
if df.iloc[index]['Upload Event to SFDC?'] == "No" or pd.isnull(df.iloc[index]['Upload Event to SFDC?']):
g = g + 1
continue
if "Yes" in ws.cell(row=g + 1, column=17).value:
g = g + 1
continue
except(Exception):
pass
try:
g = g + 1
if "'" not in df.iloc[index]['SFDC Contact']:
contact = "'" + df.iloc[index]['SFDC Contact'] + "'"
else:
contact = df.iloc[index]['SFDC Contact']
query_result2 = sf.query_all("SELECT Id FROM Contact Where Email = %s" % contact)
records2 = query_result2['records']
df2 = pd.DataFrame(records2)
df2.drop('attributes', inplace=True, axis=1)
if "'" not in df.iloc[index]['Assigned To']:
owner = "'" + df.iloc[index]['Assigned To'] + "'"
else:
owner = df.iloc[index]['Assigned To']
query_result4 = sf.query_all("SELECT Id FROM User Where Email = %s" % owner)
records4 = query_result4['records']
df4 = pd.DataFrame(records4)
df4.drop('attributes', inplace=True, axis=1)
if pd.isnull(df.iloc[index]['Location']):
location = ''
else:
location = df.iloc[index]['Location']
if pd.isnull(df.iloc[index]['Appointment Body']):
description = ''
else:
description = df.iloc[index]['Appointment Body']
if pd.isnull(df.iloc[index]['SFDC Opportunity']):
oppty = ''
else:
oppty = "'" + df.iloc[index]['SFDC Opportunity'] + "'"
query_result3 = sf.query_all("SELECT Id FROM Opportunity Where Name = %s" % oppty)
records3 = query_result3['records']
df3 = pd.DataFrame(records3)
df3.drop('attributes', inplace=True, axis=1)
oppty = df3.iloc[0]['Id']
if pd.isnull(df.iloc[index]['Additional Participant #1']):
additionalp1 = ""
additionalp1id = ""
else:
additionalp1 = df.iloc[index]['Additional Participant #1']
additionalp1sfdc = "'" + additionalp1 + "'"
try:
query_result5 = sf.query_all("SELECT Id FROM User Where Name = %s" % additionalp1sfdc)
records5 = query_result5['records']
df5 = pd.DataFrame(records5)
df5.drop('attributes', inplace=True, axis=1)
additionalp1id = df5.iloc[0]['Id']
except(Exception):
additionalp1id = ""
if pd.isnull(df.iloc[index]['Additional Participant #2']):
additionalp2 = ""
additionalp2id = ""
else:
additionalp2 = df.iloc[index]['Additional Participant #2']
additionalp2sfdc = "'" + additionalp2 + "'"
try:
query_result6 = sf.query_all("SELECT Id FROM User Where Name = %s" % additionalp2sfdc)
records6 = query_result6['records']
df6 = pd.DataFrame(records6)
df6.drop('attributes', inplace=True, axis=1)
additionalp2id = df6.iloc[0]['Id']
except(Exception):
additionalp2id = ""
if
|
pd.isnull(df.iloc[index]['Additional Participant #3'])
|
pandas.isnull
|
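The completed condition uses `pd.isnull` to decide whether an optional spreadsheet cell is empty before building the Salesforce query. A standalone sketch of that guard; the column name is kept from the snippet, the rows are invented:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"Additional Participant #3": ["Jane Doe", np.nan],
     "Subject": ["Quarterly review", "Intro call"]}
)

for index, row in df.iterrows():
    if pd.isnull(df.iloc[index]["Additional Participant #3"]):  # the completed check
        additionalp3 = ""
    else:
        additionalp3 = df.iloc[index]["Additional Participant #3"]
    print(repr(additionalp3))
```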
import mock
import pytest
from collections import OrderedDict
from io import StringIO
import pandas as pd
from pandas.util.testing import assert_frame_equal
from spackl.file.base import BaseFile
from spackl.file import CSV
from spackl.util import Path
test_csv_path = Path.cwd().as_posix() + '/tests/file/data/test.csv'
test_csv_tab_path = Path.cwd().as_posix() + '/tests/file/data/test_tab.csv'
test_csv_zip_path = Path.cwd().as_posix() + '/tests/file/data/test.csv.zip'
test_nonexistent_csv_path = 'tests/file/data/test_nothere_config.csv'
expected_results = [OrderedDict([('first', 'a'), ('second', 'b'), ('third', 'c')]),
OrderedDict([('first', 'd'), ('second', 'e'), ('third', 'f')]),
OrderedDict([('first', 'g'), ('second', 'h'), ('third', 'i')])]
def test_csv():
with pytest.raises(TypeError):
CSV()
with pytest.raises(AttributeError):
CSV(test_nonexistent_csv_path)
with pytest.raises(AttributeError):
CSV(dict())
csv = CSV(test_csv_path)
assert isinstance(csv, BaseFile)
assert csv._name is None
assert csv._use_pandas is False
assert csv._csv_kwargs == dict()
def test_with_kwargs():
name = '<NAME>, dear.'
csv = CSV(test_csv_path, name=name)
assert str(csv) == test_csv_path
assert csv.name == name
dialect = 'excel'
csv = CSV(test_csv_path, dialect=dialect)
assert csv._csv_kwargs == {'dialect': 'excel'}
def test_filelike_obj():
obj = StringIO()
csv = CSV(obj)
assert str(csv) == str(obj)
assert csv._data is None
assert csv.opened is False
csv.open()
assert csv._data
assert csv.opened is True
csv.close()
assert not csv._data
assert csv.opened is False
def test_file():
csv = CSV(test_csv_path)
csv.open()
assert csv._data
assert csv.opened is True
results = csv.query()
assert results.result == expected_results
csv.close()
assert not csv._data
assert csv.opened is False
def test_tab_file():
csv = CSV(test_csv_tab_path)
results = csv.query(dialect='excel-tab')
assert results.result == expected_results
assert not csv._data
assert csv.opened is False
def test_zipfile():
csv = CSV(test_csv_zip_path)
csv.open()
assert csv._data
assert csv.opened is True
results = csv.query()
assert results.result == expected_results
csv.close()
assert not csv._data
assert csv.opened is False
def test_pandas():
csv = CSV(test_csv_path, use_pandas=True)
results = csv.query()
expected_df = pd.DataFrame(expected_results)
|
assert_frame_equal(results, expected_df)
|
pandas.util.testing.assert_frame_equal
|
"""
Created on Aug 24th 2016
Plotting functions for SEM-EDX (or TEM-EDX ) spectra
@author: tkc
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re, math, sys
from matplotlib.backends.backend_pdf import PdfPages
import scipy
import scipy.stats # load in this sequence to get linregress working
from statsmodels.formula.api import ols # ordinary least squares
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX')
from EDX_import_functions import getelemmaps # called by reportmaps
import ternary
import tkinter as tk
# from sympy import Interval, Union # for overlapping plot range removal
#%%
font = {'size' : 22}
plt.rc('font', **font)
MPL_STYLE = {
"text.color":"k",
"axes.labelcolor":"black",
"axes.edgecolor":"0.4",
"axes.facecolor":"white",
"xtick.color": "k",
"ytick.color": "k",
"figure.facecolor":"white",
"figure.edgecolor":"white",
"text.usetex":False,
"axes.labelsize":"large"
}
plt.rcParams.update(MPL_STYLE)
def elemcompareplot(Integlog, elem1, elem2, thresh=0.1, errbars='xy'):
    ''' Pass two elements and make a scatter plot of their corrected counts;
    useful for setting relative k-factors.
    Uses an inner merge to select only the subset with values in each df;
    use either sample or filenumber.'''
#TODO have calccomp copy error in basis to allow errbars if basis=True
el1=Integlog[Integlog['Element']==elem1]
el2=Integlog[Integlog['Element']==elem2]
fig, axes = plt.subplots(nrows=1, ncols=1) # axes is array
# Merge dfs with comp1 and comp2 using inner join
compdata=pd.merge(el1, el2, how='inner', on=['Basename','Filenumber','Point'
,'Filename','Filepath','Sample','Comments'], suffixes=('','b'))
compdata.plot.scatter(x='Correctedcounts', y='Correctedcountsb', ax=axes) # single plot axes has no [#,#]
# linear regression: fitting, plot and add labels
    xdata=compdata['Correctedcounts'].to_numpy() # this data column as np array (as_matrix was removed in newer pandas)
    ydata=compdata['Correctedcountsb'].to_numpy()
#
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xdata, ydata) # imported from scipy.stats
# set x range for linear plot
text1=str(round(slope,3))+' *x +' + str(round(intercept,3))
text2='R = ' + str(round(r_value,3)) + ' p = '+str(round(p_value,4))
xmax=max(xdata)
x=np.linspace(0,xmax,100) # setting range for
axes.text(0.025,0.9, text1, fontsize=12, transform=axes.transAxes)
axes.text(0.025,0.8, text2, fontsize=12, transform=axes.transAxes)
plt.plot(x, x*slope+intercept, color='r') # plot appropriate line
# Now test, plot and return outliers
theseoutliers=returnoutliers(xdata.tolist(), ydata.tolist()) # index #
compdata= pd.concat([compdata, theseoutliers], axis=1, join='inner') # same length so just join by index
# Add residual and pval to each compositional comparison line
theseoutliers=compdata[compdata['Pval']<thresh] # now filter by threshold for outliers (with all cols)
if not theseoutliers.empty:
# error bars from errcorrcnts
if errbars=='xy':
theseoutliers.plot.scatter(x='Correctedcounts', y='Correctedcountsb',
xerr='Errcorrcnts', yerr='Errcorrcntsb', ax=axes, color='r')
elif errbars=='x': # plottable x error column exists
theseoutliers.plot.scatter(x='Correctedcounts', y='Correctedcountsb',
xerr='Errcorrcnts', ax=axes, color='r')
elif errbars=='y': # plottable y error column exists
theseoutliers.plot.scatter(x='Correctedcounts', y='Correctedcountsb',
yerr='Errcorrcntsb', ax=axes, color='r')
else: # no plottable errors for outliers
theseoutliers.plot.scatter(x='Correctedcounts', y='Correctedcountsb',
ax=axes, color='r')
return compdata, theseoutliers
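# Illustrative call (assumes an Integlog quant log is already loaded): compare
# corrected Fe and Mg counts and flag points whose outlier p-value is below 0.05.
#     compdata, outliers = elemcompareplot(Integlog, 'Fe', 'Mg', thresh=0.05)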
def reportmaps(NSScsvparams, Elements, PDFreport='NSSmaps_report.pdf'):
'''Batch plot/PDF report of all selected elements for all NSS extracted csv image from NSS param log '''
plt.ioff() # turn off interactive mode (for plot to pdf)
with PdfPages(PDFreport) as pdf:
for index, row in NSScsvparams.iterrows():
thisrow=NSScsvparams.loc[[index]]
elementmaps=getelemmaps(thisrow, Elements)
fig=plotmaps(elementmaps, thisrow, savename='', imode=False) # no separate save and return figure
pdf.savefig(fig)
plt.ion() # turn interactive plotting back on
return
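# Illustrative call (names are placeholders for an NSS parameter log already in
# memory): write all Fe/Mg/Si maps for every extracted csv image into one PDF.
#     reportmaps(NSScsvparams, ['Fe', 'Mg', 'Si'], PDFreport='NSSmaps_report.pdf')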
def plotmaps(elementmaps, thismaprow, savename='', imode=True):
    ''' Plot an arbitrary number of element maps (passed as a list of numpy arrays) into a single figure; designed for x-ray image maps
    extracted from spectral images; the map name is passed in thismaprow.
    Set imode to False if called by the PDF report loop.'''
if imode:
plt.ion() # ensure interactive mode is on (on for single plots, off for PDF reports)
# determine which of the elementmaps will be plotted
nummaps=len(elementmaps) # all passed element maps plotted (selection/filtering occurs with getelemmaps)
# Determine shape of figure
if nummaps<=3:
numrows=1
else:
numrows=2
numcols=math.ceil(nummaps/numrows)
# fig, axes = plt.subplots(nrows=numrows, ncols=numcols, figsize=(16,9), squeeze=False)
fig, axes = plt.subplots(nrows=numrows, ncols=numcols, squeeze=False)
plottitle=thismaprow.iloc[0]['Basename']+': '+thismaprow.iloc[0]['SIname']+'.si'
fig.suptitle(plottitle)
for i, [elem, thismap] in enumerate(elementmaps): # thismap is element string followed by 512 x 512 array
thisrow=i//numcols
thiscol=i%numcols
axindex=thisrow, thiscol # tuple to index axes
axes[axindex].set_aspect('equal')
if elem=='Grey':
axes[axindex].imshow(thismap, cmap='gray') # make SE image grayscale
axes[axindex].set_title('SE')
else:
axes[axindex].imshow(thismap, cmap='hot') # plots element map to correct subplot
axes[axindex].set_title(elem)
fig.tight_layout()
# Hide empty subplots
for i in range(0,numrows*numcols):
if i>len(elementmaps)-1:
thisrow=i//numcols
thiscol=i%numcols
axindex=thisrow, thiscol # tuple to index axes
axes[axindex].set_visible(False)
if savename!='':
fig.savefig(savename) # optional saving of figure
if imode==False:
return fig
else:
return # just plot for interactive mode
def organizecomp(df):
'''Get rid of common duplicated columns '''
removelist=['Projectb','Filenameb','FilePathb','Sampleb','Commentsb']
singleelemlist=['Ca','Mg','Si','S']
for i, val in enumerate(removelist):
if val in df:
df=df.drop(val,axis=1)
for i, val in enumerate(singleelemlist): # don't drop basis as these will differ if comparing smdif and integ compositions
if val+'amplb' in df:
df=df.drop(val+'amplb',axis=1)
return df
def returnoutliers(xdata, ydata):
'''pass xcol and ycol as lists, makes plot and return outliers with pvals below specified threshold'''
# convert pandas series to lists
regression= ols("data ~ x", data=dict(data=ydata, x=xdata)).fit()
outliers=regression.outlier_test()
# df with cols as student_resid, unadj_p and bonf (bonferroni)
colnames=['Resid','Pval','Bonf']
outliers.columns=colnames # rename columns
return outliers
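if __name__ == '__main__':
    # Minimal sketch of returnoutliers on fabricated data with one injected
    # outlier; rows with Pval below the chosen threshold are the flagged points.
    _x = list(range(10))
    _y = [2.0 * v for v in _x]
    _y[5] = 40.0
    _demo_outliers = returnoutliers(_x, _y)
    print(_demo_outliers[_demo_outliers['Pval'] < 0.1])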
def scattercompplot(comp1, comp2, elemlist, joinlist=['Filename'], thresh=0.1, basis=False, errbars='xy'):
    '''Pass two versions of the composition calculation (using different lines or whatever) and compare
    major elements using scatter graphs, with a single point for each sample.
    Uses an inner merge to select only the subset with values in each df;
    use either sample or filenumber.'''
#TODO have calccomp copy error in basis to allow errbars if basis=True
    elemlist=[re.match(r'\D+',i).group(0) for i in elemlist]
# strip number from peaks like Fe2 if present; columns will be element names (Fe) not peak names (Fe2)
if basis==False: # use atomic % (which is the default), not basis for each element
elemlist=['%'+s for s in elemlist]
numregions=len(elemlist)
# set nrows and ncols for figure of proper size
cols=divmod(numregions,2)[0]+ divmod(numregions,2)[1]
if numregions>1:
rows=2
else:
rows=1
fig, axes = plt.subplots(nrows=rows, ncols=cols) # axes is array
# merge dfs with comp1 and comp2 using inner join
compdata=pd.merge(comp1, comp2, how='inner', on=joinlist, suffixes=('','b'))
mycols=compdata.dtypes.index # list of same columns
mycols=mycols.tolist()
#mycols=mycols.append('Element')
outliers=pd.DataFrame(columns=mycols) # empty dataframe for outlying points
fulldata=pd.DataFrame(columns=mycols)
newcols=['Resid','Pval','Bonf', 'Element'] # single column for residuals but separate value needed for each row per element
mycols.extend(newcols)
for i, cname in enumerate(newcols):
outliers[cname]=''
fulldata[cname]=''
for i, elem in enumerate(elemlist):
# new version of base compositional data for each loop (otherwise reindexing problems)
compdata=
|
pd.merge(comp1, comp2, how='inner', on=joinlist, suffixes=('','b'))
|
pandas.merge
|
import pandas as pd
from matplotlib import pyplot as plt
import hmac
import hashlib
import base64
import json
import time
import requests
from datetime import datetime
from tabulate import tabulate
import csv
investment = 00.00
coindcx = json.load(open('./api.json'))
key = coindcx['apikey']
secret = coindcx['secretkey']
common_url = "https://api.coindcx.com/exchange"
secret_bytes = bytes(secret, encoding='utf-8')
timeStamp = int(round(time.time() * 1000))
body = {
"timestamp": timeStamp
}
json_body = json.dumps(body, separators = (',', ':'))
signature = hmac.new(secret_bytes, json_body.encode(), hashlib.sha256).hexdigest()
url = common_url + "/v1/users/balances"
ret = {}
headers = {
'Content-Type': 'application/json',
'X-AUTH-APIKEY': key,
'X-AUTH-SIGNATURE': signature
}
def getbalance():
global df
global myINR
global myUSDT
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import calendar
import glob
import numpy as np
import openapi_client as dbitApi
import os
import pandas as pd
from datetime import datetime
api = dbitApi.MarketDataApi()
def format_datetime_to_expiry(date):
if os.name == 'nt':
return datetime.strftime(date, '%#d%b%y').upper()
else:
return datetime.strftime(date, '%-d%b%y').upper()
def get_near_next_terms(now):
c = calendar.Calendar(firstweekday=calendar.MONDAY)
this_month_cal = c.monthdatescalendar(now.year, now.month)
this_fridays = [datetime(day.year, day.month, day.day, 8, 0, 0)
for week in this_month_cal for day in week
if day.weekday() == calendar.FRIDAY and day.month == now.month
and datetime(day.year, day.month, day.day, 8, 0, 0) >= now]
next_year = now.year if now.month < 12 else now.year + 1
next_month = now.month + 1 if now.month < 12 else 1
next_month_cal = c.monthdatescalendar(next_year, next_month)
next_fridays = [datetime(day.year, day.month, day.day, 8, 0, 0)
for week in next_month_cal for day in week
if day.weekday() == calendar.FRIDAY and day.month == next_month
and datetime(day.year, day.month, day.day, 8, 0, 0) >= now]
fridays = this_fridays + next_fridays
near_term, next_term = fridays[0], fridays[1]
return (format_datetime_to_expiry(near_term), format_datetime_to_expiry(next_term), near_term, next_term)
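if __name__ == '__main__':
    # Minimal sketch with an arbitrary reference datetime: the near/next terms
    # are the first two Friday 08:00 expiries on or after the given timestamp.
    _near, _next, _near_dt, _next_dt = get_near_next_terms(datetime(2021, 3, 1, 9, 0, 0))
    print(_near, _next)  # expected: 5MAR21 12MAR21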
def get_index(currency='BTC'):
try:
index_result = api.public_get_index_get(currency)['result'][currency]
return index_result
except dbitApi.exceptions.ApiException as e:
print(e)
#logger.exception('Exception when calling MarketDataApi->public_get_instruments_get!')
exit()
def get_instruments_with_expiry(expiry, currency='BTC', kind='option', expired='false'):
try:
instrument_result = api.public_get_instruments_get(currency, kind=kind, expired=expired)['result']
return [instrument['instrument_name'] for instrument in instrument_result if expiry in instrument['instrument_name']]
except dbitApi.exceptions.ApiException as e:
print(e)
#logger.exception('Exception when calling MarketDataApi->public_get_instruments_get!')
exit()
def get_ticker(instrument):
try:
instrument_result = api.public_ticker_get(instrument)['result']
return instrument_result
except dbitApi.exceptions.ApiException as e:
print(e)
#logger.exception('Exception when calling MarketDataApi->public_get_instruments_get!')
exit()
def get_bids_asks(near_list, next_list):
near_calls = dict()
near_puts = dict()
next_calls = dict()
next_puts = dict()
for instrument in near_list:
data = get_ticker(instrument)
best_bid, best_ask = data['best_bid_price'], data['best_ask_price']
strike, cp = int(instrument.split('-')[2]), instrument.split('-')[3]
if cp == 'C':
near_calls[strike] = {'best_bid': best_bid, 'best_ask': best_ask}
elif cp == 'P':
near_puts[strike] = {'best_bid': best_bid, 'best_ask': best_ask}
else:
print(f'Error {instrument}')
for instrument in next_list:
data = get_ticker(instrument)
best_bid, best_ask = data['best_bid_price'], data['best_ask_price']
strike, cp = int(instrument.split('-')[2]), instrument.split('-')[3]
if cp == 'C':
next_calls[strike] = {'best_bid': best_bid, 'best_ask': best_ask}
elif cp == 'P':
next_puts[strike] = {'best_bid': best_bid, 'best_ask': best_ask}
else:
print(f'Error {instrument}')
near_calls_df = pd.DataFrame.from_dict(near_calls, orient='index').sort_index().replace(0, np.nan)
near_puts_df = pd.DataFrame.from_dict(near_puts, orient='index').sort_index().replace(0, np.nan)
next_calls_df = pd.DataFrame.from_dict(next_calls, orient='index').sort_index().replace(0, np.nan)
next_puts_df = pd.DataFrame.from_dict(next_puts, orient='index').sort_index().replace(0, np.nan)
return near_calls_df, near_puts_df, next_calls_df, next_puts_df
def calculate_indices(time, near_datetime, next_datetime, const_mature_days, R, near_calls_df, near_puts_df, next_calls_df, next_puts_df):
# Compute strikes with min call/put price difference
near_prices = pd.DataFrame(index=near_calls_df.index)
near_prices['call_price'] = (near_calls_df['best_bid'] + near_calls_df['best_ask']) / 2
near_prices['put_price'] = (near_puts_df['best_bid'] + near_puts_df['best_ask']) / 2
near_prices['abs_diff'] = abs(near_prices['call_price'] - near_prices['put_price'])
min_near_strike = near_prices['abs_diff'].idxmin()
min_near_diff = near_prices.loc[min_near_strike].abs_diff
next_prices =
|
pd.DataFrame(index=next_calls_df.index)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def generate_data_ajive_fig2(seed=None, all_components=False):
"""
Samples the data from AJIVE figure 2. Note here we use rows as observations
i.e. data matrices are n x d where n = # observations.
Parameters
----------
seed: None, int
Random seed to generate data.
all_components: bool
Whether or not to return all components or just the data
if all_components is True then
X_obs, X_joint, X_indiv, X_noise, Y_obs, Y_joint, Y_indiv, Y_noise =
generate_data_ajive_fig2(all_components=True)
otherwise
X_obs, Y_obs = generate_data_ajive_fig2(all_components=False)
"""
# TODO: return ndarray instead of matrix
if seed:
np.random.seed(seed)
# Sample X data
X_joint = np.bmat([[np.ones((50, 50))],
[-1*np.ones((50, 50))]])
X_joint = 5000 * np.bmat([X_joint, np.zeros((100, 50))])
X_indiv = 5000 * np.bmat([[-1 * np.ones((25, 100))],
[np.ones((25, 100))],
[-1 * np.ones((25, 100))],
[np.ones((25, 100))]])
X_noise = 5000 * np.random.normal(loc=0, scale=1, size=(100, 100))
X_obs = X_joint + X_indiv + X_noise
# Sample Y data
Y_joint = np.bmat([[-1 * np.ones((50, 2000))],
[np.ones((50, 2000))]])
Y_joint = np.bmat([np.zeros((100, 8000)), Y_joint])
Y_indiv_t = np.bmat([[np.ones((20, 5000))],
[-1 * np.ones((20, 5000))],
[np.zeros((20, 5000))],
[np.ones((20, 5000))],
[-1 * np.ones((20, 5000))]])
Y_indiv_b = np.bmat([[np.ones((25, 5000))],
[-1 * np.ones((50, 5000))],
[np.ones((25, 5000))]])
Y_indiv = np.bmat([Y_indiv_t, Y_indiv_b])
Y_noise = np.random.normal(loc=0, scale=1, size=(100, 10000))
Y_obs = Y_joint + Y_indiv + Y_noise
if all_components:
return X_obs, X_joint, X_indiv, X_noise, Y_obs, Y_joint, Y_indiv, Y_noise
else:
return np.array(X_obs), np.array(Y_obs)
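if __name__ == '__main__':
    # Minimal sketch: draw one sample of the AJIVE figure-2 data and check the
    # expected shapes (100 observations; X has 100 features, Y has 10000).
    X_demo, Y_demo = generate_data_ajive_fig2(seed=0)
    print(X_demo.shape, Y_demo.shape)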
def np_matrix_to_pd_dataframe(mat):
df =
|
pd.DataFrame(mat)
|
pandas.DataFrame
|
#Copyright 2020 <NAME>
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#First part of the code:
# reading in EPIC data (locations with time stamps from a range of source csv files.
# collating the locations into a timeline for each patient and then creating the transfers file
# transfers file contains all the transfers for each patient in the data set with demographics attached.
#import the necessary libraries
import pandas as pd
from datetime import datetime
import numpy as np
from collections import deque, namedtuple
#separate out time and date from EPIC time stamp
def get_separate_date_time(datetimeentry):
print(datetimeentry)
if type(datetimeentry) == float:
return datetime.max
else:
#this returns the date in a format where the hours and days can be accessed eg d.year or d.minute
separate_date_time = datetime.strptime(datetimeentry,"%Y-%m-%d %H:%M:%S")
return separate_date_time
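# Quick illustrative check of the parser on a made-up EPIC-style timestamp (not
# real data): strings parse to a full datetime, floats/NaNs map to datetime.max.
assert get_separate_date_time("2020-01-05 13:45:00") == datetime(2020, 1, 5, 13, 45)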
#function to simplify ward names: it collapses every 'THEATRE ...' entry (e.g. 'THEATRE 4 ADD', where ADD represents Addenbrookes hospital in our case) to a single 'THEATRE' label. This can be adjusted to remove whichever pieces of location name are not needed
def simplify_theatre_entries(df: pd.DataFrame):
df_nonan = df[df['adt_department_name'].notna()]
theatre_rows = df_nonan[df_nonan['adt_department_name'].str.contains('THEATRE')]
df_nonan.loc[theatre_rows.index, 'adt_department_name'] = 'THEATRE'
return df_nonan
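# Illustrative behaviour on a toy frame (ward names invented): NaN departments
# are dropped and any name containing 'THEATRE' collapses to the label 'THEATRE'.
_toy = pd.DataFrame({'adt_department_name': ['THEATRE 4 ADD', 'WARD A', None]})
print(simplify_theatre_entries(_toy)['adt_department_name'].tolist())  # ['THEATRE', 'WARD A']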
#reading in data: Here the files are called ADM_POINT_aug.csv, SURGERIES_aug.csv, ENC_POINT_aug.csv, adm_info (demographics)
#each one contains information from a different part of the EPIC database
#admpoint contains the transfers of all the patients between wards
admpoint = pd.read_csv("ADM_POINT_aug.csv")
# only keep the columns actually needed
admpoint = admpoint[['STUDY_SUBJECT_DIGEST', 'in_dttm', 'out_dttm', 'adt_department_name']]
#add on a column indicating the origin of an entry in this case adm point for later when joining on information from other files
s_length = len(admpoint['in_dttm'])
admpoint['data_origin'] = np.repeat('adm', s_length, axis=0)
# Rename the 'STUDY_SUBJECT_DIGEST' column to 'ptid' to signify patient ID. This is needed for later joining the files
admpoint.rename(index=str, columns={'STUDY_SUBJECT_DIGEST': 'ptid'}, inplace=True)
#convert the date and time to a datetime object
admpoint['in_dttm'] = pd.to_datetime(admpoint['in_dttm'])
admpoint['out_dttm'] = pd.to_datetime(admpoint['out_dttm'])
#remove excessive information and nonsense entries
admpoint = simplify_theatre_entries(admpoint)
#surgeriesinfo contains details about the surgery
surgeriesinfo =
|
pd.read_csv("SURGERIES_aug.csv")
|
pandas.read_csv
|
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import pandas as pd
import os
import h5py
from shapely.geometry import point,polygon
import icepyx as ipd
from datetime import date
from dateutil.relativedelta import relativedelta
from mpl_toolkits.axes_grid1 import make_axes_locatable
from playsound import playsound
def get_data(bbox,date_range,path) :
try:
os.mkdir(path+'/'+date_range[0]+'--'+date_range[1])
except:
None
path = path+'/'+date_range[0]+'--'+date_range[1]
#creating the icepyx object
region = ipd.Query('ATL06',bbox,date_range)
print(region.avail_granules())
region.granules.avail
#logging into earthdata
earthdata_uid = input("Enter your Earthdata username:")
email = input("Enter your Eathdata email:")
region.earthdata_login(earthdata_uid,email)
#creating a default variable list
region.order_vars.append(defaults=True)
#print(region.order_vars.wanted,sep='/n')
region.order_vars.remove(all=True)
#modifying the default variable list
#print(region.order_vars.wanted)
region.order_vars.append(var_list=['latitude'])
region.order_vars.append(var_list=['longitude'])
region.order_vars.append(var_list=['h_li'])
region.order_vars.append(var_list=['x_atc'])
region.order_vars.append(var_list=['atl06_quality_summary'])
print("The requested data is:")
print(region.order_vars.wanted)
region.subsetparams(Coverage=region.order_vars.wanted)
region.reqparams['page_size']=int(input("Enter desired number of granules per order:"))
#ordering data
email=input("Do you want an email containing information of your order requests(y/n)")
email=True if email=='y' else False
region.order_granules(email=email)
#downloading data
region.download_granules(path)
def data_to_csv(path_in):
group = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
try:
os.mkdir(path_in+'/CSV')
except:
None
path_out = path_in+'/CSV'
a=os.listdir(path_in)
try:
a.remove('.ipynb_checkpoints')
except:
None
for g in group:
beam = pd.DataFrame()
beam['lat']=[]
beam['lon']=[]
beam['h_li']=[]
beam['x_atc']=[]
beam['q_flag']=[]
for fname in a:
df = pd.DataFrame()
fname = path_in+'/'+fname
try:
with h5py.File(fname,'r') as f:
try:
df['lat'] = f['/'+g+'/land_ice_segments/latitude'][:]
df['lon'] = f['/'+g+'/land_ice_segments/longitude'][:]
df['h_li'] = f['/'+g+'/land_ice_segments/h_li'][:]
df['x_atc'] = f['/'+g+'/land_ice_segments/ground_track/x_atc'][:]
df['q_flag'] = f['/'+g+'/land_ice_segments/atl06_quality_summary'][:]
beam=beam.append(df,ignore_index=True)
except:
print(fname+" has no relevant data")
continue
except:
print(fname+" is not a hdf5 file.")
continue
beam=beam[beam['h_li']< 8611]
beam.to_csv(path_out+'/'+g+'.csv')
def h_li_plot(region,end_time):
year, month, day = map(int, end_time.split('-'))
start_time = date(year, month, day)+relativedelta(months=-3)
start_time = str(start_time)
print(start_time)
date_range=[start_time,end_time]
if region in ['Karakoram','West Himalaya','East Himalaya','Central Himalaya']:
#Data download
try:
            os.mkdir(os.getcwd().rsplit('/package')[0]+'/'+region)
        except:
            None
        try:
            os.mkdir(os.getcwd().rsplit('/package')[0]+'/'+region+'/data')
        except:
            None
        basemap=gpd.read_file(os.getcwd().rsplit('/package')[0]+'/'+region+'/shapefile/'+region+'.shp')
        # define the shapefile path up front; it is used as the spatial extent passed to get_data below
        fname=os.getcwd().rsplit('/package')[0]+'/'+region+'/shapefile/'+region+'.shp'
        if (os.path.isdir(os.getcwd().rsplit('/package')[0]+'/'+region+'/data/'+date_range[0]+'--'+date_range[1])==False or len(os.listdir(os.getcwd().rsplit('/package')[0]+'/'+region+'/data/'+date_range[0]+'--'+date_range[1]))<=1):
            print("Downloading data")
            path=os.getcwd().rsplit('/package')[0]+'/'+region+'/data'
            get_data(fname,date_range,path)
            data_to_csv(path)
        else:
            print("Data already exists")
            path=os.getcwd().rsplit('/package')[0]+'/'+region+'/data/'+date_range[0]+'--'+date_range[1]
df1=pd.read_csv(path+'/CSV/gt1l.csv')
df2=pd.read_csv(path+'/CSV/gt2l.csv')
df3=
|
pd.read_csv(path+'/CSV/gt3l.csv')
|
pandas.read_csv
|
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import pandas as pd
from os.path import join
import os
import matplotlib as mpl
from scseirx import analysis_functions as af
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1 import make_axes_locatable
school_types = ['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary']
def q25(x):
return x.quantile(0.25)
def q75(x):
return x.quantile(0.75)
def hex_to_rgb(value):
'''
Converts hex to rgb colours
value: string of 6 characters representing a hex colour.
Returns: list length 3 of RGB values'''
value = value.strip("#") # removes hash symbol if present
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_dec(value):
'''
Converts rgb to decimal colours (i.e. divides each value by 256)
value: list (length 3) of RGB values
Returns: list (length 3) of decimal values'''
return [v/256 for v in value]
def get_continuous_cmap(hex_list, float_list=None):
'''
Creates and returns a color map that can be used in heat map figures. If
float_list is not provided, colour map graduates linearly between each color
in hex_list. If float_list is provided, each color in hex_list is mapped to
the respective location in float_list.
Parameters
----------
hex_list: list
List of hex code strings
float_list: list
List of floats between 0 and 1, same length as hex_list. Must start with
0 and end with 1.
Returns
----------
colour map
'''
rgb_list = [rgb_to_dec(hex_to_rgb(i)) for i in hex_list]
if float_list:
pass
else:
float_list = list(np.linspace(0,1,len(rgb_list)))
cdict = dict()
for num, col in enumerate(['red', 'green', 'blue']):
col_list = [[float_list[i], rgb_list[i][num], rgb_list[i][num]] for i in range(len(float_list))]
cdict[col] = col_list
cmp = mpl.colors.LinearSegmentedColormap('my_cmp', segmentdata=cdict, N=256)
return cmp
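if __name__ == '__main__':
    # Minimal sketch of the colour helpers (hex values are arbitrary): build a
    # two-colour map and sample its midpoint without plotting anything.
    _demo_cmap = get_continuous_cmap(['#0000ff', '#ff0000'])
    print(hex_to_rgb('#ff0000'), rgb_to_dec(hex_to_rgb('#ff0000')), _demo_cmap(0.5))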
# set the colormap and centre the colorbar
class MidpointNormalize(colors.Normalize):
"""
Normalise the colorbar so that diverging bars work there way either side
from a prescribed midpoint value) e.g. im=ax1.imshow(array,
norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100)).
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
def get_data(stype, src_path, vaccinations=False):
'''
Convenience function to read all ensembles from different measures
of a given school type and return one single data frame
'''
data = pd.DataFrame()
stype_path = join(src_path, stype)
files = os.listdir(stype_path)
for f in files:
params, agents, half = get_measures(f.strip('.csv'),
vaccinations=vaccinations)
if vaccinations:
params['student_test_rate'] = 1
params['teacher_test_rate'] = 1
params['mask_efficiency_exhale'] = 0.7
params['mask_efficiency_inhale'] = 0.5
params['base_transmission_risk_multiplier'] = 1.0
ensmbl = pd.read_csv(join(stype_path, f))
try:
ensmbl = ensmbl.drop(columns=['Unnamed: 0'])
except KeyError:
pass
ensmbl['preventive_test_type'] = params['preventive_test_type']
ensmbl['index_case'] = params['index_case']
ensmbl['transmission_risk_ventilation_modifier'] = \
params['transmission_risk_ventilation_modifier']
if ('class_size_reduction' in params.keys()) and not\
('half_classes' in params.keys()):
if params['class_size_reduction'] > 0:
params['half_classes'] = True
ensmbl['half_classes'] = True
else:
params['half_classes'] = False
ensmbl['half_classes'] = False
if ('half_classes' in params.keys()) and not\
('class_size_reduction' in params.keys()):
if params['half_classes']:
params['class_size_reduction'] = 0.5
ensmbl['class_size_reduction'] = 0.5
else:
params['class_size_reduction'] = 0.0
ensmbl['class_size_reduction'] = 0.0
ensmbl['half_classes'] = params['half_classes']
ensmbl['class_size_reduction'] = params['class_size_reduction']
ensmbl['student_testing_rate'] = params['student_test_rate']
ensmbl['teacher_testing_rate'] = params['teacher_test_rate']
ensmbl['mask_efficiency_inhale'] = params['mask_efficiency_inhale']
ensmbl['mask_efficiency_exhale'] = params['mask_efficiency_exhale']
ensmbl['base_transmission_risk_multiplier'] = \
params['base_transmission_risk_multiplier']
ensmbl['student_mask'] = agents['student']['mask']
ensmbl['teacher_mask'] = agents['teacher']['mask']
ensmbl['student_screening_interval'] = agents['student']\
['screening_interval']
ensmbl['teacher_screening_interval'] = agents['teacher']\
['screening_interval']
ensmbl['teacher_vaccination_ratio'] = agents['teacher']\
['vaccination_ratio']
ensmbl['student_vaccination_ratio'] = agents['student']\
['vaccination_ratio']
ensmbl['family_member_vaccination_ratio'] = agents['family_member']\
['vaccination_ratio']
data = pd.concat([data, ensmbl])
data = data.reset_index(drop=True)
data['teacher_screening_interval'] = data['teacher_screening_interval']\
.replace({None:'never'})
data['student_screening_interval'] = data['student_screening_interval']\
.replace({None:'never'})
return data
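# Illustrative call (src_path is a placeholder for an ensembles directory on
# disk; 'infected_agents' is one of the observables used further below):
#     primary_data = get_data('primary', src_path)
#     print(primary_data['infected_agents'].describe())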
def get_measures(measure_string, vaccinations=False):
'''
Convenience function to get the individual measures given a string
(filename) of measures.
'''
agents = {
'student':{
'screening_interval': None,
'index_probability': 0,
'mask':False},
'teacher':{
'screening_interval': None,
'index_probability': 0,
'mask':False},
'family_member':{
'screening_interval': None,
'index_probability': 0,
'mask':False}
}
turnovers = {0:'same', 1:'one', 2:'two', 3:'three'}
bmap = {'T':True, 'F':False}
interval_map = {'0':0, '3':3, '7':7, '14':14, 'None':None}
index_map = {'s':'student', 't':'teacher'}
stype, _ = measure_string.split('_test')
rest = measure_string.split(stype + '_')[1]
if vaccinations:
ttpype, turnover, index, tf, sf, tmask, smask, half, vent, tvacc,\
svacc = rest.split('_')
fvacc = 'fvacc-0.6'
tmp = [ttpype, turnover, index, tf, sf, tmask, smask, half, vent,\
tvacc, svacc, fvacc]
else:
ttpype, turnover, index, tf, sf, tmask, smask, vent, stestrate, \
ttestrate, trisk, meffexh, meffinh, csizered, fratio, svacc, tvacc, \
fvacc = rest.split('_')
tmp = [ttpype, turnover, index, tf, sf, tmask, smask, vent, stestrate,\
ttestrate, trisk, meffexh, meffinh, csizered, fratio, svacc, tvacc,\
fvacc]
tmp = [m.split('-') for m in tmp]
params = {}
half = False
for m in tmp:
if len(m) == 1:
pass
elif m[0] == 'test':
params['preventive_test_type'] = m[1]
elif m[0] == 'turnover':
params['turnover'] = int(m[1])
elif m[0] == 'index':
params['index_case'] = index_map[m[1]]
elif m[0] == 'tf':
agents['teacher']['screening_interval'] = interval_map[m[1]]
elif m[0] == 'sf':
agents['student']['screening_interval'] = interval_map[m[1]]
elif m[0] == 'tmask':
agents['teacher']['mask'] = bmap[m[1]]
elif m[0] == 'smask':
agents['student']['mask'] = bmap[m[1]]
elif m[0] == 'half':
params['half_classes'] = bmap[m[1]]
elif m[0] == 'vent':
params['transmission_risk_ventilation_modifier'] = float(m[1])
elif m[0] == 'csizered':
params['class_size_reduction'] = float(m[1])
elif m[0] == 'stestrate':
params['student_test_rate'] = float(m[1])
elif m[0] == 'ttestrate':
params['teacher_test_rate'] = float(m[1])
elif m[0] == 'fratio':
params['added_friendship_contacts'] = float(m[1])
elif m[0] == 'meffexh':
params['mask_efficiency_exhale'] = float(m[1])
elif m[0] == 'meffinh':
params['mask_efficiency_inhale'] = float(m[1])
elif m[0] == 'trisk':
params['base_transmission_risk_multiplier'] = float(m[1])
elif m[0] == 'tvacc':
agents['teacher']['vaccination_ratio'] = float(m[1])
elif m[0] == 'svacc':
agents['student']['vaccination_ratio'] = float(m[1])
elif m[0] == 'fvacc':
agents['family_member']['vaccination_ratio'] = float(m[1])
elif m[0] == 'atd':
params['age_transmission_discount'] = float(m[1])
elif m[0] == 'cw':
params['contact_weight'] = float(m[1])
else:
print('unknown measure type ', m[0])
params['preventive_test_type'] = '{}_day_{}'\
.format(turnovers[params['turnover']], params['preventive_test_type'])
return params, agents, half
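if __name__ == '__main__':
    # Minimal sketch of the filename parser on a synthetic measure string (all
    # values below are placeholders, not results from any simulation run).
    _demo_params, _demo_agents, _ = get_measures(
        'primary_test-antigen_turnover-0_index-s_tf-7_sf-7_tmask-T_smask-T_'
        'vent-1.0_stestrate-1.0_ttestrate-1.0_trisk-1.0_meffexh-0.5_meffinh-0.7_'
        'csizered-0.0_fratio-0.0_svacc-0.0_tvacc-0.0_fvacc-0.0')
    print(_demo_params['preventive_test_type'])              # -> same_day_antigen
    print(_demo_agents['teacher']['screening_interval'])     # -> 7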
def get_baseline_data(src_path,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary']):
baseline_data = pd.DataFrame()
for stype in school_types:
tmp = pd.read_csv(join(src_path, '{}_observables.csv'.format(stype)))
tmp['school_type'] = stype
baseline_data = pd.concat([baseline_data, tmp])
baseline_data['test_sensitivity'] = 1.0
baseline_data['student_testing_rate'] = 1.0
baseline_data['teacher_testing_rate'] = 1.0
baseline_data['mask_efficiency_inhale'] = 0.7
baseline_data['mask_efficiency_exhale'] = 0.5
baseline_data['base_transmission_risk_multiplier'] = 1.0
baseline_data['friendship_ratio'] = 0.0
baseline_data['student_vaccination_ratio'] = 0.0
baseline_data['teacher_vaccination_ratio'] = 0.0
baseline_data['family_member_vaccination_ratio'] = 0.0
baseline_data['class_size_reduction'] = 0
baseline_data.loc[baseline_data[baseline_data['half_classes'] == True].index,
'class_size_reduction'] = 0.5
baseline_data = baseline_data.drop(columns=['Unnamed: 0'])
baseline_data = baseline_data.reset_index(drop=True)
baseline_data['student_screen_interval'] = \
baseline_data['student_screen_interval'].replace({np.nan:'never'})
baseline_data['teacher_screen_interval'] = \
baseline_data['teacher_screen_interval'].replace({np.nan:'never'})
return baseline_data
def get_test_sensitivity_data(src_path, params, baseline_data,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
test_sensitivity_data = pd.DataFrame()
for stype in school_types:
print('\t{}'.format(stype))
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, class_size_reduction, vent_mod, ttype = \
screening_params
turnover, _, test = ttype.split('_')
sensitivity = float(test.split('antigen')[1])
# calculate the ensemble statistics for each measure combination
measure_data = stype_data[\
(stype_data['preventive_test_type'] == ttype) &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod)]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'\
.format(screening_params))
row = {'school_type':stype,
'test_type':test,
'turnover':turnover,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':sensitivity,
'class_size_reduction':class_size_reduction,
'student_testing_rate':1.0,
'teacher_testing_rate':1.0,
'mask_efficiency_inhale':0.7,
'mask_efficiency_exhale':0.5,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':0,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
test_sensitivity_data = test_sensitivity_data.append(row,
ignore_index=True)
test_sensitivity_data.to_csv(join(src_path, 'test_sensitivity_observables.csv'),
index=False)
# combine the sensitivity analysis data with the baseline data
# (only relevant columns)
baseline_chunk = baseline_data[\
(baseline_data['test_type'] == 'antigen') &\
(baseline_data['turnover'] == 0) &\
(baseline_data['student_screen_interval'] == s_screen_interval) &\
(baseline_data['teacher_screen_interval'] == t_screen_interval) &\
(baseline_data['student_mask'] == student_mask) &\
(baseline_data['teacher_mask'] == teacher_mask) &\
(baseline_data['class_size_reduction'] == class_size_reduction) &\
(baseline_data['ventilation_modification'] == vent_mod)]
test_sensitivity_data = pd.concat([test_sensitivity_data, \
baseline_chunk[test_sensitivity_data.columns].copy()])
return test_sensitivity_data
def get_testing_rate_data(src_path, params, baseline_data,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
testing_rate_data = pd.DataFrame()
for stype in school_types:
print('\t{}'.format(stype))
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, class_size_reduction, vent_mod, s_testing_rate, \
t_testing_rate = screening_params
measure_data = stype_data[\
(stype_data['preventive_test_type'] == 'same_day_antigen') &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod) &\
(stype_data['student_testing_rate'] == s_testing_rate) &\
(stype_data['teacher_testing_rate'] == t_testing_rate)
]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'.format(screening_params))
row = {'school_type':stype,
'test_type':'antigen',
'turnover':0,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':1.0,
'class_size_reduction':class_size_reduction,
'student_testing_rate':s_testing_rate,
'teacher_testing_rate':t_testing_rate,
'mask_efficiency_inhale':0.7,
'mask_efficiency_exhale':0.5,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':0,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
testing_rate_data = testing_rate_data.append(row, ignore_index=True)
testing_rate_data.to_csv(join(src_path, 'testing_rate_data_observables.csv'),
index=False)
# combine the sensitivity analysis data with the baseline data
# (only relevant columns)
baseline_chunk = baseline_data[\
(baseline_data['test_type'] == 'antigen') &\
(baseline_data['turnover'] == 0) &\
(baseline_data['student_screen_interval'] == s_screen_interval) &\
(baseline_data['teacher_screen_interval'] == t_screen_interval) &\
(baseline_data['student_mask'] == student_mask) &\
(baseline_data['teacher_mask'] == teacher_mask) &\
(baseline_data['class_size_reduction'] == class_size_reduction) &\
(baseline_data['ventilation_modification'] == vent_mod)]
testing_rate_data = pd.concat([testing_rate_data, \
baseline_chunk[testing_rate_data.columns].copy()])
return testing_rate_data
def get_class_size_reduction_data(src_path, params,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
class_size_reduction_data = pd.DataFrame()
for stype in school_types:
print('\t{}'.format(stype))
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, vent_mod, class_size_reduction = screening_params
# calculate the ensemble statistics for each measure combination
measure_data = stype_data[\
(stype_data['preventive_test_type'] == 'same_day_antigen') &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod)
]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'.format(screening_params))
row = {'school_type':stype,
'test_type':'antigen',
'turnover':0,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':1.0,
'class_size_reduction':class_size_reduction,
'student_testing_rate':1.0,
'teacher_testing_rate':1.0,
'mask_efficiency_inhale':0.7,
'mask_efficiency_exhale':0.5,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':0,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
class_size_reduction_data = \
class_size_reduction_data.append(row, ignore_index=True)
class_size_reduction_data.to_csv(join(src_path.split('/ensembles')[0],
'class_size_reduction_observables.csv'), index=False)
return class_size_reduction_data
def get_ventilation_efficiency_data(src_path, params, baseline_data,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
ventilation_efficiency_data = pd.DataFrame()
for stype in school_types:
print('\t{}'.format(stype))
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, class_size_reduction, vent_mod = screening_params
# calculate the ensemble statistics for each measure combination
measure_data = stype_data[\
(stype_data['preventive_test_type'] == 'same_day_antigen') &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod)
]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'.format(screening_params))
row = {'school_type':stype,
'test_type':'antigen',
'turnover':0,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':1.0,
'class_size_reduction':class_size_reduction,
'student_testing_rate':1.0,
'teacher_testing_rate':1.0,
'mask_efficiency_inhale':0.7,
'mask_efficiency_exhale':0.5,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':0,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
ventilation_efficiency_data = \
ventilation_efficiency_data.append(row, ignore_index=True)
ventilation_efficiency_data.to_csv(join(src_path.split('/ensembles')[0],
'ventilation_efficiency_observables.csv'), index=False)
# combine the sensitivity analysis data with the baseline data
# (only relevant columns)
baseline_chunk = baseline_data[\
(baseline_data['test_type'] == 'antigen') &\
(baseline_data['turnover'] == 0) &\
(baseline_data['student_screen_interval'] == s_screen_interval) &\
(baseline_data['teacher_screen_interval'] == t_screen_interval) &\
(baseline_data['student_mask'] == student_mask) &\
(baseline_data['teacher_mask'] == teacher_mask) &\
(baseline_data['class_size_reduction'] == class_size_reduction) &\
(baseline_data['ventilation_modification'] == 0.36)]
ventilation_efficiency_data = pd.concat([ventilation_efficiency_data, \
baseline_chunk[ventilation_efficiency_data.columns].copy()])
return ventilation_efficiency_data
def get_mask_efficiency_data(src_path, params,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
mask_efficiency_data = pd.DataFrame()
for stype in school_types:
print('\t{}'.format(stype))
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, class_size_reduction, vent_mod, m_efficiency_exhale, \
m_efficiency_inhale = screening_params
# calculate the ensemble statistics for each measure combination
measure_data = stype_data[\
(stype_data['preventive_test_type'] == 'same_day_antigen') &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod) &\
(stype_data['mask_efficiency_inhale'] == m_efficiency_inhale) &\
(stype_data['mask_efficiency_exhale'] == m_efficiency_exhale)
]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'.format(screening_params))
row = {'school_type':stype,
'test_type':'antigen',
'turnover':0,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':1.0,
'class_size_reduction':0.0,
'student_testing_rate':1.0,
'teacher_testing_rate':1.0,
                   'mask_efficiency_inhale':m_efficiency_inhale,
                   'mask_efficiency_exhale':m_efficiency_exhale,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':0,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
mask_efficiency_data = \
mask_efficiency_data.append(row, ignore_index=True)
mask_efficiency_data.to_csv(join(src_path.split('/ensembles')[0],
'mask_efficiency_observables.csv'), index=False)
return mask_efficiency_data
def get_added_friendship_contacts_data(src_path, params, baseline_data,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
added_friendship_contacts_data = pd.DataFrame()
for stype in school_types:
print('\t{}'.format(stype))
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, class_size_reduction, vent_mod, friendship_ratio = screening_params
# calculate the ensemble statistics for each measure combination
measure_data = stype_data[\
(stype_data['preventive_test_type'] == 'same_day_antigen') &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod) &\
(stype_data['friendship_ratio'] == friendship_ratio)
]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'.format(screening_params))
row = {'school_type':stype,
'test_type':'antigen',
'turnover':0,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':1.0,
'class_size_reduction':class_size_reduction,
'student_testing_rate':1.0,
'teacher_testing_rate':1.0,
'mask_efficiency_inhale':0.7,
'mask_efficiency_exhale':0.5,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':friendship_ratio,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
added_friendship_contacts_data = \
added_friendship_contacts_data.append(row, ignore_index=True)
added_friendship_contacts_data.to_csv(join(src_path.split('/ensembles')[0],
'added_friendship_contacts_observables.csv'), index=False)
baseline_chunk = baseline_data[\
(baseline_data['test_type'] == 'antigen') &\
(baseline_data['turnover'] == 0) &\
(baseline_data['student_screen_interval'] == s_screen_interval) &\
(baseline_data['teacher_screen_interval'] == t_screen_interval) &\
(baseline_data['student_mask'] == student_mask) &\
(baseline_data['teacher_mask'] == teacher_mask) &\
(baseline_data['class_size_reduction'] == class_size_reduction) &\
(baseline_data['ventilation_modification'] == 1.0) &\
(baseline_data['friendship_ratio'] == 0.0)]
added_friendship_contacts_data = pd.concat([added_friendship_contacts_data, \
baseline_chunk[added_friendship_contacts_data.columns].copy()])
return added_friendship_contacts_data
def get_worst_case_data(src_path, params,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
worst_case_data = pd.DataFrame()
for stype in school_types:
stype_data = get_data(stype, src_path)
for i, screening_params in params.iterrows():
index_case, s_screen_interval, t_screen_interval, student_mask, \
teacher_mask, class_size_reduction, vent_mod, m_efficiency_exhale, \
m_efficiency_inhale, s_test_rate, t_test_rate, ttype, friendship_ratio \
= screening_params
turnover, _, test = ttype.split('_')
sensitivity = float(test.split('antigen')[1])
# calculate the ensemble statistics for each measure combination
measure_data = stype_data[\
(stype_data['preventive_test_type'] == ttype) &\
(stype_data['index_case'] == index_case) &\
(stype_data['student_screening_interval'] == s_screen_interval) &\
(stype_data['teacher_screening_interval'] == t_screen_interval) &\
(stype_data['student_mask'] == student_mask) &\
(stype_data['teacher_mask'] == teacher_mask) &\
(stype_data['class_size_reduction'] == class_size_reduction) &\
(stype_data['transmission_risk_ventilation_modifier'] == vent_mod)]
if len(measure_data) == 0:
print('WARNING: empty measure data for {}'.format(screening_params))
half = False
if class_size_reduction > 0:
half = True
row = {'school_type':stype,
'test_type':test,
'turnover':turnover,
'index_case':index_case,
'student_screen_interval':s_screen_interval,
'teacher_screen_interval':t_screen_interval,
'student_mask':student_mask,
'teacher_mask':teacher_mask,
'ventilation_modification':vent_mod,
'test_sensitivity':sensitivity,
'class_size_reduction':class_size_reduction,
'half_classes':half,
'student_testing_rate':s_test_rate,
'teacher_testing_rate':t_test_rate,
'mask_efficiency_inhale':m_efficiency_inhale,
'mask_efficiency_exhale':m_efficiency_exhale,
'base_transmission_risk_multiplier':1.0,
'friendship_ratio':friendship_ratio,
'student_vaccination_ratio':0,
'teacher_vaccination_ratio':0,
'family_member_vaccination_ratio':0}
for col in observables_of_interest:
row.update(af.get_statistics(measure_data, col))
worst_case_data = \
worst_case_data.append(row, ignore_index=True)
worst_case_data['scenario'] = 'conservative'
return worst_case_data
def get_worst_case_and_vaccinations_data(src_path, params,
school_types=['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary'],
observables_of_interest=['infected_agents', 'R0']):
worst_case_and_vaccinations_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on: 15:54:55 05-Jan-2020
Author: <NAME>
This code is licensed under the MIT license
"""
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.models.widgets import (Div, FileInput, Panel)
from bokeh.plotting import Figure
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, LinearAxis, Range1d
import pandas as pd
from io import BytesIO
import base64
# %%---------------------------------------------------------------------------
# ReadDisplay
# -----------------------------------------------------------------------------
class ReadDisplay():
"""Reads data saved to file into the system and displays it."""
# %%
def __init__(self, controller):
"""Method sets up object. First part of two-part initialization."""
self.controller = controller
# Header for section to get file
self.file_header =\
Div(text="""<span style='font-weight:bold'>"""
"""Choose the file to display</span>""",
sizing_mode='stretch_width')
# Selects the data file to read into the system
self.select_file = FileInput(accept=".csv",
sizing_mode='stretch_width')
# Shows summary and status for data read in.
self.status = Div(text="""No file connected""",
sizing_mode='stretch_width')
# Chart to show temperature and/or humidity.
self.temphumidity = Figure(x_axis_type='datetime',
title="Humidity & temperature by datetime",
x_axis_label='Datetime',
y_axis_label='Temperature (C)')
df = pd.DataFrame(
{'Timestamp': [
|
pd.to_datetime('4/12/2016 8:15:33 AM')
|
pandas.to_datetime
|
import getpass
import pandas as pd
from PySide2 import QtGui
from PySide2.QtWidgets import QMainWindow, QMessageBox
from components.mensagens import Mensagens
from dao.relatorio_dao import RelatorioDao
from view.ui_tela_relatorio_chamados import Ui_RelatorioChamado
class TelaRelatorioChamado(QMainWindow, Ui_RelatorioChamado):
"""Classe da tela de relatório de chamados.
Esta classe tem por finalidade gerar vários relatórios conforme necessidade do usuário.
"""
def __init__(self):
super(TelaRelatorioChamado, self).__init__()
self.setupUi(self)
self.setWindowTitle("Relatório de Chamados")
self.setFixedSize(400, 466)
self.popula_combo_solucao()
self.mensagem = Mensagens()
self.btn_cancelar.clicked.connect(self.close)
self.btn_gerar_solucao.clicked.connect(self.gerar_relatorio_solucao)
"""Função que chamado o método de gerar relatório de soluções."""
self.btn_gerar_data.clicked.connect(self.gerar_relatorio_chamado_data)
"""Função que chamado o método de gerar relatório por Data."""
self.btn_gerar_tipo.clicked.connect(self.gerar_relatorio_tipo_chamado)
"""Função que chamado o método de gerar relatório por tipo."""
self.btn_gerar_status.clicked.connect(self.gerar_relatorio_status_chamado)
"""Função que chamado o método de gerar relatório por Status."""
self.btn_gerar_relatorio_padrao.clicked.connect(self.gerar_relatorio_padrao)
"""Função que chamado o método de gerar relatório padrão."""
def popula_combo_solucao(self):
"""Popular combo solução
Popula a combo de solução com o nome das soluções cadastradas.
:return: Lista de Soluções.
"""
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.consulta_nome_solucao()
for i in resultado:
self.combo_solucao.addItem(str(i[0]))
def gerar_relatorio_chamado_data(self):
"""Gerar relatório por data.
Gera um relatório tendo como parametro a data e salva em .xlsx.
:return: Arquivo .xlsx
"""
user_windows = getpass.getuser()
if self.txt_data.text() == "":
self.mensagem.mensagem_campo_vazio('DATA')
else:
data = self.txt_data.text()
if self.radio_numero_chamado.isChecked():
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_chamado_data_ordenado_por_numero(data)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados =
|
pd.DataFrame(resultado)
|
pandas.DataFrame
|
#!/home/bryanfeeney/anaconda3/bin/python3.6
#
# Simple script that uses the Microsoft Light Gradient-Boosted Machine-Learning
# toolkit to make predictions *separately* for each value.
#
from datetime import date, timedelta, datetime
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import sys
import json
import psycopg2
FutureDaysToCalculate = 16
WeeksOfHistoryForMinTrainigData = 20
WeeksOfHistoryForFeature = 7
WeeksOfHistoryForFeatureOnValidate = 3
TrainingTimePeriodCount = 6
def load_data_csv (cumul_sales_path, cumul_sales_query_path, items_path, stores_path, query_start_date=None):
"""
Loads four datasets from the file-system in CSV format:
cumul_sales_path is the cumulative sales data; it should cover the last 12 months
cumul_sales_query_path enumerates the things to predict
items is item data
stores is store data
query_start_date: if None, it is inferred from the first row of the cumul_sales_query file. If
it is not None, then cumul_sales_query rows before this date are removed.
"""
cumul_sales = pd.read_csv(
cumul_sales_path,
usecols=[1, 2, 3, 4, 5],
dtype={'onpromotion': bool},
converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0},
parse_dates=["date"]
)
if cumul_sales_query_path is not None:
cumul_sales_query = pd.read_csv(
cumul_sales_query_path,
usecols=[0, 1, 2, 3, 4],
dtype={'onpromotion': bool},
parse_dates=["date"],
)
if query_start_date is None:
query_start_date = str(cumul_sales_query.iloc[0,1]).split(" ")[0]
else:
cumul_sales_query = cumul_sales_query[cumul_sales_query.date >= query_start_date]
cumul_sales_query = cumul_sales_query.set_index(
['store_nbr', 'item_nbr', 'date']
)
items = pd.read_csv(
items_path,
).set_index("item_nbr")
stores = pd.read_csv(
stores_path
).set_index("store_nbr")
return cumul_sales, cumul_sales_query, query_start_date, items, stores
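# Illustrative usage sketch (the file names below are hypothetical placeholders,
# not part of the original pipeline):
# cumul_sales, cumul_sales_query, query_start_date, items, stores = load_data_csv(
#     "sales_last_year.csv", "sales_to_predict.csv", "items.csv", "stores.csv")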
def load_data_sql (cumul_sales_path, cumul_sales_query_path, items_path, stores_path, query_start_date=None):
"""
Loads the same datasets as load_data_csv, but from a PostgreSQL database rather than the file-system:
cumul_sales_path is the cumulative sales data; it should cover the last 12 months
cumul_sales_query_path enumerates the things to predict
items is item data
stores is store data
"""
with open('db.json') as f:
conf = json.load(f)
print (str(conf))
conn_str = "host={} dbname={} user={} password={}".format(conf['host'], conf['database'], conf['user'], conf['passw'])
conn = psycopg2.connect(conn_str)
cumul_sales_query = pd.DataFrame()
c = 1
for chunk in pd.read_sql("select * from " + cumul_sales_query_path + " where date > CURRENT_DATE and date < CURRENT_DATE + INTERVAL '16 days' order by date asc", con=conn, chunksize=100000):
print ("Appending chunk " + str(c) + " to future promotions")
c += 1
cumul_sales_query = cumul_sales_query.append(chunk)
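# The frame is built chunk-by-chunk; in newer pandas releases, where
# DataFrame.append is deprecated, pd.concat([cumul_sales_query, chunk]) is the
# usual equivalent.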
cumul_sales_query.date = pd.to_datetime(cumul_sales_query.date)
if query_start_date is None:
query_start_date = str(cumul_sales_query.iloc[0,1]).split(" ")[0]
else:
cumul_sales_query = cumul_sales_query[cumul_sales_query.date >= query_start_date]
cumul_sales_query = cumul_sales_query.set_index(
['store_nbr', 'item_nbr', 'date']
)
print("Future promotions loaded")
cumul_sales =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import sys
import re
import json
import yaml
import pandas as pd
import numpy as np
sys.path.append('../')
from load_paths import load_box_paths
try:
print(Location)
except NameError:
if os.name == "posix":
Location = "NUCLUSTER"
else:
Location = "Local"
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
class covidModel:
def __init__(self,subgroups, expandModel, observeLevel='primary', add_interventions='baseline',
change_testDelay=False, intervention_config='intervention_emodl_config.yaml',
add_migration=False, fit_params=None,emodl_name=None, git_dir=git_dir):
self.model = 'locale'
self.grpList = subgroups
self.expandModel = expandModel
self.add_migration = add_migration
self.observeLevel = observeLevel
self.add_interventions = add_interventions
self.change_testDelay = change_testDelay
self.intervention_config = intervention_config
self.emodl_name = emodl_name
self.startdate = pd.Timestamp('2020-02-13')
self.emodl_dir = os.path.join(git_dir, 'emodl')
self.fit_param = fit_params  # Currently supports a single parameter only
def get_configs(key, config_file='intervention_emodl_config.yaml'):
yaml_file = open(os.path.join('./experiment_configs', config_file))
config_dic = yaml.safe_load(yaml_file)
config_dic = config_dic[key]
return config_dic
## For postprocessing that splits by '_', it is easier if EMS are named EMS-1 rather than EMS_1
## This might change depending on the postprocessing
def sub(x):
xout = re.sub('_', '-', str(x), count=1)
return xout
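# Example (illustrative): covidModel.sub("EMS_11") returns "EMS-11"; only the first
# underscore is replaced, so "EMS_region_1" would become "EMS-region_1".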
def DateToTimestep(date, startdate):
datediff = date - startdate
timestep = datediff.days
return timestep
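# Worked example (hypothetical dates): with the model start date of 2020-02-13,
# covidModel.DateToTimestep(pd.Timestamp('2020-03-01'), pd.Timestamp('2020-02-13'))
# returns 17.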
def get_trigger(grp, channel):
grp_nr = grp.replace('EMS_','')
file_path = os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'hospital_capacity_thresholds')
files = os.listdir(file_path)
files = [name for name in files if not 'extra_thresholds' in name]
filedates = [item.replace('capacity_weekday_average_', '') for item in files]
filedates = [item.replace('.csv', '') for item in filedates]
latest_filedate = max([int(x) for x in filedates])
fname = 'capacity_weekday_average_' + str(latest_filedate) + '.csv'
ems_fname = os.path.join(datapath, 'covid_IDPH/Corona virus reports/hospital_capacity_thresholds/', fname)
df = pd.read_csv(ems_fname)
df = df.drop_duplicates()
df = df[df['geography_modeled'] == f'covidregion_{grp_nr}']
df = df[df['overflow_threshold_percent'] == 1]
df['ems'] = df['geography_modeled']
df['ems'] = df['geography_modeled'].replace("covidregion_", "", regex=True)
df = df[['ems', 'resource_type', 'avg_resource_available']]
df = df.drop_duplicates()
## if conflicting numbers, take the lower ones!
dups = df.groupby(["ems", "resource_type"])["avg_resource_available"].nunique()
if int(dups.nunique()) > 1:
print(f'{ems_fname} contains multiple capacity values, selecting the lower ones.')
df = df.loc[df.groupby(["ems", "resource_type"])["avg_resource_available"].idxmax()]
df = df.pivot(index='ems', columns='resource_type', values='avg_resource_available')
df.index.name = 'ems'
df.reset_index(inplace=True)
df = df.rename(columns={ 'hb_availforcovid':'hosp_det',
'hb_availforcovid':'total_hosp_census',
'icu_availforcovid': 'crit_det',
'vent_availforcovid':'ventilators'})
return int(df[channel])
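# Illustrative call (hypothetical region): covidModel.get_trigger('EMS_11', 'crit_det')
# returns the latest ICU-bed availability threshold reported for covidregion_11.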
def get_species(self):
state_SE = ['S', 'E']
state_nosymptoms = ['As', 'As_det1', 'P', 'P_det']
state_symptoms = ['Sym', 'Sym_det2', 'Sys', 'Sys_det3']
# state_hospitalized = ['H1', 'H2', 'H3', 'H1_det3', 'H2_det3', 'H3_det3']
state_hospitalized = ['H1', 'H2pre', 'H2post', 'H3', 'H1_det3', 'H2pre_det3', 'H2post_det3', 'H3_det3']
state_critical = ['C2', 'C3', 'C2_det3', 'C3_det3']
state_deaths = ['D3', 'D3_det3']
state_recoveries = ['RAs', 'RSym', 'RH1', 'RC2', 'RAs_det1', 'RSym_det2', 'RH1_det3', 'RC2_det3']
state_testDelay_SymSys = ['Sym_preD', 'Sys_preD']
state_testDelay_AsSymSys = ['As_preD', 'Sym_preD', 'Sym_det2a', 'Sym_det2b', 'Sys_preD', 'Sys_det3a', 'Sys_det3b']
state_variables = state_SE + state_nosymptoms + state_symptoms + state_hospitalized + state_critical + state_deaths + state_recoveries
if self.expandModel == "SymSys" or self.expandModel == "uniform":
state_variables = state_variables + state_testDelay_SymSys
if self.expandModel == "AsSymSys":
state_variables = state_variables + state_testDelay_AsSymSys
if 'vaccine' in self.add_interventions:
state_variables_vaccine = [f'{state}_V' for state in state_variables ]
state_variables = state_variables + state_variables_vaccine
return state_variables
def write_species(self, grp):
state_variables = covidModel.get_species(self)
def write_species_emodl():
grp_suffix = "::{grp}"
grp_suffix2 = "_{grp}"
species_emodl = ""
for state in state_variables:
if state == "S":
species_emodl = species_emodl + f'(species {state}{grp_suffix} @speciesS{grp_suffix2}@)\n'
else:
species_emodl = species_emodl + f'(species {state}{grp_suffix} 0)\n'
return species_emodl
def write_species_str(species_emodl, grp):
grp = str(grp)
species_str = species_emodl.format(grp=grp)
return species_str
species_emodl = write_species_emodl()
species_str = write_species_str(species_emodl, grp)
return species_str
def get_channels(self):
"""Channels to exclude from final list"""
channels_not_observe = ['presymp_det','presymp_cumul','presymp_det_cumul']
"""Define channels to observe """
primary_channels_notdet = ['susceptible','infected','recovered','symp_mild','symp_severe','hosp','crit','deaths']
secondary_channels_notdet = ['exposed','asymp','presymp','detected']
tertiary_channels = ['infectious_undet', 'infectious_det', 'infectious_det_symp', 'infectious_det_AsP']
channels_notdet = primary_channels_notdet
if self.observeLevel != 'primary':
channels_notdet = channels_notdet + secondary_channels_notdet
channels_det = [channel + '_det' for channel in channels_notdet if channel not in ['susceptible', 'exposed','detected']]
channels_cumul = [channel + '_cumul' for channel in channels_notdet + channels_det
if channel not in ['susceptible','exposed', 'recovered', 'deaths', 'recovered_det']]
channels = channels_notdet + channels_det + channels_cumul
if self.observeLevel == 'tertiary':
channels = channels + tertiary_channels
channels = [channel for channel in channels if channel not in channels_not_observe]
channels = list(set(channels))
if 'vaccine' in self.add_interventions:
channels_vaccine = [f'{channel}_V' for channel in channels]
channels = channels + channels_vaccine
return channels
def write_observe(self, grp):
grp = str(grp)
grpout = covidModel.sub(grp)
def write_observe_emodl():
#grp_suffix = "::{grp}"
#grp_suffix2 = "_{grp}"
if 'vaccine' in self.add_interventions:
channels = covidModel.get_channels(self)
channels = channels[int(len(channels) / 2):]
observe_emodl = f"(observe vaccinated_cumul_{grpout} vaccinated_cumul_{grp})\n"
for channel in channels:
if channel == 'crit_V':
channel = 'critical_V'
if channel == 'hosp_V':
channel = 'hospitalized_V'
if channel == "susceptible_V":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} S_V::{grp})\n'
elif channel == "exposed_V":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} E_V::{grp})\n'
elif channel == "deaths_det_V":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} D3_det3_V::{grp})\n'
else:
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} {channel}_{grp})\n'
channels = covidModel.get_channels(self)
channels = channels[:int(len(channels) / 2)]
for channel in channels:
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} (+ S::{grp} S_V::{grp}))\n'
elif channel == "exposed":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} (+ E::{grp} E_V::{grp}))\n'
elif channel == "deaths_det":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} (+ D3_det3::{grp} D3_det3_V::{grp}))\n'
else:
observe_emodl= observe_emodl + f'(observe {channel}_{grpout} (+ {channel}_{grp} {channel}_V_{grp}))\n'
else:
channels = covidModel.get_channels(self)
observe_emodl = ""
for channel in channels:
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} S::{grp})\n'
elif channel == "exposed":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} E::{grp})\n'
elif channel == "deaths_det":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} D3_det3::{grp})\n'
else:
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} {channel}_{grp})\n'
"""Observe all state variables over time"""
if self.observeLevel=='all':
state_variables = covidModel.get_species(self)
for state in state_variables:
observe_emodl = observe_emodl + f'(observe {state}_{grp} {state}::{grp})\n'
return observe_emodl
def write_observe_str(observe_emodl, grp):
grp = str(grp)
observe_str = observe_emodl.format(grp=grp)
return observe_str
observe_emodl = write_observe_emodl()
observe_str = write_observe_str(observe_emodl, grp)
return observe_str
def write_functions(self, grp):
grp = str(grp)
func_dic = {'presymp_{grp}': ['P::{grp}', 'P_det::{grp}'],
'hospitalized_{grp}': ['H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', 'H3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}'],
'hosp_det_{grp}': ['H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}'],
'critical_{grp}': ['C2::{grp}', 'C3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'crit_det_{grp}': ['C2_det3::{grp}', 'C3_det3::{grp}'],
'deaths_{grp}': ['D3::{grp}', 'D3_det3::{grp}'],
'recovered_{grp}': ['RAs::{grp}', 'RSym::{grp}', 'RH1::{grp}', 'RC2::{grp}', 'RAs_det1::{grp}', 'RSym_det2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'recovered_det_{grp}': ['RAs_det1::{grp}', 'RSym_det2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'asymp_cumul_{grp}': ['asymp_{grp}', 'RAs::{grp}', 'RAs_det1::{grp}'],
'asymp_det_cumul_{grp}': ['As_det1::{grp}', 'RAs_det1::{grp}'],
'symp_mild_cumul_{grp}': ['symp_mild_{grp}', 'RSym::{grp}', 'RSym_det2::{grp}'],
'symp_mild_det_cumul_{grp}': ['symp_mild_det_{grp}', 'RSym_det2::{grp}'],
'symp_severe_cumul_{grp}': ['symp_severe_{grp}', 'hospitalized_{grp}', 'critical_{grp}', 'deaths_{grp}', 'RH1::{grp}', 'RC2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'symp_severe_det_cumul_{grp}': ['symp_severe_det_{grp}', 'hosp_det_{grp}', 'crit_det_{grp}', 'D3_det3::{grp}', ' RH1_det3::{grp}', 'RC2_det3::{grp}'],
'hosp_cumul_{grp}': ['hospitalized_{grp}', 'critical_{grp}', 'deaths_{grp}', 'RH1::{grp}', 'RC2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'hosp_det_cumul_{grp}': ['H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', ' H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}', 'D3_det3::{grp}', ' RH1_det3::{grp}', ' RC2_det3::{grp}'],
'crit_cumul_{grp}': ['deaths_{grp}', 'critical_{grp}', 'RC2::{grp}', 'RC2_det3::{grp}'],
'crit_det_cumul_{grp}': ['C2_det3::{grp}', 'C3_det3::{grp}', 'D3_det3::{grp}', 'RC2_det3::{grp}'],
'detected_cumul_{grp}': ['As_det1::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', ' H2post_det3::{grp}', ' C2_det3::{grp}', 'C3_det3::{grp}', 'RAs_det1::{grp}', 'RSym_det2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}', 'D3_det3::{grp}'],
'infected_{grp}': ['infectious_det_{grp}', 'infectious_undet_{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infected_det_{grp}': ['infectious_det_{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infected_cumul_{grp}': ['infected_{grp}', 'recovered_{grp}', 'deaths_{grp}'],
'infected_det_cumul_{grp}': ['infected_det_{grp}', 'recovered_det_{grp}', 'D3_det3::{grp}']
}
func_dic_base = {'asymp_{grp}': ['As::{grp}', 'As_det1::{grp}'],
'symp_mild_{grp}': ['Sym::{grp}', 'Sym_det2::{grp}'],
'symp_severe_{grp}': ['Sys::{grp}', 'Sys_det3::{grp}'],
'detected_{grp}': ['As_det1::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', ' H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infectious_undet_{grp}': ['As::{grp}', 'P::{grp}', 'Sym::{grp}', 'Sys::{grp}', 'H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', ' H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_det_{grp}': ['As_det1::{grp}', 'P_det::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_symp_{grp}': ['Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_AsP_{grp}': ['As_det1::{grp}', 'P_det::{grp}']
}
func_dic_SymSys = {'asymp_{grp}': ['As::{grp}', 'As_det1::{grp}'],
'symp_mild_{grp}': ['Sym::{grp}', 'Sym_preD::{grp}', 'Sym_det2::{grp}'],
'symp_mild_det_{grp}': ['Sym_preD::{grp}', 'Sym_det2::{grp}'],
'symp_severe_{grp}': ['Sys::{grp}', 'Sys_preD::{grp}', 'Sys_det3::{grp}'],
'symp_severe_det_{grp}': ['Sys_preD::{grp}', 'Sys_det3::{grp}'],
'detected_{grp}': ['As_det1::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', ' H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infectious_undet_{grp}': ['As::{grp}', 'P::{grp}', 'Sym_preD::{grp}', 'Sym::{grp}', 'Sys_preD::{grp}', 'Sys::{grp}', 'H1::{grp}', 'H2pre::{grp}', ' H2post::{grp}', ' H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_det_{grp}': ['As_det1::{grp}', 'P_det::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_symp_{grp}': ['Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_AsP_{grp}': ['As_det1::{grp}', 'P_det::{grp}']
}
func_dic_AsSymSys = {'asymp_{grp}': ['As_preD::{grp}', 'As::{grp}', 'As_det1::{grp}'],
'symp_mild_{grp}': ['Sym::{grp}', 'Sym_preD::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}'],
'symp_mild_det_{grp}': ['Sym_preD::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}'],
'symp_severe_{grp}': ['Sys::{grp}', 'Sys_preD::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'symp_severe_det_{grp}': ['Sys_preD::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'detected_{grp}': ['As_det1::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infectious_undet_{grp}': ['As_preD::{grp}', 'As::{grp}', 'P::{grp}', 'Sym::{grp}', 'Sym_preD::{grp}', 'Sys::{grp}', 'Sys_preD::{grp}', 'H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', 'H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_det_{grp}': ['As_det1::{grp}', 'P_det::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'infectious_undet_symp_{grp}': ['P::{grp}', 'Sym::{grp}', 'Sym_preD::{grp}', 'Sys::{grp}', 'Sys_preD::{grp}', 'H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', 'H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_undet_As_{grp}': ['As_preD::{grp}', 'As::{grp}'],
'infectious_det_symp_{grp}': ['Sym_det2a::{grp}', 'Sym_det2b::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'infectious_det_AsP_{grp}': ['As_det1::{grp}', 'P_det::{grp}']
}
func_str = f'(func deaths_det_cumul_{grp} D3_det3::{grp})\n(func asymp_det_{grp} As_det1::{grp})\n'
if self.expandModel == "SymSys" or self.expandModel == "uniform":
func_dic_SymSys.update(func_dic)
func_dic_all = func_dic_SymSys
elif self.expandModel == "AsSymSys":
func_dic_AsSymSys.update(func_dic)
func_dic_all = func_dic_AsSymSys
else:
func_str = func_str + f'(func symp_mild_det_{grp} Sym_det2::{grp})\n' \
f'(func symp_severe_det_{grp} Sys_det3::{grp})\n'
func_dic_base.update(func_dic)
func_dic_all = func_dic_base
if 'vaccine' in self.add_interventions:
vacc_cumul = f'(func vaccinated_cumul_{grp} (+ S_V::{grp} infected_V_{grp} recovered_V_{grp} deaths_V_{grp} ))\n'
func_str_V = func_str.replace(f'_{grp}',f'_V_{grp}')
func_str_V = func_str_V.replace(f'::{grp}',f'_V::{grp}')
func_str = func_str + func_str_V
func_dic_all_V = {}
for key, value in func_dic_all.items():
key_V = key.replace('_{grp}','_V_{grp}')
func_dic_all_V[key_V] = [item.replace('_{grp}','_V_{grp}') if '_{grp}' in item
else item.replace('::{grp}','_V::{grp}') for item in func_dic_all[key]]
func_dic_all.update(func_dic_all_V)
for key in func_dic_all.keys():
func_str = func_str + f"(func {key} (+ {' '.join(func_dic_all[key])}))\n".format(grp=grp)
if 'vaccine' in self.add_interventions:
func_str = func_str + vacc_cumul
return func_str
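# Illustrative output (not necessarily contiguous lines) for grp='EMS_1' with the
# base model: the returned string contains entries such as
# (func deaths_det_cumul_EMS_1 D3_det3::EMS_1)
# (func asymp_EMS_1 (+ As::EMS_1 As_det1::EMS_1))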
def write_params(self):
yaml_sampled_param = list(covidModel.get_configs(key ='sampled_parameters', config_file='extendedcobey_200428.yaml').keys())
yaml_sampled_param_str = ''.join([f'(param {param} @{param}@)\n' for param in yaml_sampled_param])
"""calculated parameters"""
param_dic = {'fraction_hospitalized' : '(- 1 (+ fraction_critical fraction_dead))',
'Kr_a' : '(/ 1 recovery_time_asymp)',
'Kr_m' : '(/ 1 recovery_time_mild)',
'Kl' : '(/ (- 1 fraction_symptomatic ) time_to_infectious)',
'Ks' :'(/ fraction_symptomatic time_to_infectious)',
'Ksys' :'(* fraction_severe (/ 1 time_to_symptoms))',
'Ksym' :'(* (- 1 fraction_severe) (/ 1 time_to_symptoms))',
'Km' :'(/ 1 time_to_death)',
'Kc' :'(/ 1 time_to_critical)',
'Kr_hc' :'(/ 1 recovery_time_postcrit)',
'Kr_h' :'(/ 1 recovery_time_hosp)',
'Kr_c' :'(/ 1 recovery_time_crit)'
}
param_dic_base = {'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)'
}
param_dic_uniform = {'time_D':'@time_to_detection@',
'Ksym_D':'(/ 1 time_D)',
'Ksys_D':'(/ 1 time_D)',
'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)',
'Kh1_D':'(/ fraction_hospitalized (- time_to_hospitalization time_D))',
'Kh2_D':'(/ fraction_critical (- time_to_hospitalization time_D) )',
'Kh3_D':'(/ fraction_dead (- time_to_hospitalization time_D))',
'Kr_m_D':'(/ 1 (- recovery_time_mild time_D ))'
}
param_dic_SymSys = {'time_D_Sym':'@time_to_detection_Sym@',
'time_D_Sys':'@time_to_detection_Sys@',
'Ksym_D':'(/ 1 time_D_Sym)',
'Ksys_D':'(/ 1 time_D_Sys)',
'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)',
'Kh1_D':'(/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))',
'Kh2_D':'(/ fraction_critical (- time_to_hospitalization time_D_Sys) )',
'Kh3_D':'(/ fraction_dead (- time_to_hospitalization time_D_Sys))',
'Kr_m_D':'(/ 1 (- recovery_time_mild time_D_Sym ))'
}
param_dic_AsSymSys = {'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)',
'time_D_Sys':'@time_to_detection_Sys@',
'Ksys_D':'(/ 1 time_D_Sys)',
'Kh1_D':'(/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))',
'Kh2_D':'(/ fraction_critical (- time_to_hospitalization time_D_Sys) )',
'Kh3_D':'(/ fraction_dead (- time_to_hospitalization time_D_Sys))',
'time_D_Sym':'@time_to_detection_Sym@',
'Ksym_D':'(/ 1 time_D_Sym)',
'Kr_m_D':'(/ 1 (- recovery_time_mild time_D_Sym ))',
'time_D_As':'@time_to_detection_As@',
'Kl_D':'(/ 1 time_D_As)',
'Kr_a_D':'(/ 1 (- recovery_time_asymp time_D_As ))'
}
if self.expandModel == "SymSys":
param_dic_expand = param_dic_SymSys
elif self.expandModel == "uniform":
param_dic_expand = param_dic_uniform
elif self.expandModel == "AsSymSys":
param_dic_expand = param_dic_AsSymSys
else:
param_dic_expand = param_dic_base
calculated_params_str = ''.join([f'(param {key} {param_dic[key]})\n' for key in list(param_dic.keys())])
calculated_params_expand_str = ''.join([f'(param {key} {param_dic_expand[key]})\n' for key in list(param_dic_expand.keys())])
params_str = yaml_sampled_param_str + calculated_params_str + calculated_params_expand_str
if 'vaccine' in self.add_interventions:
#custom_param_vacc = ['fraction_symptomatic_V', 'fraction_severe_V']
custom_param_vacc_str = '(param fraction_symptomatic_V (* fraction_symptomatic @reduced_fraction_Sym@))\n' \
'(param fraction_severe_V (* fraction_severe @reduced_fraction_Sys@))\n'
param_symptoms_dic_V = {'KlV ': '(/ (- 1 fraction_symptomatic_V ) time_to_infectious)',
'KsV ': '(/ fraction_symptomatic_V time_to_infectious)',
'KsysV ': '(* fraction_severe_V (/ 1 time_to_symptoms))',
'KsymV ': '(* (- 1 fraction_severe_V ) (/ 1 time_to_symptoms))'
}
param_symptoms_str_V = ''.join([f'(param {key} {param_symptoms_dic_V[key]})\n' for key in list(param_symptoms_dic_V.keys())])
params_str = params_str + custom_param_vacc_str + param_symptoms_str_V
return params_str
def write_migration_param(self):
x1 = range(1, len(self.grpList) + 1)
x2 = range(1, len(self.grpList) + 1)
param_str = ""
for x1_i in x1:
param_str = param_str + "\n"
for x2_i in x2:
# x1_i=1
param_str = param_str + f'(param toEMS_{x1_i}_from_EMS_{x2_i} @toEMS_{x1_i}_from_EMS_{x2_i}@)\n'
return param_str
def write_travel_reaction(grp, travelspeciesList=None):
x1_i = int(grp.split("_")[1])
x2 = list(range(1, 12))
x2 = [i for i in x2 if i != x1_i]
reaction_str = ""
if travelspeciesList == None:
travelspeciesList = ["S", "E", "As", "P"]
for travelspecies in travelspeciesList:
reaction_str = reaction_str + "\n"
for x2_i in x2:
# x1_i=1
reaction_str = reaction_str + f'\n(reaction {travelspecies}_travel_EMS_{x2_i}to{x1_i} ' \
f'({travelspecies}::EMS_{x2_i}) ({travelspecies}::EMS_{x1_i}) ' \
f'(* {travelspecies}::EMS_{x2_i} toEMS_{x1_i}_from_EMS_{x2_i} ' \
f'(/ N_EMS_{x2_i} ' \
f'(+ S::EMS_{x2_i} E::EMS_{x2_i} As::EMS_{x2_i} P::EMS_{x2_i} recovered_EMS_{x2_i})' \
f')))\n'
return reaction_str
def write_Ki_timevents(grp):
grp = str(grp)
grpout = covidModel.sub(grp)
params_str = f'(param Ki_{grp} @Ki_{grp}@)\n' \
f'(observe Ki_t_{grpout} Ki_{grp})\n' \
f'(time-event time_infection_import @time_infection_import_{grp}@ ' \
f'(' \
f'(As::{grp} @initialAs_{grp}@) ' \
f'(S::{grp} (- S::{grp} @initialAs_{grp}@))' \
f')' \
f')\n'
return params_str
def write_N_population(self):
stringAll = ""
for grp in self.grpList:
string1 = f'(param N_{grp} (+ @speciesS_{grp}@ @initialAs_{grp}@))\n'
stringAll = stringAll + string1
string2 = f'(param N_All (+ {covidModel.repeat_string_by_grp("N_", self.grpList)}))\n'
string3 = '(observe N_All N_All)\n'
stringAll = stringAll + string2 + string3
return stringAll
def repeat_string_by_grp(fixedstring, grpList):
stringAll = ""
for grp in grpList:
temp_string = " " + fixedstring + grp
stringAll = stringAll + temp_string
return stringAll
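# Example (illustrative): covidModel.repeat_string_by_grp('N_', ['EMS_1', 'EMS_2'])
# returns ' N_EMS_1 N_EMS_2' (each entry is prefixed with a space).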
def write_observe_All(self):
grpList = self.grpList
if "vaccine" in self.add_interventions:
observe_channels_All_str = f"(observe vaccinated_cumul_All (+ " + covidModel.repeat_string_by_grp('vaccinated_cumul_',grpList) + "))\n"
channels = covidModel.get_channels(self)
channels = channels[:int(len(channels) / 2)]
for channel in channels:
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
temp_str = f"(observe {channel}_All " \
f"(+ " +\
covidModel.repeat_string_by_grp('S::', grpList) + \
covidModel.repeat_string_by_grp('S_V::', grpList) + \
"))\n"
elif channel == "deaths_det":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('D3_det3::', grpList) + \
covidModel.repeat_string_by_grp('D3_det3_V::', grpList) + \
"))\n"
elif channel == "exposed":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('E::', grpList) + \
covidModel.repeat_string_by_grp('E_V::', grpList) + \
"))\n"
elif channel == "asymp_det":
temp_str = f"(observe {channel}_All (+ " +\
covidModel.repeat_string_by_grp('As_det1::', grpList) + \
covidModel.repeat_string_by_grp('As_det1_V::', grpList) + \
"))\n"
elif channel == "presymp":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('P::', grpList) + \
covidModel.repeat_string_by_grp('P_V::', grpList) + \
"))\n"
elif channel == "presymp_det":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('P_det::',grpList) + \
covidModel.repeat_string_by_grp('P_det_V::', grpList) + \
"))\n"
else:
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp(f'{channel}_', grpList) + \
covidModel.repeat_string_by_grp(f'{channel}_V_', grpList) + \
"))\n"
observe_channels_All_str = observe_channels_All_str + temp_str
del temp_str
channels = covidModel.get_channels(self)
channels = channels[int(len(channels) / 2):]
for channel in channels:
if channel == 'crit_V':
channel = 'critical_V'
if channel == 'hosp_V':
channel = 'hospitalized_V'
if channel == "susceptible_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('S_V::',grpList) + "))\n"
elif channel == "deaths_det_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('D3_det3_V::', grpList) + "))\n"
elif channel == "exposed_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('E_V::',grpList) + "))\n"
elif channel == "asymp_det_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('As_det1_V::', grpList) + "))\n"
elif channel == "presymp_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P_V::', grpList) + "))\n"
elif channel == "presymp_det_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P_det_V::', grpList) + "))\n"
else:
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp(f'{channel}_', grpList) + "))\n"
observe_channels_All_str = observe_channels_All_str + temp_str
del temp_str
else:
observe_channels_All_str = ""
channels = covidModel.get_channels(self)
for channel in channels :
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('S::', grpList) + "))\n"
elif channel == "deaths_det":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('D3_det3::', grpList) + "))\n"
elif channel == "exposed":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('E::', grpList) + "))\n"
elif channel == "asymp_det":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('As_det1::', grpList) + "))\n"
elif channel == "presymp":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P::',grpList) + "))\n"
elif channel == "presymp_det":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P_det::', grpList) + "))\n"
else:
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp(f'{channel}_', grpList) + "))\n"
observe_channels_All_str = observe_channels_All_str + temp_str
del temp_str
return observe_channels_All_str
def write_reactions(self, grp):
grp = str(grp)
reaction_str_I = f'\n(reaction exposure_{grp} ' \
f'(S::{grp}) (E::{grp}) ' \
f'(* Ki_{grp} S::{grp} ' \
f'(/ ' \
f'(+ infectious_undet_symp_{grp} ' \
f'(* infectious_undet_As_{grp} reduced_infectious_As ) ' \
f'(* infectious_det_symp_{grp} reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_{grp} reduced_inf_of_det_cases)' \
f') N_{grp} )' \
f'))\n'
reaction_str_Ia = f'\n(reaction exposure_{grp} ' \
f'(S::{grp}) (E::{grp}) ' \
f'(* Ki_{grp} S::{grp} ' \
f'(/ ' \
f'(+ infectious_undet_symp_{grp}' \
f'(* (+ infectious_undet_symp_V_{grp} infectious_undet_As_V_{grp} ) reduced_infectious_V ) ' \
f'(* infectious_undet_As_{grp} reduced_infectious_As ) ' \
f'(* infectious_det_symp_{grp} reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_{grp} reduced_inf_of_det_cases)' \
f'(* infectious_det_symp_V_{grp} reduced_infectious_V reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_V_{grp} reduced_infectious_V reduced_inf_of_det_cases)' \
f') N_{grp} )' \
f'))\n'
reaction_str_Ib = f'\n(reaction exposure_{grp} ' \
f'(S_V::{grp}) (E_V::{grp}) ' \
f'(* Ki_{grp} S_V::{grp} ' \
f'(/ ' \
f'(+ infectious_undet_symp_{grp}' \
f'(* (+ infectious_undet_symp_V_{grp} infectious_undet_As_V_{grp} ) reduced_infectious_V ) ' \
f'(* infectious_undet_As_{grp} reduced_infectious_As ) ' \
f'(* infectious_det_symp_{grp} reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_{grp} reduced_inf_of_det_cases)' \
f'(* infectious_det_symp_V_{grp} reduced_infectious_V reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_V_{grp} reduced_infectious_V reduced_inf_of_det_cases)' \
f') N_{grp} )' \
f'))\n'
if 'vaccine' in self.add_interventions:
reaction_str_I = f'(reaction vaccination_{grp} (S::{grp}) (S_V::{grp}) (* Kv_{grp} S::{grp}))\n'
reaction_str_I = reaction_str_I + reaction_str_Ia + reaction_str_Ib
reaction_str_III = f'(reaction recovery_H1_{grp} (H1::{grp}) (RH1::{grp}) (* Kr_h{grp} H1::{grp}))\n' \
f'(reaction recovery_C2_{grp} (C2::{grp}) (H2post::{grp}) (* Kr_c{grp} C2::{grp}))\n' \
f'(reaction recovery_H2post_{grp} (H2post::{grp}) (RC2::{grp}) (* Kr_hc H2post::{grp}))\n' \
f'(reaction recovery_H1_det3_{grp} (H1_det3::{grp}) (RH1_det3::{grp}) (* Kr_h{grp} H1_det3::{grp}))\n' \
f'(reaction recovery_C2_det3_{grp} (C2_det3::{grp}) (H2post_det3::{grp}) (* Kr_c{grp} C2_det3::{grp}))\n' \
f'(reaction recovery_H2post_det3_{grp} (H2post_det3::{grp}) (RC2_det3::{grp}) (* Kr_hc H2post_det3::{grp}))\n'
expand_base_str = f'(reaction infection_asymp_undet_{grp} (E::{grp}) (As::{grp}) (* Kl E::{grp} (- 1 d_As)))\n' \
f'(reaction infection_asymp_det_{grp} (E::{grp}) (As_det1::{grp}) (* Kl E::{grp} d_As))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P::{grp}) (* Ks E::{grp} (- 1 d_P)))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P_det::{grp}) (* Ks E::{grp} d_P))\n' \
f'(reaction mild_symptomatic_undet_{grp} (P::{grp}) (Sym::{grp}) (* Ksym P::{grp} (- 1 d_Sym)))\n' \
f'(reaction mild_symptomatic_det_{grp} (P::{grp}) (Sym_det2::{grp}) (* Ksym P::{grp} d_Sym))\n' \
f'(reaction severe_symptomatic_undet_{grp} (P::{grp}) (Sys::{grp}) (* Ksys P::{grp} (- 1 d_Sys)))\n' \
f'(reaction severe_symptomatic_det_{grp} (P::{grp}) (Sys_det3::{grp}) (* Ksys P::{grp} d_Sys))\n' \
f'(reaction mild_symptomatic_det_{grp} (P_det::{grp}) (Sym_det2::{grp}) (* Ksym P_det::{grp}))\n' \
f'(reaction severe_symptomatic_det_{grp} (P_det::{grp}) (Sys_det3::{grp}) (* Ksys P_det::{grp} ))\n' \
f'(reaction hospitalization_1_{grp} (Sys::{grp}) (H1::{grp}) (* Kh1 Sys::{grp}))\n' \
f'(reaction hospitalization_2_{grp} (Sys::{grp}) (H2pre::{grp}) (* Kh2 Sys::{grp}))\n' \
f'(reaction hospitalization_3_{grp} (Sys::{grp}) (H3::{grp}) (* Kh3 Sys::{grp}))\n' \
f'(reaction critical_2_{grp} (H2pre::{grp}) (C2::{grp}) (* Kc H2pre::{grp}))\n' \
f'(reaction critical_3_{grp} (H3::{grp}) (C3::{grp}) (* Kc H3::{grp}))\n' \
f'(reaction deaths_{grp} (C3::{grp}) (D3::{grp}) (* Km C3::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3::{grp}) (H1_det3::{grp}) (* Kh1 Sys_det3::{grp}))\n' \
f'(reaction hospitalization_2_det_{grp} (Sys_det3::{grp}) (H2pre_det3::{grp}) (* Kh2 Sys_det3::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3::{grp}) (H3_det3::{grp}) (* Kh3 Sys_det3::{grp}))\n' \
f'(reaction critical_2_det2_{grp} (H2pre_det3::{grp}) (C2_det3::{grp}) (* Kc H2pre_det3::{grp}))\n' \
f'(reaction critical_3_det2_{grp} (H3_det3::{grp}) (C3_det3::{grp}) (* Kc H3_det3::{grp}))\n' \
f'(reaction deaths_det3_{grp} (C3_det3::{grp}) (D3_det3::{grp}) (* Km C3_det3::{grp}))\n' \
f'(reaction recovery_As_{grp} (As::{grp}) (RAs::{grp}) (* Kr_a As::{grp}))\n' \
f'(reaction recovery_As_det_{grp} (As_det1::{grp}) (RAs_det1::{grp}) (* Kr_a As_det1::{grp}))\n' \
f'(reaction recovery_Sym_{grp} (Sym::{grp}) (RSym::{grp}) (* Kr_m Sym::{grp}))\n' \
f'(reaction recovery_Sym_det2_{grp} (Sym_det2::{grp}) (RSym_det2::{grp}) (* Kr_m Sym_det2::{grp}))\n'
expand_testDelay_SymSys_str = f'(reaction infection_asymp_undet_{grp} (E::{grp}) (As::{grp}) (* Kl E::{grp} (- 1 d_As)))\n' \
f'(reaction infection_asymp_det_{grp} (E::{grp}) (As_det1::{grp}) (* Kl E::{grp} d_As))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P::{grp}) (* Ks E::{grp}))\n' \
f'; developing symptoms - same time to symptoms as in master emodl\n' \
f'(reaction mild_symptomatic_{grp} (P::{grp}) (Sym_preD::{grp}) (* Ksym P::{grp}))\n' \
f'(reaction severe_symptomatic_{grp} (P::{grp}) (Sys_preD::{grp}) (* Ksys P::{grp}))\n' \
f'; never detected \n' \
f'(reaction mild_symptomatic_undet_{grp} (Sym_preD::{grp}) (Sym::{grp}) (* Ksym_D Sym_preD::{grp} (- 1 d_Sym)))\n' \
f'(reaction severe_symptomatic_undet_{grp} (Sys_preD::{grp}) (Sys::{grp}) (* Ksys_D Sys_preD::{grp} (- 1 d_Sys)))\n' \
f'; new detections - time to detection is subtracted from hospital time\n' \
f'(reaction mild_symptomatic_det_{grp} (Sym_preD::{grp}) (Sym_det2::{grp}) (* Ksym_D Sym_preD::{grp} d_Sym))\n' \
f'(reaction severe_symptomatic_det_{grp} (Sys_preD::{grp}) (Sys_det3::{grp}) (* Ksys_D Sys_preD::{grp} d_Sys))\n' \
f'(reaction hospitalization_1_{grp} (Sys::{grp}) (H1::{grp}) (* Kh1_D Sys::{grp}))\n' \
f'(reaction hospitalization_2_{grp} (Sys::{grp}) (H2pre::{grp}) (* Kh2_D Sys::{grp}))\n' \
f'(reaction hospitalization_3_{grp} (Sys::{grp}) (H3::{grp}) (* Kh3_D Sys::{grp}))\n' \
f'(reaction critical_2_{grp} (H2pre::{grp}) (C2::{grp}) (* Kc H2pre::{grp}))\n' \
f'(reaction critical_3_{grp} (H3::{grp}) (C3::{grp}) (* Kc H3::{grp}))\n' \
f'(reaction deaths_{grp} (C3::{grp}) (D3::{grp}) (* Km C3::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3::{grp}) (H1_det3::{grp}) (* Kh1_D Sys_det3::{grp}))\n' \
f'(reaction hospitalization_2_det_{grp} (Sys_det3::{grp}) (H2pre_det3::{grp}) (* Kh2_D Sys_det3::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3::{grp}) (H3_det3::{grp}) (* Kh3_D Sys_det3::{grp}))\n' \
f'(reaction critical_2_det2_{grp} (H2pre_det3::{grp}) (C2_det3::{grp}) (* Kc H2pre_det3::{grp}))\n' \
f'(reaction critical_3_det2_{grp} (H3_det3::{grp}) (C3_det3::{grp}) (* Kc H3_det3::{grp}))\n' \
f'(reaction deaths_det3_{grp} (C3_det3::{grp}) (D3_det3::{grp}) (* Km C3_det3::{grp}))\n' \
f'(reaction recovery_As_{grp} (As::{grp}) (RAs::{grp}) (* Kr_a As::{grp}))\n' \
f'(reaction recovery_As_det_{grp} (As_det1::{grp}) (RAs_det1::{grp}) (* Kr_a As_det1::{grp}))\n' \
f'(reaction recovery_Sym_{grp} (Sym::{grp}) (RSym::{grp}) (* Kr_m_D Sym::{grp}))\n' \
f'(reaction recovery_Sym_det2_{grp} (Sym_det2::{grp}) (RSym_det2::{grp}) (* Kr_m_D Sym_det2::{grp}))\n'
expand_testDelay_AsSymSys_str = f'(reaction infection_asymp_det_{grp} (E::{grp}) (As_preD::{grp}) (* Kl E::{grp}))\n' \
f'(reaction infection_asymp_undet_{grp} (As_preD::{grp}) (As::{grp}) (* Kl_D As_preD::{grp} (- 1 d_As)))\n' \
f'(reaction infection_asymp_det_{grp} (As_preD::{grp}) (As_det1::{grp}) (* Kl_D As_preD::{grp} d_As))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P::{grp}) (* Ks E::{grp} (- 1 d_P)))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P_det::{grp}) (* Ks E::{grp} d_P))\n' \
f'; developing symptoms - same time to symptoms as in master emodl\n' \
f'(reaction mild_symptomatic_{grp} (P::{grp}) (Sym_preD::{grp}) (* Ksym P::{grp}))\n' \
f'(reaction severe_symptomatic_{grp} (P::{grp}) (Sys_preD::{grp}) (* Ksys P::{grp}))\n' \
f'; never detected\n' \
f'(reaction mild_symptomatic_undet_{grp} (Sym_preD::{grp}) (Sym::{grp}) (* Ksym_D Sym_preD::{grp} (- 1 d_Sym)))\n' \
f'(reaction severe_symptomatic_undet_{grp} (Sys_preD::{grp}) (Sys::{grp}) (* Ksys_D Sys_preD::{grp} (- 1 d_Sys)))\n' \
f'; new detections - time to detection is subtracted from hospital time\n' \
f'(reaction mild_symptomatic_det_{grp} (Sym_preD::{grp}) (Sym_det2a::{grp}) (* Ksym_D Sym_preD::{grp} d_Sym))\n' \
f'(reaction severe_symptomatic_det_{grp} (Sys_preD::{grp}) (Sys_det3a::{grp}) (* Ksys_D Sys_preD::{grp} d_Sys))\n' \
f'; developing symptoms - already detected, same time to symptoms as in master emodl\n' \
f'(reaction mild_symptomatic_det_{grp} (P_det::{grp}) (Sym_det2b::{grp}) (* Ksym P_det::{grp}))\n' \
f'(reaction severe_symptomatic_det_{grp} (P_det::{grp}) (Sys_det3b::{grp}) (* Ksys P_det::{grp} ))\n' \
f'(reaction hospitalization_1_{grp} (Sys::{grp}) (H1::{grp}) (* Kh1_D Sys::{grp}))\n' \
f'(reaction hospitalization_2_{grp} (Sys::{grp}) (H2pre::{grp}) (* Kh2_D Sys::{grp}))\n' \
f'(reaction hospitalization_3_{grp} (Sys::{grp}) (H3::{grp}) (* Kh3_D Sys::{grp}))\n' \
f'(reaction critical_2_{grp} (H2pre::{grp}) (C2::{grp}) (* Kc H2pre::{grp}))\n' \
f'(reaction critical_3_{grp} (H3::{grp}) (C3::{grp}) (* Kc H3::{grp}))\n' \
f'(reaction deaths_{grp} (C3::{grp}) (D3::{grp}) (* Km C3::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3a::{grp}) (H1_det3::{grp}) (* Kh1_D Sys_det3a::{grp}))\n' \
f'(reaction hospitalization_2_det_{grp} (Sys_det3a::{grp}) (H2pre_det3::{grp}) (* Kh2_D Sys_det3a::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3a::{grp}) (H3_det3::{grp}) (* Kh3_D Sys_det3a::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3b::{grp}) (H1_det3::{grp}) (* Kh1 Sys_det3b::{grp}))\n' \
f'(reaction hospitalization_2pre_det_{grp} (Sys_det3b::{grp}) (H2pre_det3::{grp}) (* Kh2 Sys_det3b::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3b::{grp}) (H3_det3::{grp}) (* Kh3 Sys_det3b::{grp}))\n' \
f'(reaction critical_2_det2_{grp} (H2pre_det3::{grp}) (C2_det3::{grp}) (* Kc H2pre_det3::{grp}))\n' \
f'(reaction critical_3_det2_{grp} (H3_det3::{grp}) (C3_det3::{grp}) (* Kc H3_det3::{grp}))\n' \
f'(reaction deaths_det3_{grp} (C3_det3::{grp}) (D3_det3::{grp}) (* Km C3_det3::{grp}))\n' \
f'(reaction recovery_As_{grp} (As::{grp}) (RAs::{grp}) (* Kr_a_D As::{grp}))\n' \
f'(reaction recovery_As_det_{grp} (As_det1::{grp}) (RAs_det1::{grp}) (* Kr_a_D As_det1::{grp}))\n' \
f'(reaction recovery_Sym_{grp} (Sym::{grp}) (RSym::{grp}) (* Kr_m_D Sym::{grp}))\n' \
f'(reaction recovery_Sym_det2a_{grp} (Sym_det2a::{grp}) (RSym_det2::{grp}) (* Kr_m_D Sym_det2a::{grp}))\n' \
f'(reaction recovery_Sym_det2b_{grp} (Sym_det2b::{grp}) (RSym_det2::{grp}) (* Kr_m Sym_det2b::{grp}))\n'
if self.expandModel == None:
reaction_str = expand_base_str + reaction_str_III
if self.expandModel == "SymSys" or self.expandModel == "uniform":
reaction_str = expand_testDelay_SymSys_str + reaction_str_III
if self.expandModel == 'AsSymSys':
reaction_str = expand_testDelay_AsSymSys_str + reaction_str_III
if 'vaccine' in self.add_interventions:
reaction_str_V = reaction_str.replace(f'_{grp}',f'_V_{grp}')
reaction_str_V = reaction_str_V.replace(f'::{grp}', f'_V::{grp}')
reaction_str = reaction_str + reaction_str_V
"""Custom adjustments - not automated/integrated yet"""
reaction_str = reaction_str.replace('_V_V', '_V')
reaction_str = reaction_str.replace('Ki_V', 'Ki')
reaction_str = reaction_str.replace('N_V', 'N')
"""Vaccinated-population specific parameters"""
reaction_str = reaction_str.replace('Kl E_V::', 'KlV E_V::')
reaction_str = reaction_str.replace('Ks E_V::', 'KsV E_V::')
reaction_str = reaction_str.replace('Ksym P_V::', 'KsymV P_V::')
reaction_str = reaction_str.replace('Ksys P_V::', 'KsysV P_V::')
reaction_str = reaction_str.replace('Ksym P_det_V::', 'KsymV P_det_V::')
reaction_str = reaction_str.replace('Ksys P_det_V::', 'KsysV P_det_V::')
reaction_str = reaction_str_I + reaction_str
return reaction_str
def write_time_varying_parameter(self, total_string):
"""Time varying parameter that have been fitted to data, or informed by local data.
Parameters and corresponding sub-functions:
- fraction_critical: `write_frac_crit_change`
- fraction_dead: `write_fraction_dead_change`
- dSys: `write_dSys_change`
- d_Sym: `write_d_Sym_P_As_change`
- dP_As: `write_d_Sym_P_As_change`
- Ki (monthly multipliers): `write_ki_multiplier_change`
- recovery_time_crit: `write_recovery_time_crit_change`
- recovery_time_hosp: `write_recovery_time_hosp_change`
All functions take a required argument, nchanges, which defines the number of updates.
The default has been set within the function and currently would need to be edited manually.
"""
def write_frac_crit_change(nchanges):
n_frac_crit_change = range(1, nchanges+1)
frac_crit_change_observe = '(observe fraction_severe_t fraction_severe)\n(observe frac_crit_t fraction_critical)\n'
frac_crit_change_timeevent = ''.join([f'(time-event frac_crit_adjust{i} @crit_time_{i}@ '
f'('
f'(fraction_critical @fraction_critical_change{i}@) '
f'(fraction_hospitalized (- 1 (+ fraction_critical fraction_dead))) '
f'(Kh1 (/ fraction_hospitalized time_to_hospitalization)) '
f'(Kh2 (/ fraction_critical time_to_hospitalization )) '
f'(Kh1_D (/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))) '
f'(Kh2_D (/ fraction_critical (- time_to_hospitalization time_D_Sys)))'
f')'
f')'
f'\n' for i in n_frac_crit_change])
return frac_crit_change_observe + frac_crit_change_timeevent
def write_fraction_dead_change(nchanges):
n_fraction_dead_change = range(1, nchanges+1)
fraction_dead_change_observe = '(observe fraction_dead_t fraction_dead)\n' \
'(observe fraction_hospitalized_t fraction_hospitalized)\n'
fraction_dead_change_timeevent = ''.join([f'(time-event fraction_dead_adjust2 @fraction_dead_time_{i}@ '
f'('
f'(fraction_dead @fraction_dead_change{i}@) '
f'(fraction_hospitalized (- 1 (+ fraction_critical fraction_dead))) '
f'(Kh1 (/ fraction_hospitalized time_to_hospitalization)) '
f'(Kh2 (/ fraction_critical time_to_hospitalization )) '
f'(Kh1_D (/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))) '
f'(Kh2_D (/ fraction_critical (- time_to_hospitalization time_D_Sys)))'
f')'
f')'
f' \n' for i in n_fraction_dead_change])
return fraction_dead_change_observe + fraction_dead_change_timeevent
def write_dSys_change(nchanges):
n_dSys_change = range(1, nchanges+1)
dSys_change_observe = '(observe d_Sys_t d_Sys)\n'
dSys_change_timeevent = ''.join([f'(time-event dSys_change{i} @d_Sys_change_time_{i}@ '
f'((d_Sys @d_Sys_incr{i}@))'
f')'
f'\n' for i in n_dSys_change])
return dSys_change_observe + dSys_change_timeevent
def write_ki_multiplier_change(nchanges,fit_param):
n_ki_multiplier = ['3a','3b','3c'] + list(range(4, nchanges+1))
ki_multiplier_change_str = ''
for grp in self.grpList:
temp_str_param = ''.join([f'(param Ki_red{i}_{grp} '
f'(* Ki_{grp} @ki_multiplier_{i}_{grp}@)'
f')'
f'\n' for i in n_ki_multiplier])
temp_str_timeevent = ''.join([f'(time-event ki_multiplier_change_{i} @ki_multiplier_time_{i}@ '
f'((Ki_{grp} Ki_red{i}_{grp}))'
f')'
f'\n' for i in n_ki_multiplier])
if fit_param and 'ki_multiplier' in fit_param:
i = fit_param.split('_')[-1]
temp_str_param = temp_str_param.replace(f'@ki_multiplier_{i}_{grp}@', f'(* @ki_multiplier_{i}_{grp}@ @scalingfactor@)')
ki_multiplier_change_str = ki_multiplier_change_str + temp_str_param + temp_str_timeevent
return ki_multiplier_change_str
def write_d_Sym_P_As_change(nchanges):
d_Sym_P_As_change_observe = '(observe d_Sym_t d_Sym)\n' \
'(observe d_P_t d_P)\n' \
'(observe d_As_t d_As)\n'
n_d_PAs_changes = range(1,nchanges+1)
d_Sym_P_As_change_param = ''.join([f'(param d_PAs_change{i} '
f'(/ @d_Sym_change{i}@ dSym_dAsP_ratio)'
f')'
f'\n' for i in n_d_PAs_changes])
d_Sym_P_As_change_timeevent = ''.join([f'(time-event d_Sym_change{i} @d_Sym_change_time_{i}@ '
f'('
f'(d_Sym @d_Sym_change{i}@) ' \
f'(d_P d_PAs_change1) ' \
f'(d_As d_PAs_change{i}))'
f')'
f'\n' for i in n_d_PAs_changes])
return d_Sym_P_As_change_observe + d_Sym_P_As_change_param + d_Sym_P_As_change_timeevent
def write_recovery_time_crit_change(nchanges):
n_recovery_time_crit_change = range(1,nchanges+1)
recovery_time_crit_change = ''
for grp in self.grpList:
grpout = covidModel.sub(grp)
recovery_time_crit_change_param = f'(param recovery_time_crit_{grp} recovery_time_crit)\n' \
f'(param Kr_c{grp} (/ 1 recovery_time_crit_{grp}))\n' \
f'(observe recovery_time_crit_t_{grpout} recovery_time_crit_{grp})' \
f'\n'
recovery_time_crit_change_timeevent = ''.join([f'(time-event LOS_ICU_change_{i} @recovery_time_crit_change_time_{i}_{grp}@ '
f'('
f'(recovery_time_crit_{grp} @recovery_time_crit_change{i}_{grp}@) '
f'(Kr_c{grp} '
f'(/ 1 @recovery_time_crit_change{i}_{grp}@))'
f')'
f')'
f'\n' for i in n_recovery_time_crit_change])
recovery_time_crit_change = recovery_time_crit_change + \
recovery_time_crit_change_param + \
recovery_time_crit_change_timeevent
return recovery_time_crit_change
def write_recovery_time_hosp_change(nchanges):
n_recovery_time_hosp_change = range(1, nchanges + 1)
recovery_time_hosp_change = ''
for grp in self.grpList:
grpout = covidModel.sub(grp)
recovery_time_hosp_change_param = f'(param recovery_time_hosp_{grp} recovery_time_hosp)\n' \
f'(param Kr_h{grp} (/ 1 recovery_time_hosp_{grp}))\n' \
f'(observe recovery_time_hosp_t_{grpout} recovery_time_hosp_{grp})' \
f'\n'
recovery_time_hosp_change_timeevent = ''.join(
[f'(time-event LOS_nonICU_change_{i} @recovery_time_hosp_change_time_{i}_{grp}@ '
f'('
f'(recovery_time_hosp_{grp} @recovery_time_hosp_change{i}_{grp}@) '
f'(Kr_h{grp} (/ 1 @recovery_time_hosp_change{i}_{grp}@))'
f')'
f')'
f'\n' for i in n_recovery_time_hosp_change])
recovery_time_hosp_change = recovery_time_hosp_change + recovery_time_hosp_change_param + recovery_time_hosp_change_timeevent
return recovery_time_hosp_change
config_dic = covidModel.get_configs(key ='time_varying_parameter', config_file='intervention_emodl_config.yaml')
param_update_string = write_ki_multiplier_change(nchanges=config_dic['n_ki_multiplier'], fit_param = self.fit_param) + \
write_dSys_change(nchanges=config_dic['n_dSys_change']) + \
write_d_Sym_P_As_change(nchanges=config_dic['n_d_Sym_P_As_change']) + \
write_frac_crit_change(nchanges=config_dic['n_frac_crit_change']) + \
write_fraction_dead_change(nchanges=config_dic['n_fraction_dead_change']) + \
write_recovery_time_crit_change(nchanges=config_dic['n_recovery_time_crit_change']) + \
write_recovery_time_hosp_change(nchanges=config_dic['n_recovery_time_hosp_change'])
total_string = total_string.replace(';[TIMEVARYING_PARAMETERS]', param_update_string)
return total_string
def get_intervention_dates(intervention_param,scen):
"""intervention dates"""
n_gradual_steps = intervention_param['n_gradual_steps']
config_dic_dates = covidModel.get_configs(key ='time_parameters', config_file='extendedcobey_200428.yaml') #FIXME args.masterconfig
#FIXME more flexible read in and return of dates for any number of scenarios
#for i in range(1,nscenarios)
intervention_start = pd.to_datetime(config_dic_dates[f'{scen}_start']['function_kwargs']['dates'])
intervention_scaleupend = pd.to_datetime(config_dic_dates[f'{scen}_scaleupend']['function_kwargs']['dates'])
#intervention_end = pd.to_datetime(config_dic_dates[f'{scen}_end']['function_kwargs']['dates'])
if n_gradual_steps > 1 and intervention_scaleupend < pd.Timestamp('2090-01-01') :
date_freq = (intervention_scaleupend - intervention_start) /(n_gradual_steps-1)
intervention_dates = pd.date_range(start=intervention_start,end=intervention_scaleupend, freq=date_freq).tolist()
else:
n_gradual_steps = 1
intervention_dates = [intervention_start]
return n_gradual_steps, intervention_dates
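# Worked example (hypothetical config values): with n_gradual_steps = 4,
# intervention_start 2021-03-01 and intervention_scaleupend 2021-03-31,
# date_freq is 10 days and pd.date_range yields
# [2021-03-01, 2021-03-11, 2021-03-21, 2021-03-31].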
def write_interventions(self, total_string):
""" Write interventions
Interventions defined in sub-functions:
- bvariant: `write_bvariant`
- intervention_stop: `write_intervention_stop`
- transmission_increase: `write_transmission_increase`
- rollback: `write_rollback`
- gradual_reopening: `write_gradual_reopening`
"""
""" Get intervention configurations """
intervention_param = covidModel.get_configs(key ='interventions', config_file=self.intervention_config)
def write_vaccine_generic():
emodl_str = ';COVID-19 vaccine scenario\n'
emodl_param_initial = '(param Kv 0)\n(observe daily_vaccinated Kv)\n'
csvfile = intervention_param['vaccination_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
intervention_effectsizes = list(df['daily_cov'].values)
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event vaccination_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ((Kv {intervention_effectsizes[i-1]})))\n'
emodl_timeevents = emodl_timeevents + temp_str
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='vaccine')
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event vaccination_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ((Kv (* @vacc_daily_cov@ {(1 / (len(intervention_dates)) * i)}) )))\n'
emodl_timeevents = emodl_timeevents + temp_str
emodl_str = emodl_str + emodl_param_initial + emodl_timeevents
return emodl_str
def write_vaccine():
emodl_str = ';COVID-19 vaccine scenario\n'
csvfile = intervention_param['vaccination_csv']
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
df['Date'] = pd.to_datetime(df['date'])
emodl_str_grp = ""
for grp in self.grpList:
grp_num = grp.replace('EMS_','')
df_grp = df[df['covid_region']==int(grp_num)]
emodl_param_initial = f'(param Kv_{grp} 0)\n' \
f'(observe n_daily_vaccinated_{grp} (* Kv_{grp} S::{grp} ))\n'
intervention_dates = list(df_grp['Date'].values) + [max(df_grp['Date']) + pd.Timedelta(1,'days')]
intervention_effectsizes = list(df_grp['daily_first_vacc_perc'].values) + [0]
#intervention_effectsizes = list(df_grp['daily_first_vacc'].values) + [0]
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event daily_vaccinations_{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ((Kv_{grp} {intervention_effectsizes[i-1]})))\n'
emodl_timeevents = emodl_timeevents + temp_str
emodl_str_grp = emodl_str_grp + emodl_param_initial + emodl_timeevents
del df_grp
"""Adjust fraction severe"""
df = pd.read_csv(os.path.join(git_dir,"experiment_configs", 'input_csv', 'vaccination_fractionSevere_adjustment_IL.csv'))
df['Date'] = pd.to_datetime(df['date'])
intervention_dates = df['Date'].unique()
fraction_severe_notV = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f"(time-event fraction_severe_changeV_{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} (" \
f"(fraction_severe (- @fraction_severe@ (* (- @fraction_severe@ (* @fraction_severe@ reduced_fraction_Sys_notV)) {df['persons_above65_first_vaccinated_perc'][i-1]}))) " \
"(Ksys (* fraction_severe (/ 1 time_to_symptoms))) " \
"(Ksym (* (- 1 fraction_severe) (/ 1 time_to_symptoms)))))\n"
fraction_severe_notV = fraction_severe_notV + temp_str
emodl_str = fraction_severe_notV + emodl_str + emodl_str_grp
return emodl_str
def write_bvariant():
emodl_str = ';COVID-19 bvariant scenario\n'
csvfile = intervention_param['bvariant_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
fracinfect = list(df['variant_freq'].values)
fracinfect_timevent = ''.join([f'(time-event bvariant_fracinfect {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} '
f'((bvariant_fracinfect {fracinfect[i - 1]})))\n'
for i, date in enumerate(intervention_dates, 1)])
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event ki_bvariant_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ('
temp_str = temp_str + ''.join([f' (Ki_{grp} ( + Ki_{grp} (* (* Ki_{grp} 0.5) (* @bvariant_fracinfect@ {fracinfect[i - 1]} ))))' for grp in self.grpList])
temp_str = temp_str + f'))\n'
emodl_timeevents = emodl_timeevents + temp_str
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='bvariant')
fracinfect_timevent = ''.join([f'(time-event bvariant_fracinfect {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)}'
f' ((bvariant_fracinfect (* @bvariant_fracinfect@ '
f'{(1 / (len(intervention_dates)) * i)})))'
f')\n' for i, date in enumerate(intervention_dates, 1)])
emodl_param = ''.join([ f'(param Ki_bvariant_initial_{grp} 0)\n'
f'(time-event ki_bvariant_initial {covidModel.DateToTimestep(pd.Timestamp(intervention_dates[0])-pd.Timedelta(2,"days"), self.startdate)} ('
f'(Ki_bvariant_initial_{grp} Ki_{grp})'
f'))\n ' for grp in self.grpList])
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event ki_bvariant_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ('
temp_str = temp_str + ''.join([f' (Ki_{grp} ( + Ki_bvariant_initial_{grp} (* (* Ki_bvariant_initial_{grp} @bvariant_infectivity@) (* @bvariant_fracinfect@ {(1 / (len(intervention_dates)) * i)} ))))' for grp in self.grpList])
temp_str = temp_str + f'))\n'
emodl_timeevents = emodl_timeevents + temp_str
bvariant_infectivity = emodl_param + emodl_timeevents
"""keep track of fracinfect, and use for update symptom development reactions"""
fracinfect_str = '(param bvariant_fracinfect 0)\n' \
'(observe bvariant_fracinfect_t bvariant_fracinfect)\n' + fracinfect_timevent
"""fraction severe adjustment over time"""
frac_severe_timevent = ''.join([f'(time-event fraction_severe_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} '
f'('
f'(fraction_severe (+ '
f'(* @fraction_severe@ (- 1 bvariant_fracinfect)) '
f'(* fraction_severeB bvariant_fracinfect ) '
f')) '
f'(Ksys ( * fraction_severe (/ 1 time_to_symptoms))) '
f'(Ksym ( * (- 1 fraction_severe)(/ 1 time_to_symptoms)))'
f')'
f')\n' for i, date in enumerate(intervention_dates, 1)])
frac_severe_str = '(param fraction_severeB (* @fraction_severe@ @bvariant_severity@))\n' + frac_severe_timevent
if 'vaccine' in self.add_interventions:
"""fraction severe adjustment over time"""
frac_severeV_timevent = ''.join([f'(time-event fraction_severe_V_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} '
f'('
f'(fraction_severe_V (+ '
f'(* @fraction_severe@ @reduced_fraction_Sys@ (- 1 bvariant_fracinfect)) '
f'(* fraction_severeB @reduced_fraction_Sys@ bvariant_fracinfect ) '
f')) '
f'(KsysV ( * fraction_severe_V (/ 1 time_to_symptoms))) '
f'(KsymV ( * (- 1 fraction_severe_V)(/ 1 time_to_symptoms)))'
f')'
f')\n' for i, date in enumerate(intervention_dates, 1)])
frac_severeV_str = '(observe fraction_severe_V_t fraction_severe_V)\n' + frac_severeV_timevent
frac_severe_str = frac_severe_str + frac_severeV_str
emodl_str = emodl_str + bvariant_infectivity + fracinfect_str + frac_severe_str
return emodl_str
def write_rollback():
emodl_str = ';COVID-19 rollback scenario\n'
rollback_regionspecific = intervention_param['rollback_regionspecific']
csvfile = intervention_param['rollback_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
perc_rollback = list(df['perc_reopen'].values)
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='rollback')
perc_rollback = ['@rollback_multiplier@' for _ in range(len(intervention_dates))]
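# No csv supplied: apply the @rollback_multiplier@ template parameter at every gradual-step date.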
emodl_param = ''.join([ f'(param Ki_rollback_initial_{grp} 0)\n'
f'(time-event ki_rollback_initial_ {covidModel.DateToTimestep(pd.Timestamp(intervention_dates[0])-
|
pd.Timedelta(2,"days")
|
pandas.Timedelta
|
"""
Logistic Regression based upon sklearn.
"""
import datatable as dt
import numpy as np
import random
import pandas as pd
import os
import copy
import codecs
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_auc_score, make_scorer
from h2oaicore.models import CustomModel
from h2oaicore.systemutils import config, physical_cores_count, save_obj_atomically, load_obj, DefaultOrderedDict
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning
from h2oaicore.transformers import CatOriginalTransformer, FrequentTransformer, CVTargetEncodeTransformer
from h2oaicore.transformer_utils import Transformer
from h2oaicore.transformers_more import CatTransformer, LexiLabelEncoderTransformer
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.ensemble import VotingClassifier
class LogisticRegressionModel(CustomModel):
"""
Logistic Regression
Useful when there are weak or no interactions between features,
or a large inherent number of levels in categorical features
Other useful DAI options if want to only use feature made internally by this model:
config.prob_prune_genes = False
config.prob_prune_by_features = False
# Useful if want training to ultimately see all data with validated max_iter
config.fixed_ensemble_level=0
Recipe to do:
1) Add separate LogisticRegressionEarlyStopping class to use warm start to take iterations a portion at a time,
and score with known/given metric, and early stop to avoid overfitting on validation.
2) Improve bisection stepping for search
3) Consider from deployml.sklearn import LogisticRegressionBase
4) Implement LinearRegression/ElasticNet (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model)
5) Implement other categorical missing encodings (same strategies as numerics)
6) Implement other scorers (i.e. checking score_f_name -> sklearn metric or using DAI metrics)
"""
_kaggle = False # some kaggle specific optimizations for https://www.kaggle.com/c/cat-in-the-dat
# with _kaggle_features=False and no catboost features:
# gives 0.8043 DAI validation for some seeds/runs,
# which leads to 0.80802 public score after only 2 minutes of running on accuracy=2, interpretability=1
# with _kaggle_features=False and catboost features:
# gives 0.8054 DAI validation for some seeds/runs,
# which leads to 0.80814 public score after only 10 minutes of running on accuracy=7, interpretability=1
# whether to generate features for kaggle
# these features do not help the score, but do make sense as plausible features to build
_kaggle_features = False
# whether to use validation and train together (assumes test with sample_weight=0 already part of train+valid) for features
_kaggle_mode = False
# numerical imputation for all columns (could be done per column chosen by mutations)
_impute_num_type = 'sklearn' # best for linear models
# _impute_num_type = 'oob' # risky for linear models, but can be used for testing
_impute_int_type = 'oob'
_impute_bool_type = 'oob'
_oob_bool = False
# categorical imputation for all columns (could be done per column chosen by mutations)
_impute_cat_type = 'oob'
_oob_cat = "__OOB_CAT__"
# unique identifier for OHE feature names
_ohe_postfix = "_*#!^()^{}"
# not required to be this strict, but good starting point to only use this recipe's features
_included_transformers = ['CatOriginalTransformer', 'OriginalTransformer', 'CatTransformer']
if _kaggle and 'CatTransformer' in _included_transformers:
# Just handle all cats directly
_included_transformers.remove('CatTransformer')
_can_handle_non_numeric = True # tell DAI we can handle non-numeric (i.e. strings)
_can_handle_categorical = True # tell DAI we can handle numerically encoded categoricals for use as categoricals
_num_as_cat = False or _kaggle # treating numeric as categorical best handled per column, but can force all numerics as cats
_num_as_num = False
_mutate_all = True # tell DAI we fully control mutation
_mutate_by_one = False  # tell our recipe to change only one key at a time; can limit exploration if set to True
_mutate_by_one_sometimes = True
_always_defaults = False
_randomized_random_state = False
_overfit_limit_iteration_step = 10
# tell DAI want to keep track of self.params changes during fit, and to average numeric values across folds (if any)
_used_return_params = True
_average_return_params = True
# other DAI vars
_regression = False
_binary = True
_multiclass = True
_parallel_task = True  # setting to False may lead to faster performance if not doing grid search or cv search (should also set expert batch_cpu_tuning_max_workers to number of cores)
_fit_by_iteration = True
_fit_iteration_name = 'max_iter'
_display_name = "LR"
_description = "Logistic Regression"
_allow_basis_of_default_individuals = False
_fs_permute_must_use_self = True
_check_stall = False  # avoid stall check, joblib loky stuff detaches sometimes
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
# recipe vars for encoding choices
_use_numerics = True
_use_ohe_encoding = True
_use_target_encoding = False
_use_target_encoding_other = False
_use_ordinal_encoding = False
_use_catboost_encoding = False or _kaggle # Note: Requires data be randomly shuffled so target is not in special order
_use_woe_encoding = False
# tell DAI what pip modules we will use
_modules_needed_by_name = ['category_encoders']
if _use_target_encoding_other:
_modules_needed_by_name.extend(['target_encoding'])
# _modules_needed_by_name.extend(['git+https://github.com/h2oai/target_encoding#egg=target_encoding'])
# whether to show debug prints and write munged view to disk
_debug = True
# whether to cache feature results, only by transformer instance and X shape, so risky to use without care.
_cache = False
_ensemble = False
def set_default_params(self, accuracy=10, time_tolerance=10,
interpretability=1, **kwargs):
# Fill up parameters we care about
self.params = {}
self.mutate_params(get_default=True, accuracy=accuracy, time_tolerance=time_tolerance,
interpretability=interpretability, **kwargs)
def mutate_params(self, accuracy=10, time_tolerance=10, interpretability=1, **kwargs):
get_default = 'get_default' in kwargs and kwargs['get_default'] or self._always_defaults
params_orig = copy.deepcopy(self.params)
# control some behavior by how often the model was mutated.
# Good models that improve get repeatedly mutated, bad models tend to be one-off mutations of good models
if get_default:
self.params['mutation_count'] = 0
else:
if 'mutation_count' in self.params:
self.params['mutation_count'] += 1
else:
self.params['mutation_count'] = 0
# keep track of fit count, for other control over hyper parameter search in this recipe
if 'fit_count' not in self.params:
self.params['fit_count'] = 0
self.params['random_state'] = kwargs.get("random_state", 1234)
if self._randomized_random_state:
self.params['random_state'] = random.randint(0, 32000)
self.params['n_jobs'] = self.params_base.get('n_jobs', max(1, physical_cores_count))
# Modify certain parameters for tuning
if self._kaggle:
C_list = [0.095, 0.1, 0.115, 0.11, 0.105, 0.12, 0.125, 0.13, 0.14]
else:
C_list = [0.05, 0.075, 0.1, 0.15, 0.2, 1.0, 5.0]
self.params["C"] = float(np.random.choice(C_list)) if not get_default else 0.12
tol_list = [1e-4, 1e-3, 1e-5]
if accuracy < 5:
default_tol = 1e-4
elif accuracy < 6:
default_tol = 1e-5
elif accuracy <= 7:
default_tol = 1e-6
else:
default_tol = 1e-7
if self._kaggle:
default_tol = 1e-8
if default_tol not in tol_list:
tol_list.append(default_tol)
self.params["tol"] = float(np.random.choice(tol_list)) if not (self._kaggle or get_default) else default_tol
# solver_list = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
# newton-cg too slow
# sag too slow
# solver_list = ['lbfgs', 'liblinear', 'saga']
solver_list = ['lbfgs']
self.params["solver"] = str(np.random.choice(solver_list)) if not get_default else 'lbfgs'
if self._kaggle:
max_iter_list = [300, 350, 400, 450, 500, 700, 800, 900, 1000, 1500]
else:
max_iter_list = [150, 175, 200, 225, 250, 300]
self.params["max_iter"] = int(np.random.choice(max_iter_list)) if not get_default else 700
# self.params["max_iter"] = 37
if self.params["solver"] in ['lbfgs', 'newton-cg', 'sag']:
penalty_list = ['l2', 'none']
elif self.params["solver"] in ['saga']:
penalty_list = ['l1', 'l2', 'none']
elif self.params["solver"] in ['liblinear']:
penalty_list = ['l1']
else:
raise RuntimeError("No such solver: %s" % self.params['solver'])
self.params["penalty"] = str(np.random.choice(penalty_list)) if not (self._kaggle or get_default) else 'l2'
if self.params["penalty"] == 'elasticnet':
l1_ratio_list = [0, 0.25, 0.5, 0.75, 1.0]
self.params["l1_ratio"] = float(np.random.choice(l1_ratio_list))
else:
self.params.pop('l1_ratio', None)
if self.params["penalty"] == 'none':
self.params.pop('C', None)
else:
self.params['C'] = float(np.random.choice(C_list)) if not get_default else 0.12
if self.num_classes > 2:
self.params['multi_class'] = 'auto'
strategy_list = ['mean', 'median', 'most_frequent', 'constant']
self.params['strategy'] = str(np.random.choice(strategy_list)) if not get_default else 'mean'
if self._use_target_encoding:
min_samples_leaf_list = [1, 10, 50, 100]
self.params['min_samples_leaf'] = float(np.random.choice(min_samples_leaf_list))
smoothing_list = [1.0, 0.5, 10.0, 50.0]
self.params['smoothing'] = float(np.random.choice(smoothing_list))
if self._use_catboost_encoding:
if self._kaggle:
sigma_list = [None, 0.1, 0.2, 0.3, 0.4, 0.45, 0.5, 0.55, 0.6, 0.7, 0.8, 0.9]
else:
sigma_list = [None, 0.01, 0.05, 0.1, 0.5]
self.params['sigma'] = random.choice(sigma_list)
if self._use_woe_encoding:
randomized_list = [True, False]
self.params['randomized'] = random.choice(randomized_list)
sigma_woe_list = [0.05, 0.001, 0.01, 0.1, 0.005]
self.params['sigma_woe'] = random.choice(sigma_woe_list)
regularization_list = [1.0, 0.1, 2.0]
self.params['regularization'] = random.choice(regularization_list)
# control search in recipe
self.params['grid_search_iterations'] = accuracy >= 8
# cv search for hyper parameters, can be used in conjunction with _grid_search_by_iterations = True or False
self.params['cv_search'] = accuracy >= 9
if self._mutate_by_one_sometimes:
if np.random.random() > 0.5:
do_mutate_by_one = True
else:
do_mutate_by_one = False
else:
do_mutate_by_one = self._mutate_by_one
if do_mutate_by_one and not get_default and params_orig:
pick_key = str(np.random.choice(list(self.params.keys()), size=1)[0])
value = self.params[pick_key]
self.params = copy.deepcopy(params_orig)
self.params[pick_key] = value
# validate parameters to avoid single key leading to invalid overall parameters
if pick_key == 'penalty':
# has restrictions need to switch other keys if mismatched
if self.params["solver"] in ['lbfgs', 'newton-cg', 'sag']:
penalty_list = ['l2', 'none']
elif self.params["solver"] in ['saga']:
penalty_list = ['l1', 'l2', 'none']
elif self.params["solver"] in ['liblinear']:
penalty_list = ['l1']
if not self.params['penalty'] in penalty_list:
self.params['penalty'] = penalty_list[0] # just choose first
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
if self._kaggle_mode and eval_set is not None:
new_X = dt.rbind([X, eval_set[0][0]])
new_sample_weight = np.concatenate([sample_weight, sample_weight_eval_set[0]])
new_sample_weight[X.shape[0]:X.shape[0] + eval_set[0][0].shape[0]] = 0
new_y = np.concatenate([y, eval_set[0][1]])
X = new_X
y = new_y
sample_weight = new_sample_weight
orig_dir = os.getcwd()
os.chdir(self.context.experiment_tmp_dir) # for joblib
os.makedirs(self.context.experiment_tmp_dir, exist_ok=True) # another copy for DAI transformers
orig_cols = list(X.names)
if self.num_classes >= 2:
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
min_count = np.min(np.unique(y, return_counts=True)[1])
if min_count < 9:
self.params['cv_search'] = False
if min_count < 3:
self.params['grid_search_iterations'] = False
self.params['cv_search'] = False
if self._ensemble:
self.params['grid_search_iterations'] = False
self.params['cv_search'] = False
# save pre-datatable-imputed X
X_dt = X
# Apply OOB imputation
self.oob_imputer = OOBImpute(self._impute_num_type, self._impute_int_type, self._impute_bool_type,
self._impute_cat_type, self._oob_bool, self._oob_cat)
X = self.oob_imputer.fit_transform(X)
# convert to pandas for sklearn
X = X.to_pandas()
X_orig_cols_names = list(X.columns)
if self._kaggle_features:
self.features = make_features(cache=self._cache)
X = self.features.fit_transform(X, y, **kwargs)
else:
self.features = None
# print("LR: pandas dtypes: %s" % (str(list(X.dtypes))))
# FEATURE GROUPS
# Choose which features are numeric or categorical
cat_features = [x for x in X_orig_cols_names if CatOriginalTransformer.is_me_transformed(x)]
catlabel_features = [x for x in X_orig_cols_names if CatTransformer.is_me_transformed(x)]
# can add explicit column name list to below force_cats
force_cats = cat_features + catlabel_features
actual_numerical_features = (X.dtypes == 'float') | (X.dtypes == 'float32') | (
X.dtypes == 'float64') # | (X.dtypes == 'int') | (X.dtypes == 'int32') | (X.dtypes == 'int64') | (X.dtypes == 'bool')
# choose if numeric is treated as categorical
if not self._num_as_cat or self._num_as_num:
# treat (e.g.) binary as both numeric and categorical
numerical_features = copy.deepcopy(actual_numerical_features)
else:
# no numerics
numerical_features = X.dtypes == 'invalid'
if self._num_as_cat:
# then can't have None sent to cats, impute already up front
# force oob imputation for numerics
self.oob_imputer = OOBImpute('oob', 'oob', 'oob',
self._impute_cat_type, self._oob_bool, self._oob_cat)
X = self.oob_imputer.fit_transform(X_dt)
X = X.to_pandas()
if self._kaggle_features:
X = self.features.fit_transform(X, y, **kwargs)
if self._kaggle_features:
numerical_features = self.features.update_numerical_features(numerical_features)
if not self._num_as_cat:
# then cats are only things that are not numeric
categorical_features = ~actual_numerical_features
else:
# then everything is a cat
categorical_features = ~numerical_features # (X.dtypes == 'invalid')
# below can lead to overlap between what is numeric and what is categorical
more_cats = (pd.Series([True if x in force_cats else False for x in list(categorical_features.index)],
index=categorical_features.index))
categorical_features = (categorical_features) | (more_cats)
if self._kaggle_features:
categorical_features = self.features.update_categorical_features(categorical_features)
cat_X = X.loc[:, categorical_features]
num_X = X.loc[:, numerical_features]
if self._debug:
print("LR: Cat names: %s" % str(list(cat_X.columns)))
print("LR: Num names: %s" % str(list(num_X.columns)))
# TRANSFORMERS
lr_params = copy.deepcopy(self.params)
lr_params.pop('grid_search_by_iterations', None)
lr_params.pop('cv_search', None)
grid_search = False # WIP
full_features_list = []
transformers = []
if self._use_numerics and any(numerical_features.values):
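# Numeric columns are imputed (strategy chosen in mutate_params) and then standardized,
# so the regularization penalty sees all numeric features on a comparable scale.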
impute_params = {}
impute_params['strategy'] = lr_params.pop('strategy', 'mean')
full_features_list.extend(list(num_X.columns))
transformers.append(
(make_pipeline(SimpleImputer(**impute_params), StandardScaler()), numerical_features)
)
# http://contrib.scikit-learn.org/categorical-encoding/
if self._use_ordinal_encoding and any(categorical_features.values):
ord_params = dict(handle_missing='value', handle_unknown='value')
full_features_list.extend(list(cat_X.columns))
# Note: OrdinalEncoder doesn't handle unseen categories, while CategoricalEncoder used to
import category_encoders as ce
transformers.append(
(ce.OrdinalEncoder(**ord_params), categorical_features)
)
if self._use_catboost_encoding and any(categorical_features.values):
cb_params = dict(handle_missing='value', handle_unknown='value')
cb_params['sigma'] = lr_params.pop('sigma')
full_features_list.extend(list(cat_X.columns))
import category_encoders as ce
transformers.append(
(ce.CatBoostEncoder(**cb_params), categorical_features)
)
if self._use_woe_encoding and any(categorical_features.values):
woe_params = dict(handle_missing='value', handle_unknown='value')
woe_params['randomized'] = lr_params.pop('randomized')
woe_params['sigma'] = lr_params.pop('sigma_woe')
woe_params['regularization'] = lr_params.pop('regularization')
full_features_list.extend(list(cat_X.columns))
import category_encoders as ce
transformers.append(
(ce.WOEEncoder(**woe_params), categorical_features)
)
if self._use_target_encoding and any(categorical_features.values):
te_params = dict(handle_missing='value', handle_unknown='value')
te_params['min_samples_leaf'] = lr_params.pop('min_samples_leaf')
te_params['smoothing'] = lr_params.pop('smoothing')
full_features_list.extend(list(cat_X.columns))
import category_encoders as ce
transformers.append(
(ce.TargetEncoder(**te_params), categorical_features)
)
if self._use_target_encoding_other and any(categorical_features.values):
full_features_list.extend(list(cat_X.columns))
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.params['random_state'])
split_cv = [cv]
# split_cv = [3, 3]
ALPHA, MAX_UNIQUE, FEATURES_COUNT = get_TE_params(cat_X, debug=self._debug)
from target_encoding import TargetEncoder
transformers.append(
(TargetEncoder(alpha=ALPHA, max_unique=MAX_UNIQUE, split_in=split_cv),
categorical_features)
)
if self._use_ohe_encoding and any(categorical_features.values):
transformers.append(
(OneHotEncoder(handle_unknown='ignore', sparse=True), categorical_features)
)
assert len(transformers) > 0, "should have some features"
preprocess = make_column_transformer(*transformers)
# ESTIMATOR
lr_defaults = dict(penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None)
allowed_lr_kwargs_keys = lr_defaults.keys()
lr_params_copy = copy.deepcopy(lr_params)
for k, v in lr_params_copy.items():
if k not in allowed_lr_kwargs_keys:
lr_params.pop(k, None)
del lr_params_copy
can_score = self.num_classes == 2 and 'AUC' in self.params_base['score_f_name'].upper()
# print("LR: can_score: %s" % str(can_score))
if can_score:
scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_proba=True)
else:
scorer = None
if not ('C' in lr_params or 'l1_ratios' in lr_params):
# override
self.params['cv_search'] = False
if not self.params['cv_search']:
estimator = LogisticRegression(**lr_params)
estimator_name = 'logisticregression'
else:
lr_params_cv = copy.deepcopy(lr_params)
if 'C' in lr_params:
lr_params_cv['Cs'] = self.get_param_range(self.params['C'], self.params['fit_count'], func_type='log')
# print("LR: CV: Cs: %s" % str(lr_params_cv['Cs']))
if 'l1_ratios' in lr_params:
lr_params_cv['l1_ratios'] = self.get_param_range(self.params['l1_ratio'], self.params['fit_count'],
func_type='linear')
# print("LR: CV: l1_ratios: %s" % str(lr_params_cv['l1_ratios']))
lr_params_cv.pop('n_jobs', None)
lr_params_cv.pop('C', None)
lr_params_cv.pop('l1_ratio', None)
if lr_params_cv['penalty'] == 'none':
lr_params_cv['penalty'] = 'l2'
estimator = LogisticRegressionCV(n_jobs=self.params['n_jobs'],
cv=3, refit=True, scoring=scorer, **lr_params_cv)
estimator_name = 'logisticregressioncv'
# PIPELINE
if not self._ensemble:
model = make_pipeline(
preprocess,
estimator, memory="./")
else:
ALPHA, MAX_UNIQUE, FEATURES_COUNT = get_TE_params(cat_X, debug=self._debug)
from target_encoding import TargetEncoderClassifier
te_estimator = TargetEncoderClassifier(alpha=ALPHA, max_unique=MAX_UNIQUE, used_features=FEATURES_COUNT)
estimators = [(estimator_name, estimator), ('teclassifier', te_estimator)]
model = make_pipeline(
preprocess,
VotingClassifier(estimators))
# FIT
if self.params['grid_search_iterations'] and can_score:
# WIP FIXME for multiclass and other scorers
from sklearn.model_selection import GridSearchCV
max_iter_range = self.get_param_range(self.params['max_iter'], self.params['fit_count'],
range_limit=self._overfit_limit_iteration_step, func_type='log')
# print("LR: max_iter_range: %s" % str(max_iter_range))
param_grid = {
'%s__max_iter' % estimator_name: max_iter_range,
}
grid_clf = GridSearchCV(model, param_grid, n_jobs=self.params['n_jobs'],
cv=3, iid=True, refit=True, scoring=scorer)
fitkwargs = dict()
fitkwargs["%s__sample_weight" % estimator_name] = sample_weight
grid_clf.fit(X, y, **fitkwargs)
model = grid_clf.best_estimator_
# print("LR: best_index=%d best_score: %g best_params: %s" % (
# grid_clf.best_index_, grid_clf.best_score_, str(grid_clf.best_params_)))
elif grid_search:
# WIP
from sklearn.model_selection import GridSearchCV
param_grid = {
'columntransformer__pipeline__simpleimputer__strategy': ['mean', 'median'],
'%s__C' % estimator_name: [0.1, 0.5, 1.0],
}
grid_clf = GridSearchCV(model, param_grid, cv=10, iid=False)
fitkwargs = dict()
fitkwargs["%s__sample_weight" % estimator_name] = sample_weight
grid_clf.fit(X, y, **fitkwargs)
model = grid_clf.best_estimator_
# self.best_params = grid_clf.best_params_
else:
fitkwargs = dict()
fitkwargs["%s__sample_weight" % estimator_name] = sample_weight
X = X.replace([np.inf, -np.inf], np.nan)
X = X.fillna(value=0)
model.fit(X, y, **fitkwargs)
# get actual LR model
lr_model = model.named_steps[estimator_name]
# average importances over classes
importances = np.average(np.fabs(np.array(lr_model.coef_)), axis=0)
# average iterations over classes (can't take max_iter per class)
iterations = int(np.average(lr_model.n_iter_))
# print("LR: iterations: %d" % iterations)
if self._debug:
full_features_list_copy = copy.deepcopy(full_features_list)
# reduce OHE features to original names
ohe_features_short = []
if self._use_ohe_encoding and any(categorical_features.values):
input_features = [x + self._ohe_postfix for x in cat_X.columns]
ohe_features = pd.Series(
model.named_steps['columntransformer'].named_transformers_['onehotencoder'].get_feature_names(
input_features=input_features))
def f(x):
return '_'.join(x.split(self._ohe_postfix + '_')[:-1])
# identify OHE features
ohe_features_short = ohe_features.apply(lambda x: f(x))
full_features_list.extend(list(ohe_features_short))
if self._debug:
full_features_list_copy.extend(list(ohe_features))
imp = pd.Series(importances, index=full_features_list_copy).sort_values(ascending=False)
import uuid
struuid = str(uuid.uuid4())
imp.to_csv("prepreimp_%s.csv" % struuid)
if self._debug:
imp = pd.Series(importances, index=full_features_list).sort_values(ascending=False)
import uuid
struuid = str(uuid.uuid4())
imp.to_csv("preimp_%s.csv" % struuid)
# aggregate our own features
if self._kaggle_features:
full_features_list = self.features.aggregate(full_features_list, importances)
msg = "LR: num=%d cat=%d : ohe=%d : imp=%d full=%d" % (
len(num_X.columns), len(cat_X.columns), len(ohe_features_short), len(importances), len(full_features_list))
if self._debug:
print(msg)
assert len(importances) == len(full_features_list), msg
if self._debug:
imp = pd.Series(importances, index=full_features_list).sort_values(ascending=False)
import uuid
struuid = str(uuid.uuid4())
imp.to_csv("imp_%s.csv" % struuid)
# aggregate importances by dai feature name
importances = pd.Series(np.abs(importances), index=full_features_list).groupby(level=0).mean()
assert len(importances) == len(
X_orig_cols_names), "lenimp=%d lenorigX=%d msg=%s : X.columns=%s dtypes=%s : full_features_list=%s" % (
len(importances), len(X_orig_cols_names), msg,
str(list(X.columns)), str(list(X.dtypes)), str(full_features_list))
# save hyper parameter searched results for next search
self.params['max_iter'] = iterations
if self.params['cv_search']:
self.params['C'] = np.average(lr_model.C_, axis=0)
if 'l1_ratios' in lr_params and self.params['cv_search']:
self.params['l1_ratio'] = np.average(lr_model.l1_ratio_, axis=0)
if 'fit_count' in self.params:
self.params['fit_count'] += 1
else:
self.params['fit_count'] = 0
importances_list = importances.tolist()
importances_list = list(np.array(importances_list) / np.max(importances_list))
self.set_model_properties(model=(model, self.features),
features=orig_cols,
importances=importances_list,
iterations=iterations)
self.features = None
os.chdir(orig_dir)
def get_param_range(self, param, fit_count, range_limit=None, func_type='linear'):
if func_type == 'log':
f = np.log
inv_f = np.exp
bottom = 1.0
top = 1.0
else:
f = np.abs
inv_f = np.abs
top = bottom = 1.0
# bisect toward optimal param
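# Worked example (linear case): param=200, fit_count=0 -> params_step=2,
# candidate range [100, 300) in 3 steps -> [100, 166, 233], plus the current
# value 200 -> [100, 166, 200, 233].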
step_count = 3
params_step = 2 + fit_count
start_range = param * (1.0 - bottom / params_step)
end_range = param * (1.0 + top / params_step)
if range_limit is not None:
if end_range - start_range < range_limit:
# if below some threshold, don't keep refining to avoid overfit
return [param]
start = f(start_range)
end = f(end_range)
step = 1.0 * (end - start) / step_count
param_range = np.arange(start, end, step)
if type(param) == int:
param_range = [int(inv_f(x)) for x in param_range if int(inv_f(x)) > 0]
else:
param_range = [inv_f(x) for x in param_range if inv_f(x) > 0]
if param not in param_range:
param_range.append(param)
param_range = sorted(param_range)
return param_range
def predict(self, X, **kwargs):
orig_dir = os.getcwd()
os.chdir(self.context.experiment_tmp_dir) # for joblib
X = dt.Frame(X)
X = self.oob_imputer.transform(X)
model_tuple, _, _, _ = self.get_model_properties()
model, features = model_tuple
X = X.to_pandas()
if self._kaggle_features and features is not None:
X = features.transform(X)
X = X.replace([np.inf, -np.inf], np.nan)
X = X.fillna(value=0)
if self.num_classes == 1:
preds = model.predict(X)
else:
preds = model.predict_proba(X)
os.chdir(orig_dir)
return preds
class OOBImpute(object):
def __init__(self, impute_num_type, impute_int_type, impute_bool_type, impute_cat_type, oob_bool, oob_cat):
self._impute_num_type = impute_num_type
self._impute_int_type = impute_int_type
self._impute_bool_type = impute_bool_type
self._impute_cat_type = impute_cat_type
self._oob_bool = oob_bool
self._oob_cat = oob_cat
def fit(self, X: dt.Frame):
# just ignore output
self.fit_transform(X)
def fit_transform(self, X: dt.Frame):
# IMPUTE
# print("LR: types number of columns: %d : %d %d %d %d" % (len(X.names), len(X[:, [float]].names), len(X[:, [int]].names), len(X[:, [bool]].names), len(X[:, [str]].names)))
for col in X[:, [float]].names:
XX = X[:, col]
XX.replace(None, np.nan)
X[:, col] = XX
if self._impute_num_type == 'oob':
# Replace missing values with a value smaller than all observed values
self.min = dict()
for col in X[:, [float]].names:
XX = X[:, col]
self.min[col] = XX.min1()
if self.min[col] is None or np.isnan(self.min[col]):
self.min[col] = -1e10
else:
self.min[col] -= 1
XX.replace(None, self.min[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
if self._impute_int_type == 'oob':
# Replace missing values with a value smaller than all observed values
self.min_int = dict()
for col in X[:, [int]].names:
XX = X[:, col]
self.min_int[col] = XX.min1()
if self.min_int[col] is None or np.isnan(self.min_int[col]):
self.min_int[col] = 0
XX.replace(None, self.min_int[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
if self._impute_bool_type == 'oob':
for col in X[:, [bool]].names:
XX = X[:, col]
XX.replace(None, self._oob_bool)
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
if self._impute_cat_type == 'oob':
for col in X[:, [str]].names:
XX = X[:, col]
XX.replace(None, self._oob_cat)
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
return X
def transform(self, X: dt.Frame):
if self._impute_num_type == 'oob':
for col in X[:, [float]].names:
XX = X[:, col]
XX.replace(None, self.min[col])
X[:, col] = XX
if self._impute_int_type == 'oob':
for col in X[:, [int]].names:
XX = X[:, col]
XX.replace(None, self.min_int[col])
X[:, col] = XX
if self._impute_bool_type == 'oob':
for col in X[:, [bool]].names:
XX = X[:, col]
XX.replace(None, self._oob_bool)
X[:, col] = XX
if self._impute_cat_type == 'oob':
for col in X[:, [str]].names:
XX = X[:, col]
XX.replace(None, self._oob_cat)
X[:, col] = XX
return X
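# Hedged usage sketch (illustrative values, mirroring how fit() constructs the imputer):
#   imputer = OOBImpute('oob', 'oob', 'oob', 'oob', False, "__OOB_CAT__")
#   X_train = imputer.fit_transform(X_train)  # learns per-column out-of-bounds fill values
#   X_test = imputer.transform(X_test)        # reuses the fills learned on the training frame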
class make_features(object):
_postfix = "@%@(&#%@))){}#"
def __init__(self, cache=False):
self.cache = cache
self.dai_te = False
self.other_te = True
self.new_names_dict = {}
self.raw_names_dict = {}
self.raw_names_dict_reversed = {}
self.spring = None
self.summer = None
self.fall = None
self.winter = None
self.monthcycle1 = None
self.monthcycle2 = None
self.weekend = None
self.daycycle1 = None
self.daycycle2 = None
self.lexi = None
self.ord5sorted = None
self.ord5more1 = None
self.ord5more2 = None
def apply_clone(self, src):
for k, v in src.__dict__.items():
setattr(self, k, v)
def fit_transform(self, X: pd.DataFrame, y=None, transform=False, **kwargs):
if not transform:
self.orig_cols = list(X.columns)
if 'IS_LEAKAGE' in kwargs or 'IS_SHIFT' in kwargs:
self.raw_names_dict = {v: v for v in list(X.columns)}
self.raw_names_dict_reversed = {v: k for k, v in self.raw_names_dict.items()}
else:
self.raw_names_dict = {Transformer.raw_feat_name(v): v for v in list(X.columns)}
self.raw_names_dict_reversed = {v: k for k, v in self.raw_names_dict.items()}
file = "munged_%s_%s_%d_%d.csv" % (__name__, transform, X.shape[0], X.shape[1])
file = file.replace("csv", "pkl")
file2 = file.replace("munged", "clone")
if self.cache and os.path.isfile(file) and os.path.isfile(file2):
# X = pd.read_csv(file, sep=',', header=0)
X = load_obj(file)
X = X.drop("target", axis=1, errors='ignore')
if not transform:
self.apply_clone(load_obj(file2))
return X
if 'bin_0' in self.raw_names_dict:
X.drop(self.raw_names_dict['bin_0'], errors='ignore')
if 'bin_3' in self.raw_names_dict:
X.drop(self.raw_names_dict['bin_3'], errors='ignore')
# use circular color wheel position for nom_0
def nom12num(x):
# use number of sides
d = {'Circle': 0, 'Polygon': -1, 'Star': 10, 'Triangle': 3, 'Square': 4, 'Trapezoid': 5}
return d[x]
X, self.sides = self.make_feat(X, 'nom_1', 'sides', nom12num)
def nom22num(x):
# use family level features expanded encoding or relative size for nom_2
# ordered by height
d = {'Snake': 0, 'Axolotl': 1, 'Hamster': 2, 'Cat': 3, 'Dog': 4, 'Lion': 5}
return d[x]
X, self.animal = self.make_feat(X, 'nom_2', 'animal', nom22num)
# def has_char(x, char):
# x_str = str(x)
# return 1 if char.upper() in x_str.upper() else 0
# self.haschars = [None] * len(self.orig_cols)
# for ni, c in enumerate(self.orig_cols):
# X, self.lenfeats[ni] = self.make_feat(X, c, 'len', get_len)
def get_len(x):
x_str = str(x)
return len(x_str)
self.lenfeats = [None] * len(self.orig_cols)
for ni, c in enumerate(self.orig_cols):
X, self.lenfeats[ni] = self.make_feat(X, c, 'len', get_len)
#
def get_first(x):
x_str = str(x)
return x_str[0] if len(x_str) > 0 else ""
self.firstchar = [None] * len(self.orig_cols)
for ni, c in enumerate(self.orig_cols):
X, self.firstchar[ni] = self.make_feat(X, c, 'firstc', get_first, is_float=False)
#
def get_last(x):
x_str = str(x)
return x_str[-1] if len(x_str) > 0 else ""
self.lastchar = [None] * len(self.orig_cols)
for ni, c in enumerate(self.orig_cols):
X, self.lastchar[ni] = self.make_feat(X, c, 'lastc', get_last, is_float=False)
#
hex_strings = ['nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9']
#
if True:
# convert hex to binary and use as 8-feature (per hex feature) encoding
def get_charnum(x, i=None):
return str(x)[i]
width = 9
self.hexchar = [None] * len(hex_strings) * width
for ni, c in enumerate(hex_strings):
for nii in range(0, width):
X, self.hexchar[ni * width + nii] = self.make_feat(X, c, 'hexchar%d' % nii, get_charnum,
is_float=False, i=nii)
#
def hex_to_int(x):
# parse the hex string directly rather than eval'ing a constructed literal
return int(str(x), 16)
self.hexints = [None] * len(hex_strings)
for ni, c in enumerate(hex_strings):
X, self.hexints[ni] = self.make_feat(X, c, 'hex2int', hex_to_int)
#
if False: # ValueError: could not convert string to float: b'\x05\x0f\x11k\xcf'
def hex_to_string(x):
try:
x_str = codecs.decode('0' + x, 'hex')
except:
x_str = codecs.decode(x, 'hex')
return x_str
self.hexstr = [None] * len(hex_strings)
for ni, c in enumerate(hex_strings):
X, self.hexstr[ni] = self.make_feat(X, c, 'hex2str', hex_to_string, is_float=False)
def bin012a(x):
return bool(x[0]) & bool(x[1]) & bool(x[2])
X, self.bin012a = self.make_feat(X, ['bin_0', 'bin_1', 'bin_2'], 'bin012a', bin012a)
def bin012b(x):
return (bool(x[0]) ^ bool(x[1])) ^ bool(x[2])
X, self.bin012b = self.make_feat(X, ['bin_0', 'bin_1', 'bin_2'], 'bin012b', bin012b)
def bin012c(x):
return bool(x[0]) ^ (bool(x[1]) ^ bool(x[2]))
X, self.bin012c = self.make_feat(X, ['bin_0', 'bin_1', 'bin_2'], 'bin012c', bin012c)
# TODO: manual OHE fixed width for out of 16 digits always (not sure all rows lead to all values)
# one-hot encode text by each character
# use geo-location for nom_3
# use static mapping encoding for ord_2 and ord_1
def ord12num1(x):
# ordered label
d = {'Novice': 0, 'Contributor': 1, 'Expert': 2, 'Master': 3, 'Grandmaster': 4}
return d[x]
X, self.kaggle1 = self.make_feat(X, 'ord_1', 'kaggle1', ord12num1)
def ord12num2(x):
# medals total
d = {'Novice': 0, 'Contributor': 0, 'Expert': 2, 'Master': 3, 'Grandmaster': 6}
return d[x]
X, self.kaggle2 = self.make_feat(X, 'ord_1', 'kaggle2', ord12num2)
def ord1master(x):
return 1 if 'master' in x or 'Master' in x else 0
X, self.kaggle3 = self.make_feat(X, 'ord_1', 'kaggle3', ord1master)
def ord22num(x):
# ordered label
d = {'Freezing': 0, 'Cold': 1, 'Warm': 2, 'Hot': 3, 'Boiling Hot': 4, 'Lava Hot': 5}
return d[x]
X, self.temp1 = self.make_feat(X, 'ord_2', 'temp1', ord22num)
def ord22num2(x):
# temp in F
d = {'Freezing': 32, 'Cold': 50, 'Warm': 80, 'Hot': 100, 'Boiling Hot': 212, 'Lava Hot': 1700}
return d[x]
X, self.temp2 = self.make_feat(X, 'ord_2', 'temp2', ord22num2)
def ord2hot(x):
return 1 if 'hot' in x or 'Hot' in x else 0
X, self.temp4 = self.make_feat(X, 'ord_2', 'temp4', ord2hot)
# lower ord_5
def ord5more0(x):
return x.lower()
X, self.ord5more0 = self.make_feat(X, 'ord_5', 'more0', ord5more0, is_float=False)
# 1st char, keep for OHE
def ord5more1(x):
return x[0]
X, self.ord5more1 = self.make_feat(X, 'ord_5', 'more1', ord5more1, is_float=False)
# 2nd char, keep for OHE
def ord5more2(x):
return x[1]
X, self.ord5more2 = self.make_feat(X, 'ord_5', 'more2', ord5more2, is_float=False)
# 1st char, keep for OHE
def ord5more3(x):
return x[0].lower()
X, self.ord5more3 = self.make_feat(X, 'ord_5', 'more3', ord5more3, is_float=False)
# 2nd char, keep for OHE
def ord5more4(x):
return x[1].lower()
X, self.ord5more4 = self.make_feat(X, 'ord_5', 'more4', ord5more4, is_float=False)
# 1st word, keep for OHE
def ord2more1(x):
return x.split(" ")[0]
X, self.ord2more1 = self.make_feat(X, 'ord_2', 'more1', ord2more1, is_float=False)
# 2nd word, keep for OHE
def ord2more2(x):
a = x.split(" ")
if len(a) > 1:
return a[1]
else:
return a[0]
X, self.ord2more2 = self.make_feat(X, 'ord_2', 'more2', ord2more2, is_float=False)
# use lexi LE directly as integers for alphabetical (ord_5, ord_4, ord_3)
orig_feat_names = ['ord_5', 'ord_4', 'ord_3',
'nom_0', 'nom_1', 'nom_2',
'nom_3', 'nom_4', 'nom_5',
'nom_6', 'nom_7', 'nom_8',
'nom_9', 'ord_1', 'ord_2']
orig_feat_names = [self.raw_names_dict_reversed[x] for x in
list(self.orig_cols)] # try just encoding all columns
new_names = ['lexi%d' % x for x in range(len(orig_feat_names))]
if not transform:
self.lexi = [None] * len(orig_feat_names)
self.lexi_names = [None] * len(orig_feat_names)
for ni, (new_name, orig_feat_name) in enumerate(zip(new_names, orig_feat_names)):
if orig_feat_name in self.raw_names_dict and self.raw_names_dict[orig_feat_name] in X.columns:
dai_feat_name = self.raw_names_dict[orig_feat_name]
if transform:
Xnew = self.lexi[ni].transform(X[[dai_feat_name]])
else:
self.lexi[ni] = LexiLabelEncoderTransformer([dai_feat_name])
Xnew = self.lexi[ni].fit_transform(X[[dai_feat_name]])
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any(pd.isnull(Xnew).values.ravel())
X = pd.concat([X, Xnew], axis=1)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.lexi_names[ni] = new_feat_name
if False: # already done by lexi encoding
# sorted label encoding of ord_5, use for numeric
orig_feat_name = 'ord_5'
new_name = 'ord5sorted'
if orig_feat_name in self.raw_names_dict and self.raw_names_dict[orig_feat_name] in X.columns:
dai_feat_name = self.raw_names_dict[orig_feat_name]
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
if not transform:
self.ord_5_sorted = sorted(list(set(X[dai_feat_name].values)))
self.ord_5_sorted = dict(zip(self.ord_5_sorted, range(len(self.ord_5_sorted))))
X.loc[:, new_feat_name] = X[dai_feat_name].apply(
lambda x: self.ord_5_sorted[x] if x in self.ord_5_sorted else -1).astype(np.float32)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.ord5sorted = new_feat_name
# frequency encode everything
# keep as cat for OHE
if not transform:
self.freq = [None] * len(self.orig_cols)
self.freq_names = [None] * len(self.orig_cols)
for ni, c in enumerate(list(self.orig_cols)):
new_name = "freq%d" % ni
dai_feat_name = c
if transform:
Xnew = self.freq[ni].transform(X[[dai_feat_name]].astype(str)).to_pandas()
else:
self.freq[ni] = FrequentTransformer([dai_feat_name])
Xnew = self.freq[ni].fit_transform(X[[dai_feat_name]].astype(str)).to_pandas()
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any(pd.isnull(Xnew).values.ravel())
X = pd.concat([X, Xnew], axis=1)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.freq_names[ni] = new_feat_name
if self.dai_te:
# target encode everything
# use as numeric and categorical
if not transform:
self.te = [None] * len(self.orig_cols)
self.te_names = [None] * len(self.orig_cols)
for ni, c in enumerate(list(self.orig_cols)):
new_name = "te%d" % ni
dai_feat_name = c
if transform:
Xnew = self.te[ni].transform(X[[dai_feat_name]].astype(str), y).to_pandas()
else:
self.te[ni] = CVTargetEncodeTransformer([dai_feat_name])
Xnew = self.te[ni].fit_transform(X[[dai_feat_name]].astype(str), y).to_pandas()
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any(
|
pd.isnull(Xnew)
|
pandas.isnull
|
"""Class definition for LazyDatarameLoader."""
import re
from hashlib import md5
from pathlib import Path
from string import ascii_letters, digits
from typing import List, Optional, Pattern, Union
import pandas as pd
class LazyDataFrameLoader:
"""
Loads a dataframe lazily when called.
This avoids the bottleneck of loading dataframes during model training
and feature engineering, by parallelizing the loading when
RawDataSetFeaturizerViaLambda.__slow_featurize is called with
multiprocessing.
Attributes:
df {Optional[pd.DataFrame]}
-- A raw dataframe. (default: {None})
csv_path {Optional[Union[str, Path]]}
-- Path to a dataframe (default: {None})
Methods:
__call__ -- Loads a dataframe
"""
def __init__(
self,
*,
df: Optional[pd.DataFrame] = None,
csv_path: Optional[Union[str, Path]] = None,
remove_id_substring: Optional[bool] = False,
):
"""Init function.
Only `df` or `csv_path` can be specified.
Keyword Arguments:
df {Optional[pd.DataFrame]}
-- A raw dataframe. (default: {None})
csv_path {Optional[Union[str, Path]]}
-- Path to a dataframe (default: {None})
remove_id_substring {Optional[bool]}
-- If True, replaces 'id'-like substrings in column names
of dataframes (default: False)
pattern {Pattern}
-- Compiled regex pattern
Raises:
ValueError: When both `df` and `csv_path` are specified
TypeError: When `df` is not a pd.DataFrame
TypeError: When `csv_path` is not a str or a Path
ValueError: When `csv_path` is not a csv file
"""
# Ensure only one of two optional keyword arguments is provided
if (df is not None) == (csv_path is not None):
raise ValueError("Only one of `df` or `csv_path` can be provided.")
if (df is not None) and not isinstance(df, pd.DataFrame):
raise TypeError(
"Expecting `df` of type pd.DataFrame. " f"Got type {type(df)}."
)
if csv_path is not None:
if not (isinstance(csv_path, str) or isinstance(csv_path, Path)):
raise TypeError(
"Expecting `csv_path` of type str or Path. "
f"Got type {type(csv_path)}."
)
if str(csv_path)[-3:].lower() != "csv":
raise ValueError("A CSV file is expected for `csv_path`.")
# Either one of this will have a non-None value
self.df = df
self.csv_path = csv_path
self.remove_id_substring = remove_id_substring
self.pattern = re.compile("_?[iI][dD]")
def __call__(self) -> pd.DataFrame:
"""Loads the provided (path to a) dataframe."""
if self.df is not None:
result = self.df
else:
result = pd.read_csv(
self.csv_path, encoding="latin", low_memory=False
)
# Optionally clean 'id'-like substrings from column names
if self.remove_id_substring:
result.columns = self._replace_id_in_col_name(result)
return result
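# Hedged usage sketch (the path below is illustrative, not part of this module):
#   loader = LazyDataFrameLoader(csv_path="data/train.csv")
#   df = loader()  # the CSV is only read here, e.g. inside a worker process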
def _replace_id_in_col_name(self, columns: List[str]) -> List[str]:
sub = HashSubstituter()
replacement = [
re.sub(self.pattern, sub.substitute(col), col) for col in columns
]
# Hard fails if there is a collision post-ID-substitution
if
|
pd.Series(replacement)
|
pandas.Series
|
'''
* @author Waileinsamkeit
* @email <EMAIL>
* @create date 2020-08-07 15:51:58
* @modify date 2020-08-07 15:51:58
'''
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
from utils import flatten_lists
class Eval_unit(object):
def __init__(self,tp,fp,fn,tn,label):
super(Eval_unit,self).__init__()
self.id=label
self.d={'tp':tp,'fp':fp,'fn':fn,'tn':tn}
self.accuracy=self.cal_accuracy(tp,fp,fn,tn)
self.precision=self.cal_precision(tp,fp,fn,tn)
self.recall=self.cal_recall(tp,fp,fn,tn)
self.f1_score=self.cal_f1_score(tp,fp,fn,tn)
def __getattr__(self,name):
# only reached when normal attribute lookup fails, so fall back to the tp/fp/fn/tn counts dict
return self.__dict__[name] if name in self.__dict__ else self.d[name]
def todict(self):
return {"acc":self.accuracy,"prec":self.precision,"recall":self.recall,"f1_score":self.f1_score}
@classmethod
def cal_accuracy(cls,tp:int,fp:int,fn:int,tn:int)->float:
return float(tp+tn)/(tp+tn+fp+fn)
@classmethod
def cal_precision(cls,tp:int,fp:int,fn:int,tn:int)->float:
return float(tp)/(tp+fp) if tp+fp!=0 else 0.
@classmethod
def cal_recall(cls,tp:int,fp:int,fn:int,tn:int)->float:
return float(tp)/(tp+fn) if tp+fn!=0 else 0.
@classmethod
def cal_f1_score(cls,tp:int,fp:int,fn:int,tn:int)->float:
p=cls.cal_precision(tp,fp,fn,tn)
r=cls.cal_recall(tp,fp,fn,tn)
return 2*p*r/(r+p) if r+p !=0 else 0.
def evaluate_single_label(pred,label,classes):
pred=flatten_lists(pred)
label=flatten_lists(label)
matrix=confusion_matrix(pred,label,classes)
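# pred is passed as the first argument, so rows index the predicted class and
# columns the gold class: row sums give TP+FP, column sums give TP+FN.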
TP=np.diag(matrix)
FP=matrix.sum(axis=1)-TP
FN=matrix.sum(axis=0)-TP
TN=matrix.sum()-TP-FN-FP
unit_list=[]
for i in range(len(classes)):
cla=classes[i]
unit=Eval_unit(TP[i],FP[i],FN[i],TN[i],cla)
unit_list.append(unit)
return unit_list
"""
B-LOC
I-LOC
B-ORG
I-ORG
B-PER
I-PER
O
"""
def evaluate_entity_label(pred,label,classes):
pred=flatten_lists(pred)
label=flatten_lists(label)
assert len(pred)==len(label)
cla=[i.split('-')[-1] for i in classes if i!='O']
cla=list(set(cla))
cla2ind=dict((c,ind) for ind,c in enumerate(cla))
index=0
pred_entities=np.zeros(len(cla),dtype=int) #TP+FP
label_entities=np.zeros(len(cla),dtype=int) #TP+FN
acc=np.zeros(len(cla),dtype=int) #TP
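# Walk the gold sequence: each non-O tag opens an entity span, extended while the
# following gold tags carry the matching I- tag; every gold span counts toward
# TP+FN, and it counts as a TP only if the predicted tags over the exact same
# span are identical.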
while index<len(label):
label_tag=label[index]
if label_tag=='O':
index+=1
else:
c=label_tag.split('-')[-1]
c=cla2ind[c]
next_tag='I'+label_tag[1:]
j=index+1
while j<len(label) and label[j]==next_tag:
j+=1
label_entities[c]+=1
label_entity= ''.join(label[index:j])
pred_entity=''.join(pred[index:j])
if label_entity==pred_entity:
acc[c]+=1
index=j
# count entities on the predicted tag sequence (TP + FP)
index=0
while index<len(pred):
pred_tag=pred[index]
if pred_tag=='O':
index+=1
elif pred_tag.split('-')[0]=='B':
c=pred_tag.split('-')[-1]
c=cla2ind[c]
next_tag='I'+pred_tag[1:]
j=index+1
while j<len(pred) and pred[j]==next_tag:
j+=1
pred_entities[c]+=1
index=j
else:
index+=1
# if index%100==0:
# print(index,end=' ')
units=[]
TP=acc
FP=pred_entities-acc
FN=label_entities-acc
TN=acc.sum()-acc
for c,ind in cla2ind.items():
units.append(Eval_unit(TP[ind],FP[ind],FN[ind],TN[ind],c))
return units
def evaluate_multiclass(units:list,type:str):
assert type in ['macro','micro']
if type=='macro':
P=float(sum([unit.precision for unit in units]))/len(units)
R=float(sum([unit.recall for unit in units]))/len(units)
else:
tp=float(sum([unit.tp for unit in units]))/len(units)
fp=float(sum([unit.fp for unit in units]))/len(units)
fn=float(sum([unit.fn for unit in units]))/len(units)
P=tp/(tp+fp)
R=tp/(tp+fn)
f1=2*P*R/(P+R)
return {"prec":P,"recall":R,"f1_score":f1}
'''
Convert a list of Eval_unit objects into a pandas DataFrame
(one column per class plus "macro"/"micro", one row per metric).
'''
def unitstopd(units):
d={}
macro=evaluate_multiclass(units,"macro")
micro=evaluate_multiclass(units,"micro")
d=dict((unit.id,unit.todict()) for unit in units)
d["macro"]=macro
d["micro"]=micro
df=
|
pd.DataFrame(d)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# a bare level array should not trip the length check; it should raise
# demanding a list of lists-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# likewise for a bare labels array
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# and a bare name should be rejected as not list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
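# _tuples is the cached object array behind .values (an implementation
# detail); the checks below verify that non-inplace setters leave the
# cache alone while inplace mutation invalidates it.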
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
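# With copy=True the constructor takes its own copies of the inputs, so
# the mutations of `labels` and `levels` below must not show through.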
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least shallow-)copied
tm.assert_copy(copy.levels, original.levels)
# Labels should compare equal however they were copied...
tm.assert_almost_equal(copy.labels, original.labels)
# ...but must not be the very same object
assert copy.labels is not original.labels
# Names should compare equal but also not be the same object
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike(
[(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
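# construct_1d_object_array_from_listlike keeps each tuple as a single
# object element instead of letting numpy broadcast it to 2-D, which is
# the shape MultiIndex.values is expected to produce.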
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
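# The tz-aware level exercises the boxed path (values come back as
# Timestamp objects); the short slice below, with more level entries
# than codes (n_lev > n_lab), hits the other branch.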
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# reordering with more levels than the index has should raise
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
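# Once the index is sorted by the first level, get_loc with only a
# level-0 key returns the contiguous slice of rows in that group.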
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
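# With three levels a 2-tuple is a partial key: it raises KeyError when
# the prefix is absent, and otherwise returns the slice of consecutive
# rows sharing that prefix (rows 3-4 share the (2, 0) prefix here).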
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)],
labels=[np.array([0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
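# from_product enumerates keys in row-major order, so the key built here
# (second entry of each level, with the null substituted at `level`) is
# the last of the four products, i.e. position 3.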
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
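# Row-major product order: (False, 0) -> 0, (False, 1) -> 1,
# (True, 0) -> 2, (True, 1) -> 3.  Integer keys are cast to bool on the
# first level, but bools are not cast to int on the second one.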
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
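# slice_locs is label-based and inclusive of both endpoints, so rows 5
# through 15 of the original frame survive, hence df[5:16].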
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# after sorting, full-depth keys no longer exceed the lexsort depth, so
# this call just has to succeed (its result is not checked)
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
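# Most of the bounds used below are absent from the index, so slice_locs
# has to fall back to searchsorted-style insertion points rather than
# exact lookups; sortorder=0 makes that well defined.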
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that this works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
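# A million distinct tuples go through the hash-based engine; lookups
# must still resolve every key to its own position: the identity
# permutation for get_indexer and i itself for a few spot-checked rows.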
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
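# Expected layout: the outer level repeats each of the 5 integers three
# times (np.repeat) while the inner level tiles the 3 dates five times
# (np.tile), matching from_product's row-major ordering.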
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples(
[(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
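# to_hierarchical(3) repeats each original row three times in place, so
# only the label arrays grow; the levels are unchanged.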
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# calling with a non-MultiIndex (a plain array of tuples) also works
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples(
[('bar', 'one'), ('baz', 'two'), ('foo', 'two'),
('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: dtypes change to float because of intermediate NaN insertion
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
    def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
            assert result.is_unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
def test_reconstruct_sort(self):
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
('x', 'b'), ('y', 'a'), ('z', 'b')],
names=['one', 'two'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=['col1', 'col2'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
def test_reconstruct_remove_unused(self):
# xref to GH 2770
df = DataFrame([['deleteMe', 1, 9],
['keepMe', 2, 9],
['keepMeToo', 3, 9]],
columns=['first', 'second', 'third'])
df2 = df.set_index(['first', 'second'], drop=False)
df2 = df2[df2['first'] != 'deleteMe']
# removed levels are there
expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'],
[1, 2, 3]],
labels=[[1, 2], [1, 2]],
names=['first', 'second'])
result = df2.index
tm.assert_index_equal(result, expected)
expected = MultiIndex(levels=[['keepMe', 'keepMeToo'],
[2, 3]],
labels=[[0, 1], [0, 1]],
names=['first', 'second'])
result = df2.index.remove_unused_levels()
tm.assert_index_equal(result, expected)
# idempotent
result2 = result.remove_unused_levels()
tm.assert_index_equal(result2, expected)
assert result2.is_(result)
@pytest.mark.parametrize('level0', [['a', 'd', 'b'],
['a', 'd', 'b', 'unused']])
@pytest.mark.parametrize('level1', [['w', 'x', 'y', 'z'],
['w', 'x', 'y', 'z', 'unused']])
def test_remove_unused_nan(self, level0, level1):
# GH 18417
mi = pd.MultiIndex(levels=[level0, level1],
labels=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]])
result = mi.remove_unused_levels()
tm.assert_index_equal(result, mi)
for level in 0, 1:
assert('unused' not in result.levels[level])
@pytest.mark.parametrize('first_type,second_type', [
('int64', 'int64'),
('datetime64[D]', 'str')])
def test_remove_unused_levels_large(self, first_type, second_type):
# GH16556
# because tests should be deterministic (and this test in particular
# checks that levels are removed, which is not the case for every
# random input):
rng = np.random.RandomState(4) # seed is arbitrary value that works
size = 1 << 16
df = DataFrame(dict(
first=rng.randint(0, 1 << 13, size).astype(first_type),
second=rng.randint(0, 1 << 10, size).astype(second_type),
third=rng.rand(size)))
df = df.groupby(['first', 'second']).sum()
df = df[df.third < 0.1]
result = df.index.remove_unused_levels()
assert len(result.levels[0]) < len(df.index.levels[0])
assert len(result.levels[1]) < len(df.index.levels[1])
assert result.equals(df.index)
expected = df.reset_index().set_index(['first', 'second']).index
tm.assert_index_equal(result, expected)
def test_isin(self):
values = [('foo', 2), ('bar', 3), ('quux', 4)]
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
@pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_not_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, False]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, False]))
@pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, True]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, True]))
def test_isin_level_kwarg(self):
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
vals_0 = ['foo', 'bar', 'quux']
vals_1 = [2, 3, 10]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
pytest.raises(IndexError, idx.isin, vals_0, level=5)
pytest.raises(IndexError, idx.isin, vals_0, level=-5)
pytest.raises(KeyError, idx.isin, vals_0, level=1.0)
pytest.raises(KeyError, idx.isin, vals_1, level=-1.0)
pytest.raises(KeyError, idx.isin, vals_1, level='A')
idx.names = ['A', 'B']
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
pytest.raises(KeyError, idx.isin, vals_1, level='C')
def test_reindex_preserves_names_when_target_is_list_or_ndarray(self):
# GH6552
idx = self.index.copy()
target = idx.copy()
idx.names = target.names = [None, None]
other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]])
# list & ndarray cases
assert idx.reindex([])[0].names == [None, None]
assert idx.reindex(np.array([]))[0].names == [None, None]
assert idx.reindex(target.tolist())[0].names == [None, None]
assert idx.reindex(target.values)[0].names == [None, None]
assert idx.reindex(other_dtype.tolist())[0].names == [None, None]
assert idx.reindex(other_dtype.values)[0].names == [None, None]
idx.names = ['foo', 'bar']
assert idx.reindex([])[0].names == ['foo', 'bar']
assert idx.reindex(np.array([]))[0].names == ['foo', 'bar']
assert idx.reindex(target.tolist())[0].names == ['foo', 'bar']
assert idx.reindex(target.values)[0].names == ['foo', 'bar']
assert idx.reindex(other_dtype.tolist())[0].names == ['foo', 'bar']
assert idx.reindex(other_dtype.values)[0].names == ['foo', 'bar']
def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self):
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],
names=['foo', 'bar'])
assert idx.reindex([], level=0)[0].names == ['foo', 'bar']
assert idx.reindex([], level=1)[0].names == ['foo', 'bar']
def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])
assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64
assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_
def test_groupby(self):
groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2]))
labels = self.index.get_values().tolist()
exp = {1: labels[:3], 2: labels[3:]}
tm.assert_dict_equal(groups, exp)
# GH5620
groups = self.index.groupby(self.index)
exp = {key: [key] for key in self.index}
tm.assert_dict_equal(groups, exp)
def test_index_name_retained(self):
# GH9857
result = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]})
result = result.set_index('z')
result.loc[10] = [9, 10]
df_expected = pd.DataFrame({'x': [1, 2, 6, 9],
'y': [2, 2, 8, 10],
'z': [-5, 0, 5, 10]})
df_expected = df_expected.set_index('z')
tm.assert_frame_equal(result, df_expected)
def test_equals_operator(self):
# GH9785
assert (self.index == self.index).all()
def test_large_multiindex_error(self):
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]),
columns=['dest'])
with pytest.raises(KeyError):
df_below_1000000.loc[(-1, 0), 'dest']
with pytest.raises(KeyError):
df_below_1000000.loc[(3, 0), 'dest']
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]),
columns=['dest'])
with pytest.raises(KeyError):
df_above_1000000.loc[(-1, 0), 'dest']
with pytest.raises(KeyError):
df_above_1000000.loc[(3, 0), 'dest']
def test_partial_string_timestamp_multiindex(self):
# GH10331
dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H')
abc = ['a', 'b', 'c']
ix = pd.MultiIndex.from_product([dr, abc])
df = pd.DataFrame({'c1': range(0, 15)}, index=ix)
idx = pd.IndexSlice
# c1
# 2016-01-01 00:00:00 a 0
# b 1
# c 2
# 2016-01-01 12:00:00 a 3
# b 4
# c 5
# 2016-01-02 00:00:00 a 6
# b 7
# c 8
# 2016-01-02 12:00:00 a 9
# b 10
# c 11
# 2016-01-03 00:00:00 a 12
# b 13
# c 14
# partial string matching on a single index
for df_swap in (df.swaplevel(),
df.swaplevel(0),
df.swaplevel(0, 1)):
df_swap = df_swap.sort_index()
just_a = df_swap.loc['a']
result = just_a.loc['2016-01-01']
expected = df.loc[idx[:, 'a'], :].iloc[0:2]
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
# indexing with IndexSlice
result = df.loc[idx['2016-01-01':'2016-02-01', :], :]
expected = df
tm.assert_frame_equal(result, expected)
# match on secondary index
result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :]
expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# Even though this syntax works on a single index, this is somewhat
# ambiguous and we don't want to extend this behavior forward to work
# in multi-indexes. This would amount to selecting a scalar from a
# column.
with pytest.raises(KeyError):
df['2016-01-01']
# partial string match on year only
result = df.loc['2016']
expected = df
tm.assert_frame_equal(result, expected)
# partial string match on date
result = df.loc['2016-01-01']
expected = df.iloc[0:6]
tm.assert_frame_equal(result, expected)
# partial string match on date and hour, from middle
result = df.loc['2016-01-02 12']
expected = df.iloc[9:12]
tm.assert_frame_equal(result, expected)
# partial string match on secondary index
result = df_swap.loc[idx[:, '2016-01-02'], :]
expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
tm.assert_frame_equal(result, expected)
# tuple selector with partial string match on date
result = df.loc[('2016-01-01', 'a'), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# Slicing date on first level should break (of course)
with pytest.raises(KeyError):
df_swap.loc['2016-01-01']
# GH12685 (partial string with daily resolution or below)
dr = date_range('2013-01-01', periods=100, freq='D')
ix = MultiIndex.from_product([dr, ['a', 'b']])
df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix)
result = df.loc[idx['2013-03':'2013-03', :], :]
expected = df.iloc[118:180]
tm.assert_frame_equal(result, expected)
def test_rangeindex_fallback_coercion_bug(self):
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)
df.index.names = ['fizz', 'buzz']
str(df)
expected = pd.DataFrame({'bar': np.arange(100),
'foo': np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)],
names=['fizz', 'buzz']))
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values('fizz')
expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)
|
tm.assert_index_equal(result, expected)
|
pandas.util.testing.assert_index_equal
|
# -*- coding: utf-8 -*-
"""Util functions to make data processing and feature engineering"""
import pandas as pd
import pickle
import sys
from satlomasproc import __version__
from sklearn.preprocessing import MinMaxScaler
__author__ = "<NAME>"
__copyright__ = "Dymaxion Labs"
__license__ = "apache-2.0"
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
    Convert series of data to supervised sequence learning
"""
n_vars = 1 if type(data) is list else data.shape[1]
df = data
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += ["{}_t-{}".format(var_name, i) for var_name in df.columns]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += ["{}_t".format(var_name) for var_name in df.columns]
else:
names += ["{}_t+{}".format(var_name, i) for var_name in df.columns]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
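# Illustrative sketch (not part of the original module): a minimal usage example of
# series_to_supervised on a single-column DataFrame, showing the lag/lead column
# layout it produces. The helper name and the 'flow' column are hypothetical.
def _example_series_to_supervised():
    raw = pd.DataFrame({"flow": [10, 20, 30, 40, 50]})
    supervised = series_to_supervised(raw, n_in=2, n_out=1)
    # Columns are flow_t-2, flow_t-1, flow_t; the first two rows are dropped
    # because shifting by 2 introduces NaN values.
    return supervised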
def get_interest_variable(
in_dataset, sensor_var, date_col, hr_col, numeric_var, target_sensor="A620"
):
"""
Extract var to predict on from dataset
"""
dataset_pproc = in_dataset.loc[
in_dataset[sensor_var] == target_sensor, [date_col, hr_col] + [numeric_var]
]
hrs_str = dataset_pproc[hr_col].to_string()
dates_str = dataset_pproc[date_col]
dataset_pproc[date_col] =
|
pd.to_datetime(dataset_pproc[date_col])
|
pandas.to_datetime
|
"""
opencadd.databases.klifs.remote
Defines a remote KLIFS session.
"""
import logging
from copy import deepcopy
from pathlib import Path
import pandas as pd
from bravado.client import SwaggerClient
from .core import (
KinasesProvider,
LigandsProvider,
StructuresProvider,
BioactivitiesProvider,
InteractionsProvider,
PocketsProvider,
CoordinatesProvider,
)
from .schema import REMOTE_COLUMNS_MAPPING, DATAFRAME_COLUMNS
from .utils import metadata_to_filepath, silence_logging
from opencadd.io import DataFrame, Rdkit
_logger = logging.getLogger(__name__)
class SerializableSwaggerClient(SwaggerClient):
    # Since the parent class uses double-underscore (name-mangled) attributes,
    # we need to hardcode the parent class name in the saved attributes
    # (only also_return_response in this case).
    # Sorry about the hackiness :)
def __setstate__(self, state):
self._SwaggerClient__also_return_response = state["also_return_response"]
self.swagger_spec = state["swagger_spec"]
def __getstate__(self, *args):
return {
"also_return_response": deepcopy(self._SwaggerClient__also_return_response),
"swagger_spec": deepcopy(self.swagger_spec),
}
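# Illustrative sketch (not part of the original module): the __getstate__/__setstate__
# pair above is what lets a client survive a pickle round trip (e.g. when cached to
# disk or sent to worker processes). `client` is assumed to be an already constructed
# SerializableSwaggerClient; the helper name is hypothetical.
def _example_pickle_roundtrip(client):
    import pickle
    restored = pickle.loads(pickle.dumps(client))
    # The swagger spec and the also_return_response flag are both preserved.
    return restored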
class RemoteInitializer:
"""
Base class used to define __init__ for all remote classes.
Attributes
----------
_client : bravado.client.SwaggerClient
KLIFS client.
"""
def __init__(self, client, *args, **kwargs):
self._client = client
class Kinases(RemoteInitializer, KinasesProvider):
"""
Extends KinasesProvider to provide remote kinases requests.
Refer to KinasesProvider documentation for more information:
opencadd.databases.klifs.core.KinasesProvider
"""
def all_kinase_groups(self):
# Use KLIFS API
result = self._client.Information.get_kinase_groups().response().result
# Convert list to DataFrame (1 column)
column = DATAFRAME_COLUMNS["kinase_groups"][0]
kinase_groups = pd.DataFrame({column[0]: pd.Series(result, dtype=column[1])})
return kinase_groups
def all_kinase_families(self, group=None):
# Use KLIFS API
result = self._client.Information.get_kinase_families(kinase_group=group).response().result
# Convert list to DataFrame (1 column)
column = DATAFRAME_COLUMNS["kinase_families"][0]
kinase_families = pd.DataFrame({column[0]: pd.Series(result, dtype=column[1])})
return kinase_families
def all_kinases(self, group=None, family=None, species=None):
# Use KLIFS API
result = (
self._client.Information.get_kinase_names(
kinase_group=group, kinase_family=family, species=species
)
.response()
.result
)
# Convert list of ABC objects to DataFrame
kinases = self._abc_to_dataframe(result)
# Standardize DataFrame
kinases = self._standardize_dataframe(
kinases, DATAFRAME_COLUMNS["kinases_all"], REMOTE_COLUMNS_MAPPING["kinases_all"]
)
return kinases
def by_kinase_klifs_id(self, kinase_klifs_ids):
kinase_klifs_ids = self._ensure_list(kinase_klifs_ids)
# Use KLIFS API
result = (
self._client.Information.get_kinase_information(kinase_ID=kinase_klifs_ids)
.response()
.result
)
# Convert list of ABC objects to DataFrame
kinases = self._abc_to_dataframe(result)
# Standardize DataFrame
kinases = self._standardize_dataframe(
kinases, DATAFRAME_COLUMNS["kinases"], REMOTE_COLUMNS_MAPPING["kinases"]
)
return kinases
def by_kinase_name(self, kinase_names, species=None):
kinase_names = self._ensure_list(kinase_names)
# Use KLIFS API (send requests iteratively)
kinases = self._multiple_remote_requests(self._by_kinase_name, kinase_names, species)
# Standardize DataFrame
kinases = self._standardize_dataframe(
kinases, DATAFRAME_COLUMNS["kinases"], REMOTE_COLUMNS_MAPPING["kinases"]
)
return kinases
def _by_kinase_name(self, kinase_name, species=None):
"""
Get kinases by kinase name.
Parameters
----------
kinase_name : str
Kinase name.
Returns
-------
pandas.DataFrame or None
Kinases (rows) with columns as described in the class docstring.
"""
# Use KLIFS API
result = (
self._client.Information.get_kinase_ID(kinase_name=kinase_name, species=species)
.response()
.result
)
# Convert list of ABC objects to DataFrame
kinases = self._abc_to_dataframe(result)
# Standardize DataFrame
kinases = self._standardize_dataframe(
kinases, DATAFRAME_COLUMNS["kinases"], REMOTE_COLUMNS_MAPPING["kinases"]
)
return kinases
class Ligands(RemoteInitializer, LigandsProvider):
"""
Extends LigandsProvider to provide remote ligands requests.
Refer to LigandsProvider documentation for more information:
opencadd.databases.klifs.core.LigandsProvider
"""
def all_ligands(self):
# Use KLIFS API: Get all kinase KLIFS IDs
kinases_remote = Kinases(self._client)
kinases = kinases_remote.all_kinases()
# Use KLIFS API: Get ligands
kinase_klifs_ids = kinases["kinase.klifs_id"].to_list()
result = (
self._client.Ligands.get_ligands_list(kinase_ID=kinase_klifs_ids).response().result
)
# Convert list of ABC objects to DataFrame
ligands = self._abc_to_dataframe(result)
# Standardize DataFrame
ligands = self._standardize_dataframe(
ligands, DATAFRAME_COLUMNS["ligands"], REMOTE_COLUMNS_MAPPING["ligands"]
)
return ligands
def by_kinase_klifs_id(self, kinase_klifs_ids):
# Use KLIFS API (send requests iteratively)
ligands = self._multiple_remote_requests(self._by_kinase_klifs_id, kinase_klifs_ids)
# Standardize DataFrame
ligands = self._standardize_dataframe(
ligands,
DATAFRAME_COLUMNS["ligands"] + [("kinase.klifs_id (query)", "int32")],
REMOTE_COLUMNS_MAPPING["ligands"],
)
return ligands
def _by_kinase_klifs_id(self, kinase_klifs_id):
"""
Get ligands by kinase KLIFS ID.
Parameters
----------
kinase_klifs_id : int
Kinase KLIFS ID.
Returns
-------
pandas.DataFrame
Ligands (rows) with columns as described in the class docstring.
"""
# Use KLIFS API
result = (
self._client.Ligands.get_ligands_list(kinase_ID=[kinase_klifs_id]).response().result
)
# Convert list of ABC objects to DataFrame
ligands = self._abc_to_dataframe(result)
# Standardize DataFrame
ligands = self._standardize_dataframe(
ligands, DATAFRAME_COLUMNS["ligands"], REMOTE_COLUMNS_MAPPING["ligands"]
)
        # Add a column indicating the query key
ligands["kinase.klifs_id (query)"] = kinase_klifs_id
return ligands
def by_kinase_name(self, kinase_names):
kinase_names = self._ensure_list(kinase_names)
# Use KLIFS API: Get kinase KLIFS IDs for input kinase names (remotely)
# Note: One kinase name can be linked to multiple kinase KLIFS IDs (due to multiple species)
_logger.info(f"Fetch kinase KLIFS IDs for input kinase names...")
kinases_remote = Kinases(self._client)
kinases = kinases_remote.by_kinase_name(kinase_names)
# Select and rename columns to indicate columns involved in query
kinases = kinases[
["kinase.klifs_id", "kinase.klifs_name", "kinase.hgnc_name", "species.klifs"]
]
kinases.rename(
columns={
"kinase.klifs_id": "kinase.klifs_id (query)",
"kinase.klifs_name": "kinase.klifs_name (query)",
"kinase.hgnc_name": "kinase.hgnc_name (query)",
"species.klifs": "species.klifs (query)",
},
inplace=True,
)
# Use KLIFS API: Get ligands by kinase KLIFS IDs
_logger.info(f"Fetch ligands based on these KLIFS IDs...")
kinase_klifs_ids = kinases["kinase.klifs_id (query)"].to_list()
ligands = self.by_kinase_klifs_id(kinase_klifs_ids)
# Add kinase name and species details to rationalize kinase KLIFS IDs
ligands = ligands.merge(kinases, on="kinase.klifs_id (query)", how="left")
return ligands
def by_ligand_klifs_id(self, ligand_klifs_ids):
ligand_klifs_ids = self._ensure_list(ligand_klifs_ids)
# Use KLIFS API: Get all ligands
ligands = self.all_ligands()
# Select ligands by ligand KLIFS IDs
ligands = ligands[ligands["ligand.klifs_id"].isin(ligand_klifs_ids)]
# Standardize DataFrame
ligands = self._standardize_dataframe(
ligands, DATAFRAME_COLUMNS["ligands"], REMOTE_COLUMNS_MAPPING["ligands"]
)
return ligands
def by_ligand_expo_id(self, ligand_expo_ids):
ligand_expo_ids = self._ensure_list(ligand_expo_ids)
# Use KLIFS API: Get all ligands
ligands = self.all_ligands()
# Select ligands by Ligand Expo IDs
ligands = ligands[ligands["ligand.expo_id"].isin(ligand_expo_ids)]
# Standardize DataFrame
ligands = self._standardize_dataframe(
ligands, DATAFRAME_COLUMNS["ligands"], REMOTE_COLUMNS_MAPPING["ligands"]
)
return ligands
class Structures(RemoteInitializer, StructuresProvider):
"""
Extends StructuresProvider to provide remote structures requests.
Refer to StructuresProvider documentation for more information:
opencadd.databases.klifs.core.StructuresProvider
"""
def all_structures(self):
# Use KLIFS API: Get all kinase KLIFS IDs
kinases_remote = Kinases(self._client)
kinases = kinases_remote.all_kinases()
# Use KLIFS API: Get all structures from these kinase KLIFS IDs
kinase_klifs_ids = kinases["kinase.klifs_id"].to_list()
structures = self.by_kinase_klifs_id(kinase_klifs_ids)
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
return structures
def by_structure_klifs_id(self, structure_klifs_ids):
structure_klifs_ids = self._ensure_list(structure_klifs_ids)
# Use KLIFS API
result = (
self._client.Structures.get_structure_list(structure_ID=structure_klifs_ids)
.response()
.result
)
# Convert list of ABC objects to DataFrame
structures = self._abc_to_dataframe(result)
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
return structures
def by_ligand_klifs_id(self, ligand_klifs_ids):
# TODO in the future: Approach incorrect: One PDB can have multiple KLIFS IDs
_logger.warning(
f"This method uses this lookup: ligand KLIFS ID > Ligand Expo ID > structures."
f"The KLIFS Swagger API offers no direct structure search by ligand KLIFS ID."
f"However, one Ligand Expo ID can be represented by multiple ligand KLIFS IDs. "
f"Thus, in rare cases, this method will return also structure that are not connected "
f"to the input ligand KLIFS ID but to a mutual Ligand Expo ID."
)
ligand_klifs_ids = self._ensure_list(ligand_klifs_ids)
# Use KLIFS API: Get Ligand Expo IDs for ligand KLIFS IDs
remote_ligands = Ligands(self._client)
ligands = remote_ligands.by_ligand_klifs_id(ligand_klifs_ids)
# Use KLIFS API: Get structures from Ligand Expo IDs
ligand_expo_ids = ligands["ligand.expo_id"].to_list()
structures = self.by_ligand_expo_id(ligand_expo_ids)
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
return structures
def by_kinase_klifs_id(self, kinase_klifs_ids):
kinase_klifs_ids = self._ensure_list(kinase_klifs_ids)
# Use KLIFS API
result = (
self._client.Structures.get_structures_list(kinase_ID=kinase_klifs_ids)
.response()
.result
)
# Convert list of ABC objects to DataFrame
structures = self._abc_to_dataframe(result)
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
return structures
def by_structure_pdb_id(
self, structure_pdb_ids, structure_alternate_model=None, structure_chain=None
):
structure_pdb_ids = self._ensure_list(structure_pdb_ids)
# Use KLIFS API
result = (
self._client.Structures.get_structures_pdb_list(pdb_codes=structure_pdb_ids)
.response()
.result
)
# Convert list of ABC objects to DataFrame
structures = self._abc_to_dataframe(result)
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
# If only one structure PDB ID is given, check alternate model and chain filters
if len(structure_pdb_ids) == 1:
structures = self._filter_pdb_by_alt_chain(
structures, structure_alternate_model, structure_chain
).reset_index(drop=True)
return structures
def by_ligand_expo_id(self, ligand_expo_ids):
ligand_expo_ids = self._ensure_list(ligand_expo_ids)
# Use KLIFS API: Get all structures
structures = self.all_structures()
# Select structures by Ligand Expo IDs
structures = structures[structures["ligand.expo_id"].isin(ligand_expo_ids)]
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
return structures
def by_kinase_name(self, kinase_names):
kinase_names = self._ensure_list(kinase_names)
# Use KLIFS API: Get all structures
structures = self.all_structures()
# Select structures by kinase names
structures = structures[structures["kinase.klifs_name"].isin(kinase_names)]
# Standardize DataFrame
structures = self._standardize_dataframe(
structures, DATAFRAME_COLUMNS["structures"], REMOTE_COLUMNS_MAPPING["structures"]
)
return structures
class Bioactivities(RemoteInitializer, BioactivitiesProvider):
"""
Extends BioactivitiesProvider to provide remote bioactivities requests.
Refer to BioactivitiesProvider documentation for more information:
opencadd.databases.klifs.core.BioactivitiesProvider
"""
def all_bioactivities(self, _top_n=None):
# Use KLIFS API: Get all kinase KLIFS IDs
ligands_remote = Ligands(self._client)
ligands = ligands_remote.all_ligands()
# Optional: Select top n ligands for bioactivity query!
if _top_n:
ligands = ligands[:_top_n]
# Use KLIFS API: Get all bioactivities from these ligand KLIFS IDs
ligand_klifs_ids = ligands["ligand.klifs_id"].to_list()
        # Many ligands do not have bioactivities in ChEMBL,
        # thus disable logging messages for this query
with silence_logging():
bioactivities = self.by_ligand_klifs_id(ligand_klifs_ids)
# Standardize DataFrame
bioactivities = self._standardize_dataframe(
bioactivities,
DATAFRAME_COLUMNS["bioactivities"],
REMOTE_COLUMNS_MAPPING["bioactivities"],
)
return bioactivities
def by_kinase_klifs_id(self, kinase_klifs_ids):
kinase_klifs_ids = self._ensure_list(kinase_klifs_ids)
# Use KLIFS API: Get all kinase KLIFS IDs
ligands_remote = Ligands(self._client)
ligands = ligands_remote.by_kinase_klifs_id(kinase_klifs_ids)
# Use KLIFS API: Get all bioactivities from these ligand KLIFS IDs
ligand_klifs_ids = ligands["ligand.klifs_id"].to_list()
bioactivities = self.by_ligand_klifs_id(ligand_klifs_ids)
# Standardize DataFrame
bioactivities = self._standardize_dataframe(
bioactivities,
DATAFRAME_COLUMNS["bioactivities"],
REMOTE_COLUMNS_MAPPING["bioactivities"],
)
return bioactivities
def by_ligand_klifs_id(self, ligand_klifs_ids):
# Use KLIFS API (send requests iteratively)
bioactivities = self._multiple_remote_requests(self._by_ligand_klifs_id, ligand_klifs_ids)
# Standardize DataFrame
bioactivities = self._standardize_dataframe(
bioactivities,
DATAFRAME_COLUMNS["bioactivities"] + [("ligand.klifs_id (query)", "int32")],
REMOTE_COLUMNS_MAPPING["bioactivities"],
)
return bioactivities
def by_ligand_expo_id(self, ligand_expo_id):
# Use KLIFS API (send requests iteratively)
bioactivities = self._multiple_remote_requests(self._by_ligand_expo_id, ligand_expo_id)
# Standardize DataFrame
bioactivities = self._standardize_dataframe(
bioactivities,
DATAFRAME_COLUMNS["bioactivities"] + [("ligand.expo_id (query)", "string")],
REMOTE_COLUMNS_MAPPING["bioactivities"],
)
return bioactivities
def _by_ligand_klifs_id(self, ligand_klifs_id):
"""
Get bioactivities by ligand KLIFS ID.
Parameters
----------
ligand_klifs_id : int
Ligand KLIFS ID.
Returns
-------
pandas.DataFrame
Bioactivities (rows) with columns as described in the class docstring.
"""
# Use KLIFS API
result = (
self._client.Ligands.get_bioactivity_list_id(ligand_ID=ligand_klifs_id)
.response()
.result
)
# Convert list of ABC objects to DataFrame
bioactivities = self._abc_to_dataframe(result)
# Standardize DataFrame
bioactivities = self._standardize_dataframe(
bioactivities,
DATAFRAME_COLUMNS["bioactivities"],
REMOTE_COLUMNS_MAPPING["bioactivities"],
)
        # Add a column indicating the query key
bioactivities["ligand.klifs_id (query)"] = ligand_klifs_id
return bioactivities
def _by_ligand_expo_id(self, ligand_expo_id):
"""
Get bioactivities by ligand Expo ID.
Parameters
----------
        ligand_expo_id : str
Ligand Expo ID.
Returns
-------
pandas.DataFrame
Bioactivities (rows) with columns as described in the class docstring.
"""
# Use KLIFS API
result = (
self._client.Ligands.get_bioactivity_list_pdb(ligand_PDB=ligand_expo_id)
.response()
.result
)
# Convert list of ABC objects to DataFrame
bioactivities = self._abc_to_dataframe(result)
# Standardize DataFrame
bioactivities = self._standardize_dataframe(
bioactivities,
DATAFRAME_COLUMNS["bioactivities"],
REMOTE_COLUMNS_MAPPING["bioactivities"],
)
        # Add a column indicating the query key
bioactivities["ligand.expo_id (query)"] = ligand_expo_id
return bioactivities
class Interactions(RemoteInitializer, InteractionsProvider):
"""
Extends InteractionsProvider to provide remote kinases requests.
Refer to InteractionsProvider documentation for more information:
opencadd.databases.klifs.core.InteractionsProvider
"""
@property
def interaction_types(self):
# Use KLIFS API
result = self._client.Interactions.get_interactions_get_types().response().result
# Convert list of ABC objects to DataFrame
interaction_types = self._abc_to_dataframe(result)
# Standardize DataFrame
interaction_types = self._standardize_dataframe(
interaction_types,
DATAFRAME_COLUMNS["interaction_types"],
REMOTE_COLUMNS_MAPPING["interaction_types"],
)
return interaction_types
def all_interactions(self):
# Use KLIFS API: Get all structure KLIFS IDs
structures_remote = Structures(self._client)
structures = structures_remote.all_structures()
# Use KLIFS API: Get all interactions from these structures KLIFS IDs
structure_klifs_ids = structures["structure.klifs_id"].to_list()
interactions = self.by_structure_klifs_id(structure_klifs_ids)
# Standardize DataFrame
interactions = self._standardize_dataframe(
interactions,
DATAFRAME_COLUMNS["interactions"],
            REMOTE_COLUMNS_MAPPING["interactions"],
)
return interactions
def by_structure_klifs_id(self, structure_klifs_ids):
structure_klifs_ids = self._ensure_list(structure_klifs_ids)
# Use KLIFS API
result = (
self._client.Interactions.get_interactions_get_IFP(structure_ID=structure_klifs_ids)
.response()
.result
)
# Convert list of ABC objects to DataFrame
interactions = self._abc_to_dataframe(result)
# Standardize DataFrame
interactions = self._standardize_dataframe(
interactions, DATAFRAME_COLUMNS["interactions"], REMOTE_COLUMNS_MAPPING["interactions"]
)
return interactions
def by_ligand_klifs_id(self, ligand_klifs_ids):
ligand_klifs_ids = self._ensure_list(ligand_klifs_ids)
# Use KLIFS API: Get structure KLIFS IDs from ligand KLIFS IDs
structures_remote = Structures(self._client)
structures = structures_remote.by_ligand_klifs_id(ligand_klifs_ids)
# Use KLIFS API: Get interactions from these structure KLIFS IDs
structure_klifs_ids = structures["structure.klifs_id"].to_list()
interactions = self.by_structure_klifs_id(structure_klifs_ids)
# Standardize DataFrame
interactions = self._standardize_dataframe(
interactions, DATAFRAME_COLUMNS["interactions"], REMOTE_COLUMNS_MAPPING["interactions"]
)
return interactions
def by_kinase_klifs_id(self, kinase_klifs_ids):
kinase_klifs_ids = self._ensure_list(kinase_klifs_ids)
# Use KLIFS API: Get structure KLIFS IDs from ligand KLIFS IDs
structures_remote = Structures(self._client)
structures = structures_remote.by_kinase_klifs_id(kinase_klifs_ids)
# Use KLIFS API: Get interactions from these structure KLIFS IDs
structure_klifs_ids = structures["structure.klifs_id"].to_list()
interactions = self.by_structure_klifs_id(structure_klifs_ids)
# Standardize DataFrame
interactions = self._standardize_dataframe(
interactions, DATAFRAME_COLUMNS["interactions"], REMOTE_COLUMNS_MAPPING["interactions"]
)
return interactions
class Pockets(RemoteInitializer, PocketsProvider):
"""
Extends PocketsProvider to provide remote pocket requests.
Refer to PocketsProvider documentation for more information:
opencadd.databases.klifs.core.PocketsProvider
"""
def by_structure_klifs_id(self, structure_klifs_id):
# Use KLIFS API
result = (
self._client.Interactions.get_interactions_match_residues(
structure_ID=structure_klifs_id
)
.response()
.result
)
# Convert to DataFrame and formatting
pocket =
|
pd.DataFrame(result)
|
pandas.DataFrame
|
"""Collection of functions for dealing with timeseries.
Functions:
calc_seasonal_cycle -- Calculate the seasonal cycle
calc_trend -- Calculate the linear trend
convert_to_annual -- Convert the data to annual mean
equalise_time_axes -- Make all the time axes in an iris cube list the same
flux_to_total -- Convert a flux (i.e. per second quantity) to total
get_control_time_constraint -- Define the time constraint for the control data
outlier_removal -- Remove outliers from a timeseries
runmean -- Calculate the running mean
"""
import pdb
import math
import numpy as np
import pandas as pd
import iris
import iris.coord_categorisation
import cf_units
import general_io as gio
import convenient_universal as uconv
def adjust_control_time(cube, ref_cube, branch_index=None, branch_time=None):
"""Adjust the control time axis so it matches the reference cube."""
    if branch_index is None:
if not branch_time:
branch_time = ref_cube.attributes['branch_time']
print('branch time =', branch_time)
branch_index, index_error = uconv.find_nearest(cube.coord('time').points, float(branch_time), index=True)
print('branch index =', branch_index)
else:
print('branch time =', cube.coord('time').points[branch_index])
iris.util.unify_time_units([ref_cube, cube])
adjustment_factor = cube.coord('time').points[branch_index] - ref_cube.coord('time').points[0]
cube.coord('time').points = cube.coord('time').points - adjustment_factor
return cube
def calc_seasonal_cycle(cube):
"""Calculate the seasonal cycle.
cycle = (max - min) for each 12 month window
Args:
cube (iris.cube.Cube)
"""
max_cube = cube.rolling_window('time', iris.analysis.MAX, 12)
min_cube = cube.rolling_window('time', iris.analysis.MIN, 12)
seasonal_cycle_cube = max_cube - min_cube
return seasonal_cycle_cube
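# Illustrative sketch (not part of the original module): the same max-minus-min idea
# expressed on a plain pandas Series instead of an iris cube, to show what each
# 12-month rolling window contributes. The helper name is hypothetical.
def _example_seasonal_cycle_pandas(monthly_values):
    series = pd.Series(monthly_values)
    rolling = series.rolling(window=12)
    # One amplitude value per 12-month window (NaN until 12 values are available).
    return rolling.max() - rolling.min()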
def calc_trend(cube, running_mean=False, per_yr=False,
remove_scaling=False, outlier_threshold=None):
"""Calculate linear trend.
Args:
cube (iris.cube.Cube)
running_mean(bool, optional):
A 12-month running mean can first be applied to the data
per_yr (bool, optional):
Change units from per second to per year
"""
coord_names = [coord.name() for coord in cube.dim_coords]
assert coord_names[0] == 'time'
if remove_scaling:
cube = undo_unit_scaling(cube)
if running_mean:
cube = cube.rolling_window('time', iris.analysis.MEAN, 12)
time_axis = cube.coord('time')
time_axis = convert_to_seconds(time_axis)
trend = np.ma.apply_along_axis(linear_trend, 0, cube.data, time_axis.points, outlier_threshold)
if type(cube.data) == np.ma.core.MaskedArray:
trend = np.ma.masked_values(trend, cube.data.fill_value)
if per_yr:
trend = trend * 60 * 60 * 24 * 365.25
return trend
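# Illustrative sketch (not part of the original module): the per_yr option above simply
# rescales a per-second trend by the number of seconds in a Julian year,
# 60 * 60 * 24 * 365.25 = 31,557,600 s. The helper name is hypothetical.
def _example_per_year_scaling(trend_per_second):
    seconds_per_year = 60 * 60 * 24 * 365.25
    return trend_per_second * seconds_per_year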
def _check_attributes(data_attrs, control_attrs):
"""Make sure the correct control run has been used."""
assert data_attrs['parent_experiment_id'] in [control_attrs['experiment_id'], 'N/A']
control_rip = 'r%si%sp%s' %(control_attrs['realization'],
control_attrs['initialization_method'],
control_attrs['physics_version'])
assert data_attrs['parent_experiment_rip'] in [control_rip, 'N/A']
def _chunked_year_aggregation(cube, agg_method, step=12, days_in_month=False):
"""Chunked conversion to annual timescale.
Args:
cube (iris.cube.Cube)
agg_method (iris.analysis.WeightedAggregator): aggregation method
step (int): Integer number of time steps used in chunking
(For monthly data this would be a multiple of 12)
"""
assert agg_method in [iris.analysis.SUM, iris.analysis.MEAN]
chunk_list = iris.cube.CubeList([])
coord_names = [coord.name() for coord in cube.dim_coords]
start_indexes, step = uconv.get_chunks(cube.shape, coord_names, chunk=True, step=step)
start_year = end_year = -5
for index in start_indexes:
start_year = cube[index:index+step, ...].coord('year').points[0]
assert start_year != end_year
print(start_year)
end_year = cube[index:index+step, ...].coord('year').points[-1]
if days_in_month:
chunk = _days_in_month_annual_mean(cube[index:index+step, ...])
else:
chunk = cube[index:index+step, ...].aggregated_by(['year'], agg_method)
chunk_list.append(chunk)
annual_cube = chunk_list.concatenate()[0]
return annual_cube
def get_days_in_year(cube, return_df=False):
"""Generate an array of days in each year.
Returns a pandas data series.
"""
aux_coord_names = [coord.name() for coord in cube.aux_coords]
if not 'year' in aux_coord_names:
iris.coord_categorisation.add_year(cube, 'time')
assert 'days' in str(cube.coord('time').units)
time_span_days = cube.coord('time').bounds[:, 1] - cube.coord('time').bounds[:, 0]
assert time_span_days.max() < 32
assert time_span_days.min() > 26
df = pd.DataFrame(data={'days_in_month': time_span_days, 'year': cube.coord('year').points})
days_in_year = df.groupby('year').sum()
if return_df:
return df, days_in_year['days_in_month']
else:
return days_in_year['days_in_month']
def _days_in_month_annual_mean(cube):
"""Calculate the annual mean timeseries accounting for days in month."""
df, days_in_year = get_days_in_year(cube, return_df=True)
df['weight'] = df.apply(lambda row: row['days_in_month'] / days_in_year.loc[row['year']], axis=1)
np.testing.assert_allclose(df.groupby('year').sum()['weight'].min(), 1.0)
np.testing.assert_allclose(df.groupby('year').sum()['weight'].max(), 1.0)
weights_array = uconv.broadcast_array(df['weight'].values, 0, cube.shape)
cube.data = cube.data * weights_array
cube = cube.aggregated_by(['year'], iris.analysis.SUM)
return cube
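# Illustrative sketch (not part of the original module): the month-weighting
# idea behind _days_in_month_annual_mean on toy data -- each month's weight is
# its share of the days in that year, so the weights sum to one.
def _example_month_weights():
    """Weights for a non-leap year computed with plain pandas."""
    days = pd.Series([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    weights = days / days.sum()
    np.testing.assert_allclose(weights.sum(), 1.0)
    return weights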
def convert_to_annual(cube, aggregation='mean', chunk=False, days_in_month=False):
"""Convert data to annual timescale.
Args:
cube (iris.cube.Cube)
aggregation (str): Aggregation method, either 'mean' or 'sum'
chunk (bool): Chunk along the time axis
(Set by default to a chunk step of 12 assuming monthly data.)
days_in_month (bool): Account for the fact that each month has a different
number of days.
"""
aux_coord_names = [coord.name() for coord in cube.aux_coords]
if not 'year' in aux_coord_names:
iris.coord_categorisation.add_year(cube, 'time')
iris.coord_categorisation.add_month(cube, 'time')
if not is_annual(cube):
if aggregation == 'mean':
aggregator = iris.analysis.MEAN
elif aggregation == 'sum':
aggregator = iris.analysis.SUM
if days_in_month:
assert aggregation == 'mean'
if chunk:
cube = _chunked_year_aggregation(cube, aggregator, step=36, days_in_month=True)
else:
cube = _days_in_month_annual_mean(cube)
else:
if chunk:
cube = _chunked_year_aggregation(cube, aggregator, step=12)
else:
cube = cube.aggregated_by(['year'], aggregator)
cube.remove_coord('year')
cube.remove_coord('month')
return cube
def convert_to_seconds(time_axis):
"""Convert time axis units to seconds.
Args:
time_axis(iris.DimCoord)
"""
old_units = str(time_axis.units)
old_timestep = old_units.split(' ')[0]
new_units = old_units.replace(old_timestep, 'seconds')
new_unit = cf_units.Unit(new_units, calendar=time_axis.units.calendar)
time_axis.convert_units(new_unit)
return time_axis
def equalise_time_axes(cube_list):
"""Make all the time axes in an iris cube list the same."""
iris.util.unify_time_units(cube_list)
reference_cube = cube_list[0]
new_cube_list = iris.cube.CubeList([])
for cube in cube_list:
assert len(cube.coord('time').points) == len(reference_cube.coord('time').points)
cube.coord('time').points = reference_cube.coord('time').points
cube.coord('time').bounds = reference_cube.coord('time').bounds
cube.coord('time').units = reference_cube.coord('time').units
cube.coord('time').attributes = reference_cube.coord('time').attributes
new_cube_list.append(cube)
return new_cube_list
def flux_to_total(cube):
"""Convert a flux (i.e. per second quantity) to total"""
assert 'days' in str(cube.coord('time').units)
time_span_days = cube.coord('time').bounds[:, 1] - cube.coord('time').bounds[:, 0]
time_span_seconds = time_span_days * 60 * 60 * 24
cube.data = cube.data * time_span_seconds
units = str(cube.units)
assert ('s-1' in units) or ('W' in units), 'input units must be a flux per second'
if 's-1' in units:
cube.units = units.replace('s-1', '')
elif 'W' in units:
cube.units = units.replace('W', 'J')
return cube
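# Illustrative sketch (not part of the original module): the unit arithmetic
# behind flux_to_total on a plain number -- a constant flux (per second)
# integrated over an interval of a given number of days.
def _example_flux_to_total_scalar(flux_per_second, days_in_interval):
    """Return the interval total implied by a constant per-second flux."""
    seconds_in_interval = days_in_interval * 24 * 60 * 60
    return flux_per_second * seconds_in_interval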
def get_control_time_constraint(control_cube, ref_cube, time_bounds, branch_time=None):
"""Define the time constraint for control data.
Args:
control_cube (iris.cube.Cube): cube for piControl experiment
ref_cube (iris.cube.Cube): reference cube (e.g. from historical experiment)
time_bounds (list): selected time periods from reference cube
(e.g. ['1861-01-01', '2005-12-31'])
branch_time (float): Override the branch time in the ref_cube attributes
"""
_check_attributes(ref_cube.attributes, control_cube.attributes)
iris.coord_categorisation.add_year(control_cube, 'time')
iris.coord_categorisation.add_year(ref_cube, 'time')
if not branch_time:
branch_time = ref_cube.attributes['branch_time']
index = 0
for bounds in control_cube.coord('time').bounds:
lower, upper = bounds
if lower <= float(branch_time) < upper:
break
else:
index = index + 1
branch_year = control_cube.coord('year').points[index]
ref_start_year = ref_cube.coord('year').points[0]
start_gap = int(time_bounds[0].split('-')[0]) - ref_start_year
end_gap = int(time_bounds[1].split('-')[0]) - ref_start_year
control_start_year = branch_year + start_gap
control_end_year = branch_year + end_gap
control_start_date = str(control_start_year).zfill(4)+'-01-01'
control_end_date = str(control_end_year).zfill(4)+'-12-31'
time_constraint = gio.get_time_constraint([control_start_date, control_end_date])
control_cube.remove_coord('year')
ref_cube.remove_coord('year')
return time_constraint
def is_annual(cube):
"""Check whether the data is annual timescale."""
year_diffs = np.diff(cube.coord('year').points)
if year_diffs.min() < 1:
annual = False
else:
annual = True
return annual
def fit_polynomial(data, time_axis, order, outlier_threshold):
"""Fit a polynomial to data.
e.g. order 1 polynomial, polyfit returns [b, a] corresponding to y = a + bx
"""
if outlier_threshold:
data, outlier_idx = outlier_removal(data, outlier_threshold)
coefficients = np.ma.polyfit(time_axis, data, order)
return coefficients
def linear_trend(data, time_axis, outlier_threshold):
"""Calculate the linear trend.
polyfit returns [b, a] corresponding to y = a + bx
"""
masked_flag = False
if type(data) == np.ma.core.MaskedArray:
if type(data.mask) == np.bool_:
if data.mask:
masked_flag = True
elif data.mask[0]:
masked_flag = True
if masked_flag:
return data.fill_value
else:
coefficients = fit_polynomial(data, time_axis, 1, outlier_threshold)
return coefficients[0]
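# Illustrative check (not part of the original module): polyfit with order=1
# returns [b, a] for y = a + bx, so linear_trend returns the slope b.
def _example_linear_trend_check():
    """The fitted slope of y = 2x + 1 is (approximately) 2."""
    x = np.arange(10.0)
    y = 2.0 * x + 1.0
    slope = linear_trend(y, x, outlier_threshold=None)
    np.testing.assert_allclose(slope, 2.0)
    return slope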
def runmean(data, window_width, overlap=True):
"""Calculate the running mean.
If overlap is false, the windows won't overlap.
"""
if overlap:
runmean_data = np.convolve(data, np.ones((window_width,))/window_width, mode='valid')
else:
nchunks = math.ceil(len(data) / window_width)
split_data = [x for x in np.array_split(data, nchunks) if x.size == window_width]
runmean_data = np.array(list(map(np.mean, split_data)))
return runmean_data
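# Illustrative sketch (not part of the original module): overlapping versus
# non-overlapping running means of a short series with window_width=2.
def _example_runmean():
    """Overlapping gives [1.5, 2.5, 3.5]; non-overlapping gives [1.5, 3.5]."""
    data = np.array([1.0, 2.0, 3.0, 4.0])
    overlapping = runmean(data, 2, overlap=True)
    non_overlapping = runmean(data, 2, overlap=False)
    return overlapping, non_overlapping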
def outlier_removal(data, outlier_threshold, replacement_method='missing'):
"""Remove outliers from a timeseries.
Args:
data (numpy.array)
outlier_threshold (float): remove points that deviate from
the rolling median by greater than this threshold
replacement_method (str): method for replacing outliers
"""
assert replacement_method in ['missing', 'mean']
data_series = pd.Series(data)
# ActivitySim
# See full license in LICENSE.txt.
from builtins import range
import logging
import numpy as np
import pandas as pd
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import pipeline
from activitysim.core import simulate
from activitysim.core import inject
from activitysim.core.tracing import print_elapsed_time
from activitysim.core.util import reindex
from activitysim.core.util import assign_in_place
from .util import expressions
from activitysim.core import assign
from activitysim.abm.tables.size_terms import tour_destination_size_terms
from activitysim.core.skim import DataFrameMatrix
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core.interaction_sample import interaction_sample
from activitysim.abm.models.util.trip import cleanup_failed_trips
from activitysim.abm.models.util.trip import flag_failed_trip_leg_mates
logger = logging.getLogger(__name__)
NO_DESTINATION = -1
def get_spec_for_purpose(model_settings, spec_name, purpose):
omnibus_spec = simulate.read_model_spec(file_name=model_settings[spec_name])
spec = omnibus_spec[[purpose]]
# might as well ignore any spec rows with 0 utility
spec = spec[spec.iloc[:, 0] != 0]
assert spec.shape[0] > 0
return spec
def trip_destination_sample(
primary_purpose,
trips,
alternatives,
model_settings,
size_term_matrix, skims,
chunk_size, trace_hh_id,
trace_label):
"""
Returns
-------
destination_sample: pandas.dataframe
choices_df from interaction_sample with (up to) sample_size alts for each chooser row
index (non unique) is trip_id from trips (duplicated for each alt)
and columns dest_taz, prob, and pick_count
dest_taz: int
alt identifier (dest_taz) from alternatives[<alt_col_name>]
prob: float
the probability of the chosen alternative
pick_count : int
number of duplicate picks for chooser, alt
"""
trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_sample')
spec = get_spec_for_purpose(model_settings, 'DESTINATION_SAMPLE_SPEC', primary_purpose)
sample_size = model_settings['SAMPLE_SIZE']
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
logger.info("Running %s with %d trips", trace_label, trips.shape[0])
locals_dict = config.get_model_constants(model_settings).copy()
locals_dict.update({
'size_terms': size_term_matrix
})
locals_dict.update(skims)
destination_sample = interaction_sample(
choosers=trips,
alternatives=alternatives,
sample_size=sample_size,
alt_col_name=alt_dest_col_name,
allow_zero_probs=True,
spec=spec,
skims=skims,
locals_d=locals_dict,
chunk_size=chunk_size,
trace_label=trace_label)
return destination_sample
def compute_ood_logsums(
choosers,
logsum_settings,
od_skims,
locals_dict,
chunk_size,
trace_label):
"""
Compute one (of two) out-of-direction logsums for destination alternatives
Will either be trip_origin -> alt_dest or alt_dest -> primary_dest
"""
locals_dict.update(od_skims)
expressions.annotate_preprocessors(
choosers, locals_dict, od_skims,
logsum_settings,
trace_label)
nest_spec = config.get_logit_model_settings(logsum_settings)
logsum_spec = simulate.read_model_spec(file_name=logsum_settings['SPEC'])
logsums = simulate.simple_simulate_logsums(
choosers,
logsum_spec,
nest_spec,
skims=od_skims,
locals_d=locals_dict,
chunk_size=chunk_size,
trace_label=trace_label)
assert logsums.index.equals(choosers.index)
# FIXME not strictly necessary, but would make trace files more legible?
# logsums = logsums.replace(-np.inf, -999)
return logsums
def compute_logsums(
primary_purpose,
trips,
destination_sample,
tours_merged,
model_settings,
skims,
chunk_size,
trace_label):
"""
Calculate mode choice logsums using the same recipe as for trip_mode_choice, but do it twice
for each alternative since we need out-of-direction logsum
(i.e. origin to alt_dest, and alt_dest to half-tour destination)
Returns
-------
adds od_logsum and dp_logsum columns to trips (in place)
"""
trace_label = tracing.extend_trace_label(trace_label, 'compute_logsums')
logger.info("Running %s with %d samples", trace_label, destination_sample.shape[0])
# - trips_merged - merge trips and tours_merged
trips_merged = pd.merge(
trips,
tours_merged,
left_on='tour_id',
right_index=True,
how="left")
assert trips_merged.index.equals(trips.index)
# - choosers - merge destination_sample and trips_merged
# re/set index because pandas merge does not preserve left index if it has duplicate values!
choosers = pd.merge(destination_sample,
trips_merged.reset_index(),
left_index=True,
right_on='trip_id',
how="left",
suffixes=('', '_r')).set_index('trip_id')
assert choosers.index.equals(destination_sample.index)
logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
omnibus_coefficient_spec = \
assign.read_constant_spec(config.config_file_path(logsum_settings['COEFFICIENTS']))
coefficient_spec = omnibus_coefficient_spec[primary_purpose]
constants = config.get_model_constants(logsum_settings)
locals_dict = assign.evaluate_constants(coefficient_spec, constants=constants)
locals_dict.update(constants)
# - od_logsums
od_skims = {
'ORIGIN': model_settings['TRIP_ORIGIN'],
'DESTINATION': model_settings['ALT_DEST_COL_NAME'],
"odt_skims": skims['odt_skims'],
"dot_skims": skims['dot_skims'],
"od_skims": skims['od_skims'],
}
destination_sample['od_logsum'] = compute_ood_logsums(
choosers,
logsum_settings,
od_skims,
locals_dict,
chunk_size,
trace_label=tracing.extend_trace_label(trace_label, 'od'))
# - dp_logsums
dp_skims = {
'ORIGIN': model_settings['ALT_DEST_COL_NAME'],
'DESTINATION': model_settings['PRIMARY_DEST'],
"odt_skims": skims['dpt_skims'],
"dot_skims": skims['pdt_skims'],
"od_skims": skims['dp_skims'],
}
destination_sample['dp_logsum'] = compute_ood_logsums(
choosers,
logsum_settings,
dp_skims,
locals_dict,
chunk_size,
trace_label=tracing.extend_trace_label(trace_label, 'dp'))
return destination_sample
def trip_destination_simulate(
primary_purpose,
trips,
destination_sample,
model_settings,
want_logsums,
size_term_matrix, skims,
chunk_size, trace_hh_id,
trace_label):
"""
Chose destination from destination_sample (with od_logsum and dp_logsum columns added)
Returns
-------
choices - pandas.Series
destination alt chosen
"""
trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_simulate')
spec = get_spec_for_purpose(model_settings, 'DESTINATION_SPEC', primary_purpose)
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
logger.info("Running trip_destination_simulate with %d trips", len(trips))
locals_dict = config.get_model_constants(model_settings).copy()
locals_dict.update({
'size_terms': size_term_matrix
})
locals_dict.update(skims)
destinations = interaction_sample_simulate(
choosers=trips,
alternatives=destination_sample,
spec=spec,
choice_column=alt_dest_col_name,
want_logsums=want_logsums,
allow_zero_probs=True, zero_prob_choice_val=NO_DESTINATION,
skims=skims,
locals_d=locals_dict,
chunk_size=chunk_size,
trace_label=trace_label,
trace_choice_name='trip_dest')
if not want_logsums:
# for consistency, always return a dataframe with canonical column name
assert isinstance(destinations, pd.Series)
destinations = destinations.to_frame('choice')
# drop any failed zero_prob destinations
if (destinations.choice == NO_DESTINATION).any():
# logger.debug("dropping %s failed destinations", (destinations == NO_DESTINATION).sum())
destinations = destinations[destinations.choice != NO_DESTINATION]
return destinations
def choose_trip_destination(
primary_purpose,
trips,
alternatives,
tours_merged,
model_settings,
want_logsums,
want_sample_table,
size_term_matrix, skims,
chunk_size, trace_hh_id,
trace_label):
logger.info("choose_trip_destination %s with %d trips", trace_label, trips.shape[0])
t0 = print_elapsed_time()
# - trip_destination_sample
destination_sample = trip_destination_sample(
primary_purpose=primary_purpose,
trips=trips,
alternatives=alternatives,
model_settings=model_settings,
size_term_matrix=size_term_matrix, skims=skims,
chunk_size=chunk_size, trace_hh_id=trace_hh_id,
trace_label=trace_label)
dropped_trips = ~trips.index.isin(destination_sample.index.unique())
if dropped_trips.any():
logger.warning("%s trip_destination_sample %s trips "
"without viable destination alternatives" %
(trace_label, dropped_trips.sum()))
trips = trips[~dropped_trips]
t0 = print_elapsed_time("%s.trip_destination_sample" % trace_label, t0)
if trips.empty:
return pd.Series(index=trips.index).to_frame('choice'), None
# - compute logsums
destination_sample = compute_logsums(
primary_purpose=primary_purpose,
trips=trips,
destination_sample=destination_sample,
tours_merged=tours_merged,
model_settings=model_settings,
skims=skims,
chunk_size=chunk_size,
trace_label=trace_label)
t0 = print_elapsed_time("%s.compute_logsums" % trace_label, t0)
# - trip_destination_simulate
destinations = trip_destination_simulate(
primary_purpose=primary_purpose,
trips=trips,
destination_sample=destination_sample,
model_settings=model_settings,
want_logsums=want_logsums,
size_term_matrix=size_term_matrix, skims=skims,
chunk_size=chunk_size, trace_hh_id=trace_hh_id,
trace_label=trace_label)
dropped_trips = ~trips.index.isin(destinations.index)
if dropped_trips.any():
logger.warning("%s trip_destination_simulate %s trips "
"without viable destination alternatives" %
(trace_label, dropped_trips.sum()))
if want_sample_table:
# FIXME - sample_table
destination_sample.set_index(model_settings['ALT_DEST_COL_NAME'], append=True, inplace=True)
else:
destination_sample = None
t0 = print_elapsed_time("%s.trip_destination_simulate" % trace_label, t0)
return destinations, destination_sample
def wrap_skims(model_settings):
"""
wrap skims of trip destination using origin, dest column names from model settings.
Several of these are used by destination_sample, compute_logsums, and destination_simulate
so we create them all here with canonical names.
Note that compute_logsums aliases their names so it can use the same equations to compute
logsums from origin to alt_dest, and from alt_dest to primary destination
odt_skims - SkimStackWrapper: trip origin, trip alt_dest, time_of_day
dot_skims - SkimStackWrapper: trip alt_dest, trip origin, time_of_day
dpt_skims - SkimStackWrapper: trip alt_dest, trip primary_dest, time_of_day
pdt_skims - SkimStackWrapper: trip primary_dest, trip alt_dest, time_of_day
od_skims - SkimDictWrapper: trip origin, trip alt_dest
dp_skims - SkimDictWrapper: trip alt_dest, trip primary_dest
Parameters
----------
model_settings
Returns
-------
dict containing skims, keyed by canonical names relative to tour orientation
"""
skim_dict = inject.get_injectable('skim_dict')
skim_stack = inject.get_injectable('skim_stack')
o = model_settings['TRIP_ORIGIN']
d = model_settings['ALT_DEST_COL_NAME']
p = model_settings['PRIMARY_DEST']
skims = {
"odt_skims": skim_stack.wrap(left_key=o, right_key=d, skim_key='trip_period'),
"dot_skims": skim_stack.wrap(left_key=d, right_key=o, skim_key='trip_period'),
"dpt_skims": skim_stack.wrap(left_key=d, right_key=p, skim_key='trip_period'),
"pdt_skims": skim_stack.wrap(left_key=p, right_key=d, skim_key='trip_period'),
"od_skims": skim_dict.wrap(o, d),
"dp_skims": skim_dict.wrap(d, p),
}
return skims
def run_trip_destination(
trips,
tours_merged,
chunk_size, trace_hh_id,
trace_label,
fail_some_trips_for_testing=False):
"""
trip destination - main functionality separated from model step so it can be called iteratively
Run the trip_destination model, assigning destinations for each (intermediate) trip
(last trips already have a destination - either the tour primary destination or Home)
Set trip destination and origin columns, and a boolean failed flag for any failed trips
(destination for flagged failed trips will be set to -1)
Parameters
----------
trips
tours_merged
want_sample_table
chunk_size
trace_hh_id
trace_label
Returns
-------
"""
model_settings = config.read_model_settings('trip_destination.yaml')
preprocessor_settings = model_settings.get('preprocessor', None)
logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
logsum_column_name = model_settings.get('DEST_CHOICE_LOGSUM_COLUMN_NAME')
want_logsums = logsum_column_name is not None
sample_table_name = model_settings.get('DEST_CHOICE_SAMPLE_TABLE_NAME')
want_sample_table = config.setting('want_dest_choice_sample_tables') and sample_table_name is not None
land_use = inject.get_table('land_use')
size_terms = inject.get_injectable('size_terms')
# - initialize trip origin and destination to those of half-tour
# (we will sequentially adjust intermediate trips origin and destination as we choose them)
tour_destination = reindex(tours_merged.destination, trips.tour_id).astype(np.int64)
tour_origin = reindex(tours_merged.origin, trips.tour_id).astype(np.int64)
trips['destination'] = np.where(trips.outbound, tour_destination, tour_origin)
trips['origin'] = np.where(trips.outbound, tour_origin, tour_destination)
trips['failed'] = False
trips = trips.sort_index()
trips['next_trip_id'] = np.roll(trips.index, -1)
trips.next_trip_id = trips.next_trip_id.where(trips.trip_num < trips.trip_count, 0)
# - filter tours_merged (AFTER copying destination and origin columns to trips)
# tours_merged is used for logsums, we filter it here upfront to save space and time
tours_merged_cols = logsum_settings['TOURS_MERGED_CHOOSER_COLUMNS']
if 'REDUNDANT_TOURS_MERGED_CHOOSER_COLUMNS' in model_settings:
redundant_cols = model_settings['REDUNDANT_TOURS_MERGED_CHOOSER_COLUMNS']
tours_merged_cols = [c for c in tours_merged_cols if c not in redundant_cols]
tours_merged = tours_merged[tours_merged_cols]
# - skims
skims = wrap_skims(model_settings)
# - size_terms and alternatives
alternatives = tour_destination_size_terms(land_use, size_terms, 'trip')
# DataFrameMatrix allows us to treat the dataframe as a virtual 2-D array, indexed by TAZ and purpose
# e.g. size_terms.get(df.dest_taz, df.purpose)
# returns a series of size_terms for each chooser's dest_taz and purpose with chooser index
size_term_matrix = DataFrameMatrix(alternatives)
# don't need size terms in alternatives, just TAZ index
alternatives = alternatives.drop(alternatives.columns, axis=1)
alternatives.index.name = model_settings['ALT_DEST_COL_NAME']
sample_list = []
# - process intermediate trips in ascending trip_num order
intermediate = trips.trip_num < trips.trip_count
if intermediate.any():
first_trip_num = trips[intermediate].trip_num.min()
last_trip_num = trips[intermediate].trip_num.max()
# iterate over trips in ascending trip_num order
for trip_num in range(first_trip_num, last_trip_num + 1):
nth_trips = trips[intermediate & (trips.trip_num == trip_num)]
nth_trace_label = tracing.extend_trace_label(trace_label, 'trip_num_%s' % trip_num)
# - annotate nth_trips
if preprocessor_settings:
expressions.assign_columns(
df=nth_trips,
model_settings=preprocessor_settings,
locals_dict=config.get_model_constants(model_settings),
trace_label=nth_trace_label)
logger.info("Running %s with %d trips", nth_trace_label, nth_trips.shape[0])
# - choose destination for nth_trips, segmented by primary_purpose
choices_list = []
for primary_purpose, trips_segment in nth_trips.groupby('primary_purpose'):
choices, destination_sample = choose_trip_destination(
primary_purpose,
trips_segment,
alternatives,
tours_merged,
model_settings,
want_logsums,
want_sample_table,
size_term_matrix, skims,
chunk_size, trace_hh_id,
trace_label=tracing.extend_trace_label(nth_trace_label, primary_purpose))
choices_list.append(choices)
if want_sample_table:
assert destination_sample is not None
sample_list.append(destination_sample)
destinations_df = pd.concat(choices_list)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import xgboost as xgb
import pickle
import base64
import pandas as pd
import numpy as np
import json
from singa_auto.model import BaseModel, IntegerKnob, FloatKnob, logger
from singa_auto.model.dev import test_model_class
from singa_auto.constants import ModelDependency
class XgbClf(BaseModel):
'''
Implements an XGBoost classifier for tabular data classification tasks
'''
@staticmethod
def get_knob_config():
return {
'n_estimators': IntegerKnob(50, 200),
'min_child_weight': IntegerKnob(1, 6),
'max_depth': IntegerKnob(2, 8),
'gamma': FloatKnob(0.0, 1.0, is_exp=False),
'subsample': FloatKnob(0.5, 1.0, is_exp=False),
'colsample_bytree': FloatKnob(0.1, 0.7, is_exp=False)
}
def __init__(self, **knobs):
self.__dict__.update(knobs)
def train(self, dataset_path, features=None, target=None, **kwargs):
# Record features & target
self._features = features
self._target = target
# Load CSV file as pandas dataframe
csv_path = dataset_path
data = pd.read_csv(csv_path)
# Extract X & y from dataframe
(X, y) = self._extract_xy(data)
# Encode categorical features
X = self._encoding_categorical_type(X)
num_class = y.unique().size
self._clf = self._build_classifier(self.n_estimators, self.min_child_weight, \
self.max_depth, self.gamma, self.subsample, self.colsample_bytree, num_class)
self._clf.fit(X, y)
# Compute train accuracy
score = self._clf.score(X, y)
logger.log('Train accuracy: {}'.format(score))
def evaluate(self, dataset_path):
# Load CSV file as pandas dataframe
csv_path = dataset_path
data = pd.read_csv(csv_path)
# Extract X & y from dataframe
(X, y) = self._extract_xy(data)
# Encode categorical features
X = self._encoding_categorical_type(X)
accuracy = self._clf.score(X, y)
return accuracy
def predict(self, queries):
queries = [pd.DataFrame(query, index=[0]) for query in queries]
import logging
from typing import NamedTuple, Union
import numpy
import pandas
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.statespace.sarimax import SARIMAX
from hts._t import ModelT, NAryTreeT, TimeSeriesModelT, TransformT
from hts.core.exceptions import InvalidArgumentException
from hts.hierarchy import HierarchyTree
from hts.transforms import BoxCoxTransformer, FunctionTransformer
logger = logging.getLogger(__name__)
class TimeSeriesModel(TimeSeriesModelT):
"""Base class for the implementation of the underlying models.
Inherits from scikit-learn base classes
"""
def __init__(
self, kind: str, node: HierarchyTree, transform: TransformT = False, **kwargs
):
"""
Parameters
----------
kind : str
One of `prophet`, `sarimax`, `auto-arima`, `holt-winters`
node : HierarchyTree
Node
transform : Bool or NamedTuple
kwargs
Keyword arguments to be passed to the model instantiation. See the documentation
of each of the actual model implementations for a more comprehensive treatment
"""
if kind not in ModelT.names():
raise InvalidArgumentException(
f'Model {kind} not valid. Pick one of: {" ".join(ModelT.names())}'
)
self.kind = kind
self.node = node
self.transform_function = self._set_transform(transform=transform)
self.model = self.create_model(**kwargs)
self.forecast = None
self.residual = None
self.mse = None
def _set_transform(self, transform: TransformT):
if transform is False or transform is None:
return FunctionTransformer(func=self._no_func, inv_func=self._no_func)
elif transform is True:
return BoxCoxTransformer()
elif isinstance(transform, tuple):
if not hasattr(transform, "func") or not hasattr(transform, "inv_func"):
raise ValueError(
"If passing a NamedTuple, it must have a `func` and `inv_func` parameters"
)
return FunctionTransformer(
func=getattr(transform, "func"), inv_func=getattr(transform, "inv_func")
)
else:
raise ValueError(
"Invalid transform passed. Use either `True` for default boxcox transform or "
"a `NamedTuple(func: Callable, inv_func: Callable)` for custom transforms"
)
def _set_results_return_self(self, in_sample, y_hat):
in_sample = self.transform_function.inverse_transform(in_sample)
y_hat = self.transform_function.inverse_transform(y_hat)
self.forecast = pandas.DataFrame(
{"yhat": numpy.concatenate([in_sample, y_hat])}
)
self.residual = (in_sample - self._get_transformed_data(as_series=True)).values
self.mse = numpy.mean(numpy.array(self.residual) ** 2)
return self
def _get_transformed_data(
self, as_series: bool = False
) -> Union[pandas.DataFrame, pandas.Series]:
key = self.node.key
value = self.node.item
transformed = self.transform_function.transform(value[key])
if as_series:
return pandas.Series(transformed)
else:
return pandas.DataFrame({key: transformed})
#%% [markdown]
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Kaggle Challenge, Module 1
#
# ## Assignment
# - [ ] Do train/validate/test split with the Tanzania Waterpumps data.
# - [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features. (For example, [what other columns have zeros and shouldn't?](https://github.com/Quartz/bad-data-guide#zeros-replace-missing-values) What other columns are duplicates, or nearly duplicates? Can you extract the year from date_recorded? Can you engineer new features, such as the number of years from waterpump construction to waterpump inspection?)
# - [ ] Select features. Use a scikit-learn pipeline to encode categoricals, impute missing values, and fit a decision tree classifier.
# - [ ] Get your validation accuracy score.
# - [ ] Get and plot your feature importances.
# - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
#
# ## Stretch Goals
#
# ### Reading
#
# - A Visual Introduction to Machine Learning
# - [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/)
# - [Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)
# - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)
# - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)
# - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)
# - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) — _Don’t worry about understanding the code, just get introduced to the concepts. This 10 minute video has excellent diagrams and explanations._
# - [Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)
#
#
# ### Doing
# - [ ] Add your own stretch goal(s) !
# - [ ] Try other [scikit-learn imputers](https://scikit-learn.org/stable/modules/impute.html).
# - [ ] Make exploratory visualizations and share on Slack.
#
#
# #### Exploratory visualizations
#
# Visualize the relationships between feature(s) and target. I recommend you do this with your training set, after splitting your data.
#
# For this problem, you may want to create a new column to represent the target as a number, 0 or 1. For example:
#
# ```python
# train['functional'] = (train['status_group']=='functional').astype(int)
# ```
#
#
#
# You can try [Seaborn "Categorical estimate" plots](https://seaborn.pydata.org/tutorial/categorical.html) for features with reasonably few unique values. (With too many unique values, the plot is unreadable.)
#
# - Categorical features. (If there are too many unique values, you can replace less frequent values with "OTHER.")
# - Numeric features. (If there are too many unique values, you can [bin with pandas cut / qcut functions](https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=qcut#discretization-and-quantiling).)
#
# You can try [Seaborn linear model plots](https://seaborn.pydata.org/tutorial/regression.html) with numeric features. For this classification problem, you may want to use the parameter `logistic=True`, but it can be slow.
#
# You do _not_ need to use Seaborn, but it's nice because it includes confidence intervals to visualize uncertainty.
#
# #### High-cardinality categoricals
#
# This code from a previous assignment demonstrates how to replace less frequent values with 'OTHER'
#
# ```python
# # Reduce cardinality for NEIGHBORHOOD feature ...
#
# # Get a list of the top 10 neighborhoods
# top10 = train['NEIGHBORHOOD'].value_counts()[:10].index
#
# # At locations where the neighborhood is NOT in the top 10,
# # replace the neighborhood with 'OTHER'
# train.loc[~train['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# test.loc[~test['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# ```
#
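#
# #### Binning numeric features (sketch)
#
# A minimal sketch of binning a numeric feature with pandas `qcut` before
# plotting it as a categorical. The column name `amount_tsh` is only an
# example; substitute any numeric feature from your training set.
#
# ```python
# # 4 quantile-based bins; duplicates='drop' handles repeated bin edges
# train['amount_tsh_binned'] = pandas.qcut(train['amount_tsh'], q=4, duplicates='drop')
# ```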
#%%
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
get_ipython().system('pip install category_encoders==2.*')
# If you're working locally:
else:
DATA_PATH = './data/'
#%%
import pandas
from sklearn.model_selection import train_test_split
train = pandas.merge(pandas.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pandas.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test_features = pandas.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pandas.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test_features.shape
#%%
from typing import Optional
def keepTopN( column:pandas.Series,
n:int,
default:Optional[object] = None) -> pandas.Series:
"""
Keeps the top n most popular values of a Series, while replacing the rest with `default`
Args:
column (pandas.Series): Series to operate on
n (int): How many values to keep
default (object, optional): Defaults to NaN. Value with which to replace remaining values
Returns:
pandas.Series: Series with the most popular n values
"""
import numpy
if default is None: default = numpy.nan
val_counts = column.value_counts()
if n > len(val_counts): n = len(val_counts)
top_n = list(val_counts[:n].index)
return(column.where(column.isin(top_n), other=default))
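# Illustrative sketch (not part of the original notebook): keepTopN on a toy
# Series -- keep the single most common value and replace the rest with 'other'.
def _example_keepTopN():
    """Returns ['a', 'a', 'a', 'other', 'other']."""
    toy = pandas.Series(['a', 'a', 'a', 'b', 'c'])
    return keepTopN(toy, 1, default='other')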
def oneHot( frame:pandas.DataFrame,
cols:Optional[list] = None,
exclude_cols:Optional[list] = None,
max_cardinality:Optional[int] = None) -> pandas.DataFrame:
"""
One-hot encodes the dataframe.
Args:
frame (pandas.DataFrame): Dataframe to clean
cols (list, optional): Columns to one-hot encode. Defaults to all string columns.
exclude_cols (list, optional): Columns to skip one-hot encoding. Defaults to None.
max_cardinality (int, optional): Maximum cardinality of columns to encode. Defaults to no maximum cardinality.
Returns:
pandas.DataFrame: The one_hot_encoded dataframe.
"""
import category_encoders
one_hot_encoded = frame.copy()
if cols is None: cols = list(one_hot_encoded.columns[one_hot_encoded.dtypes=='object'])
if exclude_cols is not None:
for col in exclude_cols:
cols.remove(col)
if max_cardinality is not None:
described = one_hot_encoded[cols].describe(exclude=[numpy.number])
cols = list(described.columns[described.loc['unique'] <= max_cardinality])
encoder = category_encoders.OneHotEncoder(return_df=True, use_cat_names=True, cols=cols)
one_hot_encoded = encoder.fit_transform(one_hot_encoded)
return(one_hot_encoded)
#%%
def clean_X(df, max_ordinality=100, int_ts=False, n_clusters=100):
cleaned = df.copy().drop(columns=['recorded_by'])
from sklearn.cluster import KMeans
kmeans=KMeans(n_clusters=n_clusters)
kmeans.fit(cleaned[['latitude', 'longitude']])
cleaned['cluster'] = kmeans.labels_
categorical_description = cleaned.describe(exclude=[numpy.number])
if int_ts:
cat_cols = categorical_description.drop(columns=['date_recorded']).columns
else:
cat_cols = categorical_description.columns
# high_ordinality_cols = categorical_description[categorical_description.loc['unique'] > max_ordinality].columns
for col in cat_cols:
cleaned[col] = keepTopN(cleaned[col], max_ordinality, default='other')
if int_ts:
cleaned['date_recorded_dt'] = pandas.to_datetime(df['date_recorded'])
cleaned['date_recorded_ts'] = cleaned['date_recorded_dt'].view('int64')
return(cleaned.drop(columns=['date_recorded_dt', 'date_recorded']))
else:
return(cleaned)
#%%
train_targets = train.sort_values(by=['id'])['status_group'].replace({'functional': 1, 'functional needs repair': 2, 'non functional': 3})
train_features = train.sort_values(by=['id']).drop(columns=['status_group'])
#%%
import numpy
combined = pandas.concat([train_features, test_features])
cleaned_combined = oneHot(clean_X(combined, max_ordinality=100, int_ts=True))
cleaned_train = cleaned_combined[cleaned_combined['id'].isin(train_features['id'])].sort_values(by=['id'])
cleaned_test = cleaned_combined[cleaned_combined['id'].isin(test_features['id'])].sort_values(by=['id'])
#%%
#%%
X_train, X_val, y_train, y_val = train_test_split(cleaned_train, train_targets, random_state=33)
#%%
from sklearn.pipeline import Pipeline
import sklearn.preprocessing as preprocessing
from sklearn.tree import DecisionTreeClassifier
import sklearn.model_selection as model_selection
#%%
train_scores = []
test_scores = []
min_samples_range = list(range(2500,250,-50)) + list(range(250,50,-10)) + list(range(50,1,-1))
for i in min_samples_range:
print(i)
dtc = DecisionTreeClassifier( min_samples_leaf=i,
criterion='entropy',
random_state=i)
pipeline = Pipeline([('DecisionTreeClassifier', dtc)])
pipeline.fit(X_train, y_train)
train_scores.append(pipeline.score(X_train, y_train))
test_scores.append(pipeline.score(X_val, y_val))
#%%
import matplotlib.pyplot as pyplot
pyplot.rcParams['figure.facecolor'] = '#002B36'
pyplot.rcParams['axes.facecolor'] = 'black'
pyplot.rcParams['figure.figsize'] = (10,8)
pyplot.plot(min_samples_range, train_scores, label='Train')
pyplot.plot(min_samples_range, test_scores, label='Test')
pyplot.xscale('log')
pyplot.xlim(left=1000, right=1)
# pyplot.gcf().axes[0].set_xticks(range(1000,-50, -5))
pyplot.title('Accuracy vs Minimum leaf size')
pyplot.xlabel('min_samples_leaf')
pyplot.ylabel('Accuracy score')
pyplot.legend()
pyplot.show()
#%%
max(test_scores)
#%%
dtc = DecisionTreeClassifier( min_samples_leaf=10,
criterion='entropy',
random_state=33)
pipeline = Pipeline([('DecisionTreeClassifier', dtc)])
pipeline.fit(X_train, y_train)
pipeline.score(X_val, y_val)
#%%
y_pred = pipeline.predict(cleaned_test)
out_df = pandas.DataFrame(y_pred, index=cleaned_test['id'], columns=['status_group'])
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 14:24:56 2021
@author: <NAME>
Script created for determination of optimal power generation mix with
power production from DK1 and DK2.
- Includes heating sector
- Possible to add CO2 constraint
Reads data for the period 2017 downloaded from
data.open-power-system-data.org
Capacity factor is determined using installed capacity per production type
data from www.transparency.entsoe.eu
"""
#%% Import and define
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from pandas.tseries.offsets import DateOffset
def annuity(n,r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if r > 0:
return r/(1. - 1./(1.+r)**n)
else:
return 1/n
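# Illustrative check (not part of the original script): the docstring example,
# annuity(20, 0.05) * 20, evaluates to roughly 1.6.
def _example_annuity_check():
    """Annualised cost factor for a 20-year asset at a 5% discount rate."""
    value = annuity(20, 0.05) * 20
    assert abs(value - 1.6) < 0.01
    return value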
# Create network and snapshot
network = pypsa.Network()
hours_in_2017 = pd.date_range('2017-01-01T00:00Z','2017-12-31T23:00Z', freq='H')
network.set_snapshots(hours_in_2017)
# Load data: Demand and generators for 6 regions
df_elec = pd.read_csv('data/2017_entsoe.csv', sep=',', index_col=0) # in MWh
df_elec.index = pd.to_datetime(df_elec.index) #change index to datatime
df_heat = pd.read_csv('data/heat_demand.csv', sep=';', index_col=0)
df_heat.index = pd.to_datetime(df_heat.index)
df_heat.index = df_heat.index + DateOffset(years=2)
import os
import unittest
import pandas as pd
import pandas.testing as pd_testing
from non_duplicate_lesion_id import non_duplicate_lesion_id
class TestCreateNonDuplicateLesionId(unittest.TestCase):
def assertDataframeEqual(self, a, b, msg):
try:
    pd_testing.assert_frame_equal(a, b)
except AssertionError as e:
    # re-raise as a unittest failure so the supplied msg is reported
    raise self.failureException(msg) from e
import pandas as pd
outpath = '../output/JH_US/'
cases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv')
# there are some counties without fips, but I am adding the one that should be
cases['FIPS'] = cases['FIPS'].fillna(cases['UID']-84000000)
# there are counties that are called Unassigned and Out of [state] for each state,
# that blows up in my face when I try to take it as a county because
# there are inconsistencies in the count of cases and deceased. let's eliminate!
cases = cases[cases['Admin2']!='Unassigned']
cases = cases[~cases['Admin2'].astype(str).str.startswith('Out of ')]
# There are weird fips, 60, 88888, 99999, 66, 69 and 78 (cruises and states) without Admin 2 column, fill with Province_State
cases['Admin2'] = cases['Admin2'].fillna(cases['Province_State'])
# Irrelevant columns
drop_col = ['UID', 'iso2', 'iso3', 'code3', 'Country_Region', 'Lat', 'Long_', 'Combined_Key', 'Admin2', 'Province_State']
rename_col = {'FIPS': 'fips'}
cases.rename(columns=rename_col, inplace=True)
new_cases = cases.drop(columns=drop_col).set_index('fips').T
new_cases.index = pd.to_datetime(new_cases.index)
new_cases.sort_index(inplace=True)
new_cases = new_cases.diff()
new_cases_raw = new_cases.copy()
deaths = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv')
# there are some counties without fips, but I am adding the one that should be
deaths['FIPS'] = deaths['FIPS'].fillna(deaths['UID']-84000000)
deaths[deaths['FIPS'].isna()]['FIPS']
# there are counties that are called Unassigned and Out of [state] for each state,
# that blows up in my face when I try to take it as a county because
# there are inconsistencies in the count of cases and deceased. let's eliminate!
deaths = deaths[deaths['Admin2']!='Unassigned']
deaths = deaths[~deaths['Admin2'].astype(str).str.startswith('Out of ')]
# There are weird fips, 60, 88888, 99999, 66, 69 and 78 (cruises and states) without Admin 2 column, fill with Province State
deaths['Admin2'] = deaths['Admin2'].fillna(deaths['Province_State'])
# Irrelevant columns
deaths.rename(columns=rename_col, inplace=True)
deaths_drop = deaths.drop(columns=drop_col + ['Population']).set_index('fips').T
deaths_drop.index = pd.to_datetime(deaths_drop.index)
deaths_drop.sort_index(inplace=True)
new_deaths = deaths_drop.diff()
new_deaths_raw = new_deaths.copy()
# Routine to fix negative values
print('Routine to fix negative values')
i = 1
for fips in new_deaths:
ndeaths = new_deaths[fips]
if any(ndeaths < 0):
while any(ndeaths < 0):
rest_bmatrix = ndeaths<0
rest_matrix = ndeaths[rest_bmatrix] # These are the negative values to be fixed
sumneg_matrix = ndeaths[rest_bmatrix] * -1
rest_matrix.index = rest_matrix.index - pd.DateOffset(1) # it is necessary to eliminate them from the previous day
ndeaths = ndeaths.add(sumneg_matrix, fill_value=0) # We add what we are going to subtract on the day of the problem
ndeaths = ndeaths.add(rest_matrix, fill_value=0) # We subtract the cases from the previous day
new_deaths[fips] = ndeaths
print(f'new_deaths: {i}/{len(new_deaths.columns)}', end='\r')
i += 1
print()
print('Done new_deaths')
i = 1
for fips in new_cases:
ncase = new_cases[fips]
if any(ncase < 0):
while any(ncase < 0):
rest_bmatrix = ncase<0
rest_matrix = ncase[rest_bmatrix] # These are the negative values to be fixed
sumneg_matrix = ncase[rest_bmatrix] * -1
rest_matrix.index = rest_matrix.index - pd.DateOffset(1) # it is necessary to eliminate them from the previous day
ncase = ncase.add(sumneg_matrix, fill_value=0) # We add what we are going to subtract on the day of the problem
ncase = ncase.add(rest_matrix, fill_value=0) # We subtract the cases from the previous day
new_cases[fips] = ncase
print(f'new_cases: {i}/{len(new_cases.columns)}', end='\r')
i += 1
print()
print('Done new_cases')
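# Illustrative sketch (not part of the original script): the negative-value fix
# above applied to a single toy daily series -- a -2 on the third day is zeroed
# and 2 is subtracted from the previous day instead.
def _example_fix_negative(daily):
    """One county's daily series with any negative increments pushed back a day."""
    daily = daily.copy()
    while any(daily < 0):
        neg_mask = daily < 0
        neg_vals = daily[neg_mask]                        # negative increments to remove
        cancel = neg_vals * -1                            # zero them out on their own day
        shifted = neg_vals.copy()
        shifted.index = shifted.index - pd.DateOffset(1)  # charge them to the previous day
        daily = daily.add(cancel, fill_value=0)
        daily = daily.add(shifted, fill_value=0)
    return daily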
mergednewcases = pd.merge(new_cases.T, cases[['fips', 'Admin2', 'Province_State']], left_index=True, right_on='fips')
mergednewcases.rename(columns={'Admin2': 'county'}, inplace=True)
mergednewcases.fillna(0, inplace=True)
mergednewcases_raw = pd.merge(new_cases_raw.T, cases[['fips', 'Admin2', 'Province_State']], left_index=True, right_on='fips')
mergednewcases_raw.rename(columns={'Admin2': 'county'}, inplace=True)
mergednewcases_raw.fillna(0, inplace=True)
std_new_cases = pd.melt(mergednewcases, id_vars=['fips', 'county', 'Province_State'], var_name='date', value_name='new_cases')
std_new_cases_raw = pd.melt(mergednewcases_raw, id_vars=['fips', 'county', 'Province_State'], var_name='date', value_name='new_cases')
mergednew_deaths = pd.merge(new_deaths.T, cases[['fips', 'Admin2', 'Province_State']], left_index=True, right_on='fips')
################################################################################
# This module aggregates the all psycho-linguistic measures into one matrix by
# each 'AC_Doc_ID (item stem or option).
# Parameters df_ac_pos: input pandas.DataFrame, it should have, at least, POS
# count columns with the 'AC_Doc_ID's as the index of
# the DataFrame
# pos_start_q: integer column number (starting from zero)
# specifying the starting point of POS count
# columns in the question DataFrame, from the point
# to the end, all the columns should be the POS count
# columns
# df_ac_loc_overlapping_lemma: pandas.DataFrame of the overlapping
# lemma location information, even no
# location information, still
# df_ac_overlapping_lemma is
# acceptable
# df_ac_loc_overlapping_syn_lemma: pandas.DataFrame of
# the overlapping lemma with synonym
# location information, even no
# location information, still
# df_ac_overlapping_syn_lemma is
# acceptable
# df_ac_overlapping_nchunk: pandas.DataFrame as a result of
# overlapping NChunk counts
# df_ac_oanc_lemma_freq_q: pandas.DataFrame reporting each
# 'AC_Doc_ID's lemma frequency stats
# stem_option_name_clm: column name of stem/option identifier
# in the aggregated DataFrame
# stem_identifier: name of the stem identifier in the aggregated
# DataFrame
# keep_specific_columns_POS = None: a list of column names to be
# included into the aggrageted
# matrix as a part of the original
# columns of the df_ac_pos input
# DataFrame
# stop_words_POS = None: list of POS to specify stop words, they
# should all include in the POS question
# and passage DataFrames
# df_ac_lemma_q = None: pandas.DataFrame of questions, it should
# have, at least, lemma count columns with the 'AC_Doc_ID's
# as the index of the DataFrame
# include_specific_lemma_count = None: a list of lemmas to be
# included into the aggrageted
# matrix as the lemma counts
# df_ac_pos_p = None: pandas.DataFrame of passages, it should have,
# at least, POS count columns, passage name and the section
# columns
# passage_name_clm_q = None: column name of the passage names
# in the lemma question DataFrame
# passage_sec_clm_q = None: column name of the passage sections
# in the lemma question DataFrame
# passage_name_clm_p = None: column name of the passage names
# in the passage DataFrame
# passage_sec_clm_p = None: column name of the passage sections
# in the passage DataFrame
# pos_start_p: integer column number (starting from zero)
# specifying the starting point of POS count
# columns in the passage DataFrame, from the point
# to the end, all the columns should be the POS
# count columns
# decimal_places = None: specify the decimal places to round at
# df_ac_overlapping_hypernyms = None: pandas.DataFrame as a result
# of overlapping hypernym counts
# df_ac_overlapping_hyponyms = None: pandas.DataFrame as a result
# of overlapping hyponym counts
# nchunk_suffix = '_nc': specify the suffix of NChunk variables
# which was used for the column names of
# the overlapping NChunk
# hypernym_suffix = '_hype': specify the suffix of hypernym variables
# which was used for the column names of
# the overlapping hypernyms
# hyponym_suffix = '_hypo': specify the suffix of hyponym variables
# which was used for the column names of
# the overlapping hyponyms
# df_ac_bigram_pmi_distribution = None: pandas.DataFrame as bigram
# PMI stats
# df_ac_trigram_pmi_distribution = None: pandas.DataFrame as trigram
# PMI stats
# Returns Result: pandas.DataFrame including the original columns of
# the df_ac_pos DataFrame plus aggregated result columns
################################################################################
def ac_aggregate_plim(df_ac_pos, pos_start_q, df_ac_loc_overlapping_lemma,
df_ac_loc_overlapping_syn_lemma, df_ac_overlapping_nchunk,
df_ac_oanc_lemma_freq_q, stem_option_name_clm, stem_identifier,
keep_specific_columns_POS = None, stop_words_POS = None,
df_ac_lemma_q = None, include_specific_lemma_count = None,
df_ac_pos_p = None, passage_name_clm_q = None, passage_sec_clm_q =None,
passage_name_clm_p = None, passage_sec_clm_p = None,
pos_start_p = None, decimal_places = None,
df_ac_overlapping_hypernyms = None, df_ac_overlapping_hyponyms = None,
nchunk_suffix = '_nc', hypernym_suffix = '_hype',
hyponym_suffix = '_hypo', df_ac_bigram_pmi_distribution = None,
df_ac_trigram_pmi_distribution = None):
import pandas as pd
df_ac_buf_POS = df_ac_pos.iloc[:, pos_start_q:]
all_option_count_name_clms = []
df_ac_options = df_ac_pos.drop_duplicates([stem_option_name_clm])
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count_s_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + nchunk_suffix + '_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + hypernym_suffix + '_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + hyponym_suffix + '_' + x
all_option_count_name_clms.append(s)
option_len = len(all_option_count_name_clms) // 5
if stop_words_POS != None:
df_ac_buf_POS = df_ac_buf_POS.drop(stop_words_POS, axis=1)
df_ac_buf_sum = pd.DataFrame({ 'POS_sum' : df_ac_buf_POS.sum(axis=1) })
if keep_specific_columns_POS != None:
df_ac_buf_POS_head = df_ac_pos.loc[:, keep_specific_columns_POS]
else:
df_ac_buf_POS_head = df_ac_pos.copy()
df_ac_buf_POS_head['POS_sum'] = df_ac_buf_sum['POS_sum']
if df_ac_loc_overlapping_lemma is not None:
df_concat = pd.concat([df_ac_buf_POS_head, df_ac_loc_overlapping_lemma], axis=1)
else:
df_concat = df_ac_buf_POS_head.copy()
df_concat_tmp = df_concat.copy()
if df_ac_loc_overlapping_syn_lemma is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_loc_overlapping_syn_lemma], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_nchunk is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_nchunk, df_ac_oanc_lemma_freq_q], axis=1)
else:
df_concat = pd.concat([df_concat_tmp, df_ac_oanc_lemma_freq_q], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_hypernyms is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_hypernyms], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_hyponyms is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_hyponyms], axis=1)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
sub_df = pd.read_csv('https://covid.postera.ai/covid/submissions.csv')
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from qlib.data.cache import H
from qlib.config import C
from qlib.data.ops import *
from qlib.log import get_module_logger
from qlib.utils import parse_field, read_bin, hash_args, normalize_cache_fields
from qlib.data.base import Feature
from qlib.data.cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
Wrong format of input instruments will lead to exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
workers = min(C.kernels, len(instruments_d))
if C.maxtasksperchild is None:
p = Pool(processes=workers)
else:
p = Pool(processes=workers, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
# NOTE: Python version >= 3.6; in versions after python3.6, dict will always guarantee the insertion order
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
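    # Result shape (illustrative): a DataFrame indexed by (instrument, datetime)
    # with one column per requested field, e.g.
    #
    #                              $close   $volume
    #   instrument datetime
    #   SH600000   2020-01-02      ...      ...
    #              2020-01-03      ...      ...
    #
    # Instruments whose expression results come back empty are dropped before concat.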
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, g_config=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
# FIXME: Windows OS or MacOS using spawn: https://docs.python.org/3.8/library/multiprocessing.html?highlight=spawn#contexts-and-start-methods
global C
C = g_config
# NOTE: This place is compatible with windows, windows multi-process is spawn
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
# The client does not have expression provider, the data will be loaded from cache using static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
        data.index = _calendar[data.index.values.astype(int)]  # builtin int: the np.int alias was removed from NumPy
data.index.names = ["datetime"]
if spans is None:
return data
else:
            mask = np.zeros(len(data), dtype=bool)  # builtin bool: the np.bool alias was removed from NumPy
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
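# A self-contained sketch (not part of the original source) of the span masking
# used at the end of `expression_calculator`: rows outside every (begin, end)
# span are dropped. The dates below are arbitrary examples.
def _demo_span_mask():
    idx = pd.date_range("2020-01-01", periods=5, freq="D")
    data = pd.DataFrame({"$close": range(5)}, index=idx)
    spans = [(pd.Timestamp("2020-01-02"), pd.Timestamp("2020-01-03"))]
    mask = np.zeros(len(data), dtype=bool)
    for begin, end in spans:
        mask |= (data.index >= begin) & (data.index <= end)
    return data[mask]  # only 2020-01-02 and 2020-01-03 remain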
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
return os.path.join(C.get_data_path(), "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
# if future calendar not exists, return current calendar
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt not exists, return current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time =
|
pd.Timestamp(start_time)
|
pandas.Timestamp
|
import sys
import numpy as np
import pandas as pd
import sklearn
import pickle
import constants
def load_df(fasta_filename):
filename = fasta_filename
with open(filename, "r") as f:
test = f.readlines()
x = []
y = []
lens = []
for i in range(len(test)):
#for i in range(start, end):
if i % 2 == 0:
x.append(test[i])
else:
tmp = test[i].split("\n")[0]
lens.append(len(tmp))
y.append(tmp)
len_max = max(lens)
for i in range(len(y)):
diff = len_max - len(y[i])
for j in range(diff):
y[i] += "N"
xs = constants.xs
print(f'seq_len: {len(y[0])}')
l = []
c0, c1, c2, c3 = [], [], [], []
df_lists = []
for i in range(len(x)):
df_list = []
if x[i].startswith(xs[0]):
df_list.append(0)
tmp = x[i].split("_")[1].split("\n")[0]
df_list.append(tmp)
df_list.append(y[i])
elif x[i].startswith(xs[1]):
df_list.append(1)
tmp = x[i].split("_")[1].split("\n")[0]
df_list.append(tmp)
df_list.append(y[i])
elif x[i].startswith(xs[2]):
df_list.append(2)
tmp = x[i].split("_")[1].split("\n")[0]
df_list.append(tmp)
df_list.append(y[i])
elif x[i].startswith(xs[3]):
df_list.append(3)
tmp = x[i].split("_")[1].split("\n")[0]
df_list.append(tmp)
df_list.append(y[i])
else:
            raise ValueError(f"unrecognized sequence header: {x[i].strip()}")
df_lists.append(df_list)
return df_lists
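# Expected input format (illustrative; the real header prefixes come from
# constants.xs): a FASTA-like file with alternating header/sequence lines,
# where each header carries an id after an underscore, e.g.
#
#   <prefix0>_sample1
#   ACGTACGT...
#   <prefix1>_sample2
#   ACGT...
#
# `load_df` returns one [class, id, seq] row per record, right-padding shorter
# sequences with "N" up to the longest sequence length.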
if __name__ == "__main__":
df_list = load_df(constants.fasta_filename)
columns = ["class", "id", "seq"]
df = pd.DataFrame(data=df_list, columns=columns)
    df = df.sample(frac=1)
# each column in a dataframe is called a series
classes = df.loc[:, 'class'].tolist()
sequences = df.loc[:, 'seq'].tolist()
dataset = {}
skip = 50
# Loop throught the sequences and split into individual nucleotides
for i, seq in enumerate(sequences):
# split into nucleotides, remove tab characters
n_tmp = list(seq)
nucleotides = []
for j in range(0, len(n_tmp), skip):
if n_tmp[j] != '\t':
nucleotides.append(n_tmp[j])
#nucleotides = [x for x in nucleotides if x != '\t']
# Append class assignment
nucleotides.append(classes[i])
# add to dataset
dataset[i] = nucleotides
df = pd.DataFrame(dataset).T
print(len(df.columns))
df.rename(columns={599: 'Class'}, inplace=True)
#with open('pp_data/original_train.pkl', 'wb') as f:
# pickle.dump(df, f)
print("deal with dummies and categories for train and test....")
cats = []
for i in range(len(df.columns)-1):
tmp = set(df[df.columns[i]].unique().tolist())
tmp = list(tmp)
tmp.sort()
cats.append(tmp)
cat = tmp
df[df.columns[i]] =
|
pd.Categorical(df[df.columns[i]], categories=cat)
|
pandas.Categorical
|
import pandas as pd
import os
path = "./csv_files"
list_of_samples = os.listdir(path)
df_array = []
for file in list_of_samples:
df_array.append(pd.read_csv(path+"/"+file))
strain_dictionary = {
"genome_file": [],
"scientific name": []
}
for df in df_array:
for index, row in df.iterrows():
specie_array = row['#Organism Name'].split(" ")
strain_dictionary["genome_file"].append(row["Assembly"].split(".")[0])
strain_dictionary["scientific name"].append(specie_array[0] + " "
+ specie_array[1] + ("" if
|
pd.isna(row['Strain'])
|
pandas.isna
|
#Cleaning data
import pandas as pd
import numpy as np
def clean():
#Reading in features/echonest frames and tracks df's to merge on track_id
features =
|
pd.read_csv('features.csv',skiprows=[2,3])
|
pandas.read_csv
|
import h5py
import numpy as np
import pandas as pd
import random
from tqdm import tqdm
from sklearn.preprocessing import Imputer, Normalizer
def load_data(data_path, split=None, label=None, protein_name_list=None, sample_size=None, features_list=None, mode=None):
input_fo = h5py.File(data_path, 'r')
if split is not None:
if split not in ["train", "test"]:
print ("invalid split option")
return None
else:
pass
data_frame = pd.DataFrame()
if protein_name_list is None:
protein_name_list = list(input_fo[split].keys())
print("loading", len(protein_name_list), "proteins.")
if split is not None:
for i, protein_name in enumerate(tqdm(protein_name_list)):
protein_df = load_split_protein(data_path, label=label, protein_name=protein_name, split=split,
sample_size=sample_size,
features_list=features_list, mode=mode)
data_frame = pd.concat([data_frame, protein_df])
else:
for i, protein_name in enumerate(tqdm(protein_name_list)):
protein_df = load_protein(data_path, label=label, protein_name=protein_name, sample_size=sample_size,
features_list=features_list, mode=mode)
data_frame = pd.concat([data_frame, protein_df])
return data_frame
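# Illustrative usage (the file name, protein names and sample size below are
# placeholders, not values from this project):
#
#   df = load_data("docking.h5", split="train",
#                  protein_name_list=["prot_a", "prot_b"],
#                  sample_size=1000)
#
# With `split` set, each protein is loaded via `load_split_protein` and the
# per-protein frames are concatenated into a single DataFrame.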
def load_protein(data_path, label=None, protein_name=None, sample_size=None, features_list=None, mode=None):
input_fo = h5py.File(data_path, 'r')
if label is None:
label = "label"
# if features_list is none then use all of the features
if features_list is None:
features_list = list(input_fo[str(protein_name)].keys())
else:
if "receptor" not in features_list:
features_list.append("receptor")
if "drugID" not in features_list:
features_list.append("drugID")
if "label" not in features_list:
features_list.append("label")
data_frame =
|
pd.DataFrame()
|
pandas.DataFrame
|
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import sklearn
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
# In[ ]:
def filtering(level): # filter class data based on level which students response
db=
|
pd.read_csv('/Users/surin/Documents/201720723/3-1/도메인분석및SW설계/Final/rcmd/finalAcademy.csv', engine='python')
|
pandas.read_csv
|
from pathlib import Path
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import argparse
'''
How to run
python assemble_timer_output.py -b folder_before -a folder_after -d folder_output -o file_name_prefix
'''
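# Sketch of the argument wiring implied by the usage line above (the actual
# parser is defined elsewhere in this script and may differ; the help texts
# here are assumptions):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('-b', help='folder with timing CSVs before the change')
#   parser.add_argument('-a', help='folder with timing CSVs after the change')
#   parser.add_argument('-d', help='output folder for assembled results/plots')
#   parser.add_argument('-o', help='file name prefix for the outputs')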
plt.rcParams['figure.figsize'] = [10, 6]
plt.rcParams.update({'font.size': 18, 'figure.dpi': 150})
sns.set(rc={"lines.linewidth": 0.7})
# https://github.com/mwaskom/seaborn/issues/915
def fixed_boxplot(x, y, *args, label=None, **kwargs):
sns.boxplot(x=x, y=y, *args, **kwargs, labels=[label])
def plot_micro_timing_min_cag_distributions(df_mcmc_and_kde, measurement='Wall Clock Time (ns)', hue='Sample Type',
line=True, separate=True, summary=False, file_name_prefix=''):
min_cag = df_mcmc_and_kde['Edges'] == (df_mcmc_and_kde['Nodes'] - 1)
df_min_cag = df_mcmc_and_kde[min_cag]
df_min_cag.to_csv('min_cag.csv', index=False)
if line:
sns.lineplot(data=df_min_cag, x='Nodes', y=measurement, hue=hue, marker='o', linewidth=2)
if summary:
plt.title('Percentage Speedup for a Single MCMC Iteration (# Edges = # Nodes - 1)', size=16)
else:
plt.title('Timing for a Single MCMC Iteration (# Edges = # Nodes - 1)', size=16)
plt.tight_layout()
else:
if separate:
g = sns.FacetGrid(df_min_cag, col=hue, row='Nodes', hue=hue, sharex='col', margin_titles=True)
else:
g = sns.FacetGrid(df_min_cag, row='Nodes', hue=hue, sharex='col', margin_titles=True)
# g = sns.FacetGrid(df_min_cag, row='Nodes', hue='Sample Type', sharex=False, sharey=False, margin_titles=True)
g.map(sns.histplot, measurement)
# g.map(fixed_boxplot, 'Sample Type', measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(col_template='{col_name}', row_template='{row_name} Nodes')
g.fig.suptitle('Micro Timing for Minimum Size CAGs', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
if file_name_prefix:
plt.savefig(file_name_prefix)
else:
plt.show()
plt.close()
def plot_micro_timing_distributions(df_mcmc_and_kde, measurement='Wall Clock Time (ns)', separate=True):
df_nodes = df_mcmc_and_kde.groupby(by=['Nodes'], as_index=False)
plot_no = 1
for nodes, df_node in df_nodes:
if separate:
g = sns.FacetGrid(df_node, col='Sample Type', row='Edges', hue='Sample Type', sharex='col', sharey='row', margin_titles=True)
file_name_modifier = 'sep'
else:
g = sns.FacetGrid(df_node, row='Edges', hue='Sample Type', sharex='col', sharey='row', margin_titles=True)
file_name_modifier = 'comb'
g.map(sns.histplot, measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(col_template='{col_name}', row_template='{row_name} Edges')
g.fig.suptitle(f'Micro Timing for CAGs with {nodes} Nodes', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
# plt.tight_layout()
# plt.savefig(f'{out_dir}{plot_no}_{file_name_modifier}_{nodes}.png')
plot_no += 1
plt.show()
plt.close()
def plot_micro_timing_summery_per_cag_size(df_mcmc_and_kde, measurement='Wall Clock Time (ns)', separate=True,
title_specifier='', y_label='', file_name_prefix='', timing_type=''):
def edges_to_label(row):
if row['Edges'] == row['Nodes'] - 1:
return '$Nodes - 1$'
elif row['Edges'] == int((row['Nodes'] - 1) * 5 / 4):
return '$\\frac{5(Nodes - 1)}{4}$'
elif row['Edges'] == int((row['Nodes'] - 1) * 6 / 4):
return '$\\frac{6(Nodes - 1)}{4}$'
elif row['Edges'] == int((row['Nodes'] - 1) * 7 / 4):
return '$\\frac{7(Nodes - 1)}{4}$'
elif row['Edges'] == (row['Nodes'] - 1) * 2:
return '$2(Nodes - 1)$'
order = ['$Nodes - 1$', '$\\frac{5(Nodes - 1)}{4}$', '$\\frac{6(Nodes - 1)}{4}$',
'$\\frac{7(Nodes - 1)}{4}$', '$2(Nodes - 1)$']
if separate:
df_nodes = df_mcmc_and_kde.groupby(by=['Nodes'], as_index=False)
for nodes, df_node in df_nodes:
sns.lineplot(data=df_node, x='Edges', y=measurement, hue='Sample Type', marker='o', linewidth=2)
plt.title(f'Variation of the {title_specifier} for a Single MCMC Iteration\nwith the Number of Edges for {nodes} Nodes', size=16)
if file_name_prefix:
plt.savefig(f'{file_name_prefix}with_num_edges_for_{nodes}_nodes.png')
else:
plt.show()
plt.close()
else:
df_mcmc_and_kde = df_mcmc_and_kde.copy()
df_mcmc_and_kde['x label'] = df_mcmc_and_kde.apply(edges_to_label, axis=1)
# set categorical order
df_mcmc_and_kde['x label'] = pd.Categorical(df_mcmc_and_kde['x label'], categories=order, ordered=True)
df_mcmc_and_kde['Nodes'] = df_mcmc_and_kde['Nodes'].apply(lambda nodes: str(nodes))
sns.lineplot(data=df_mcmc_and_kde[((df_mcmc_and_kde['Sample Type'] == timing_type) & (df_mcmc_and_kde[measurement] >= 0))],
x='x label', y=measurement, hue='Nodes', linewidth=2)
plt.xlabel('Edges (as a function of Nodes)')
plt.ylabel(y_label)
plt.title(f'Variation of the {title_specifier} for a Single MCMC Iteration\nwith the Number of Edges', size=16)
if file_name_prefix:
plt.savefig(f'{file_name_prefix}with_num_edges.png')
else:
plt.show()
plt.close()
def plot_prediction_timing_min_cag(df_prediction, measurement='Wall Clock Time (ns)', line=False, separate=True):
min_cag = df_prediction['Edges'] == (df_prediction['Nodes'] - 1)
df_min_cag = df_prediction[min_cag]
if line:
sns.lineplot(data=df_min_cag, x='Nodes', y=measurement, marker='o', linewidth=2)
plt.title('Prediction Timing for Minimum Size CAGs', size=16)
plt.tight_layout()
else:
if separate:
g = sns.FacetGrid(df_min_cag, row='Nodes', margin_titles=True)
else:
g = sns.FacetGrid(df_min_cag, row='Nodes', hue='Sample Type', sharex='col', margin_titles=True)
g.map(sns.histplot, measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(col_template='{col_name}', row_template='{row_name} Nodes')
g.fig.suptitle('Micro Timing for Minimum Size CAGs', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
plt.show()
plt.close()
def plot_prediction_timing_distributions(df_prediction, measurement='Wall Clock Time (ns)', separate=True):
df_prediction = df_prediction.groupby(by=['Nodes'], as_index=False)
for nodes, df_node in df_prediction:
if separate:
g = sns.FacetGrid(df_node, col='Nodes', row='Edges', hue='Nodes', sharex='col', margin_titles=True)
else:
g = sns.FacetGrid(df_node, row='Edges', hue='Nodes', sharex='col', margin_titles=True)
g.map(sns.histplot, measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(row_template='{row_name} Edges')
g.fig.suptitle(f'Prediction Timing Distributions for CAGs with {nodes} Nodes', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
# plt.tight_layout()
plt.show()
plt.close()
def analyze_micro_timing_data(df, mcmc_timing=False):
    # df_summary = df.groupby(by=['Nodes', 'Edges', 'Sample Type'], as_index=False).agg(['mean', 'median', 'std'])
    # print(df_summary)
# return
df_node_edge = df.groupby(by=['Nodes'], as_index=False)
for ne, df_ne in df_node_edge:
# print(ne)
# print(df_ne.columns)
# fig, ax = plt.subplots(dpi=250, figsize=(24, 6.75))
g = sns.FacetGrid(df_ne, col='Sample Type', row='Edges', sharex='col', margin_titles=True)
g.map(sns.histplot, 'Time Wall')
plt.show()
plt.close()
continue
if mcmc_timing:
df_sample_type = df_ne.groupby(by=['Sample Type'], as_index=False)
for st, df_st in df_sample_type:
min_cag = df_st['Edges'] == (df_st['Nodes'] - 1)
df_min_cag = df_st[min_cag]
# print(st)
# print(df_st.columns)
# continue
# sns.lineplot(data=df_min_cag, x='Nodes', y='MCMC Wall', marker='o', linewidth=2)
sns.histplot(df_min_cag, x='MCMC Wall', element='step',
color=(0.9375, 0.5, 0.5), stat='probability')
title = 'Sampling $\\theta$ ' if st == 1 else 'Sampling derivative '
plt.title(title + f'{ne}')
plt.tight_layout()
# plt.savefig(f'{out_dir}{plot_no}_{title} - line.png')
plt.show()
plt.close()
def assemble_micro_timing_output_files_into_df(folder, file_name_filter, ns_to_ms=True):
csv_files = Path(folder).glob(f'*{file_name_filter}*.csv')
df = pd.concat(map(pd.read_csv, csv_files), ignore_index=True)
df.drop(['Run', 'KDE Kernels'], axis=1, inplace=True, errors='ignore')
if ns_to_ms:
df['CPU Time (ns)'] = df['CPU Time (ns)'].apply(lambda ns: ns / 1000000.0)
df['Wall Clock Time (ns)'] = df['Wall Clock Time (ns)'].apply(lambda ns: ns / 1000000.0)
df.rename(columns={'Wall Clock Time (ns)': 'Wall Clock Time (ms)', 'CPU Time (ns)': 'CPU Time (ms)'},
inplace=True)
return df
def combine_before_and_after_dfs(df_bf, df_af):
def add_percentage_speedup_columns(df, timing_type, col_before, col_after):
df[f'{timing_type} Diff (ms)'] = df.apply(lambda row: row[col_before] - row[col_after], axis=1)
df[f'% Speedup ({timing_type})'] = df\
.apply(lambda row: row[f'{timing_type} Diff (ms)'] * 100 / row[col_before], axis=1)
df[f'Fold Speedup ({timing_type}) - $f$'] = df.apply(lambda row: row[col_before] / row[col_after], axis=1)
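        # Worked example (made-up numbers): with a mean wall time of 40 ms before
        # and 25 ms after, Diff = 15 ms, % Speedup = 15 * 100 / 40 = 37.5, and
        # Fold Speedup f = 40 / 25 = 1.6.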
df_summary_bf = df_bf.groupby(by=['Nodes', 'Edges', 'Sample Type'], as_index=False)\
.agg(wall_before_mean=('Wall Clock Time (ms)', 'mean'),
wall_before_median=('Wall Clock Time (ms)', 'median'),
wall_before_std=('Wall Clock Time (ms)', 'std'),
wall_before_count=('Wall Clock Time (ms)', 'count'),
cpu_before_mean=('CPU Time (ms)', 'mean'),
cpu_before_median=('CPU Time (ms)', 'median'),
cpu_before_std=('CPU Time (ms)', 'std'),
cpu_before_count=('CPU Time (ms)', 'count')
).round(2)
df_summary_af = df_af.groupby(by=['Nodes', 'Edges', 'Sample Type'], as_index=False)\
.agg(wall_after_mean=('Wall Clock Time (ms)', 'mean'),
wall_after_median=('Wall Clock Time (ms)', 'median'),
wall_after_std=('Wall Clock Time (ms)', 'std'),
wall_after_count=('Wall Clock Time (ms)', 'count'),
cpu_after_mean=('CPU Time (ms)', 'mean'),
cpu_after_median=('CPU Time (ms)', 'median'),
cpu_after_std=('CPU Time (ms)', 'std'),
cpu_after_count=('CPU Time (ms)', 'count')
).round(2)
df_both =
|
pd.merge(left=df_summary_bf, right=df_summary_af, on=['Nodes', 'Edges', 'Sample Type'])
|
pandas.merge
|
"""Tests for gate.py"""
import numpy as np
import pandas as pd
import xarray as xr
from timeflux.helpers.testing import DummyData, DummyXArray
from timeflux.nodes.gate import Gate
xarray_data = DummyXArray()
pandas_data = DummyData()
node = Gate(event_opens='foo_begins', event_closes='foo_ends', truncate=True)
def test_gate_silent():
pandas_data.reset()
# Send data but no event
node.i.data = pandas_data.next(20)
node.update()
assert node._status == 'closed'
    assert node.o.data is None
def test_send_opening_closing_event_in_separate_chunks():
pandas_data.reset()
# Send an opening event
node.clear()
node.i.data = pandas_data.next(5)
time_open = pd.Timestamp('2018-01-01 00:00:00.104507143') # Sync event to second sample
event = pd.DataFrame([['foo_begins']], [time_open], columns=['label']) # Generate a trigger event
node.i_events.data = event
node.update()
expected_data = pd.DataFrame(
[
[0.658783, 0.692277, 0.849196, 0.249668, 0.489425],
[0.221209, 0.987668, 0.944059, 0.039427, 0.705575],
[0.925248, 0.180575, 0.567945, 0.915488, 0.033946],
[0.69742, 0.297349, 0.924396, 0.971058, 0.944266],
],
[
pd.Timestamp('2018-01-01 00:00:00.104507143'),
pd.Timestamp('2018-01-01 00:00:00.202319939'),
pd.Timestamp('2018-01-01 00:00:00.300986584'),
pd.Timestamp('2018-01-01 00:00:00.396560186')
]
)
expected_meta = {'gate_status': 'open'}
assert node._status == 'open'
assert node._trigger == 'foo_ends'
assert node.o.meta == expected_meta
pd.testing.assert_frame_equal(node.o.data, expected_data)
# Send a closing event
node.clear()
node.i.data = pandas_data.next(5)
time_close = pd.Timestamp('2018-01-01 00:00:00.595580836') # Sync event to second sample
event = pd.DataFrame([['foo_ends']], [time_close], columns=['label']) # Generate a trigger event
node.i_events.data = event
node.update()
expected_data = pd.DataFrame(
[
[0.474214, 0.862043, 0.844549, 0.3191, 0.828915],
[0.037008, 0.59627, 0.230009, 0.120567, 0.076953]
],
[
pd.Timestamp('2018-01-01 00:00:00.496559945'),
pd.Timestamp('2018-01-01 00:00:00.595580836')
]
)
expected_meta = {'gate_status': 'closed',
'gate_times': [time_open, time_close]}
assert node._status == 'closed'
assert node._trigger == 'foo_begins'
assert node.o.meta == expected_meta
pd.testing.assert_frame_equal(node.o.data, expected_data)
def test_send_opening_closing_event_in_same_chunk():
# Send an opening event and a closing event in the same chunk
pandas_data.reset()
node.clear()
node.i.data = pandas_data.next(5)
time_open = pd.Timestamp('2018-01-01 00:00:00.1')
time_close = pd.Timestamp('2018-01-01 00:00:00.3')
event = pd.DataFrame([['foo_begins'], ['foo_ends']], [time_open, time_close],
columns=['label']) # Generate a trigger event
node.i_events.data = event
node.update()
expected_data = pd.DataFrame(
[
[0.658783, 0.692277, 0.849196, 0.249668, 0.489425],
[0.221209, 0.987668, 0.944059, 0.039427, 0.705575]
],
[
|
pd.Timestamp('2018-01-01 00:00:00.104507143')
|
pandas.Timestamp
|
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT,
|
Timestamp('2011-01-01 10:00')
|
pandas.Timestamp
|
import os
import pandas as pd
import csv
from sklearn.model_selection import train_test_split
import numpy as np
import random
import tensorflow as tf
import torch
#directory of tasks dataset
os.chdir("original_data")
#destination path to create tsv files, dipends on data cutting
path_0 = "mttransformer/data/0"
path_100_no_gan = "mttransformer/data/100/no_gan"
path_200_no_gan = "mttransformer/data/200/no_gan"
path_500_no_gan = "mttransformer/data/500/no_gan"
path_100_gan = "mttransformer/data/100/gan"
path_200_gan = "mttransformer/data/200/gan"
path_500_gan = "mttransformer/data/500/gan"
#if you use a model with gan the flag "apply_gan" is True, else False
apply_gan=False
#data cutting
number_labeled_examples=0 #0-100-200-500
#if you want activate balancing, that is used only in the model Multi-task, MT-DNN and MT-GANBERT
balancing=False
#path train and test dataset of the task
tsv_haspeede_train = 'haspeede_TW-train.tsv'
tsv_haspeede_test = 'haspeede_TW-reference.tsv'
tsv_AMI2018_train = 'AMI2018_it_training.tsv'
tsv_AMI2018_test = 'AMI2018_it_testing.tsv'
tsv_DANKMEMES2020_train = 'dankmemes_task2_train.csv'
tsv_DANKMEMES2020_test = 'hate_test.csv'
tsv_SENTIPOLC2016_train = 'training_set_sentipolc16.csv'
tsv_SENTIPOLC2016_test = 'test_set_sentipolc16_gold2000.csv'
#Upload the dataset of all task as dataframes
#haspeede_TW
df_train = pd.read_csv(tsv_haspeede_train, delimiter='\t', names=('id','sentence','label'))
df_train = df_train[['id']+['label']+['sentence']]
df_test =
|
pd.read_csv(tsv_haspeede_test, delimiter='\t', names=('id','sentence','label'))
|
pandas.read_csv
|
from sampling_utils import *
from collections import OrderedDict
import theano
import re
import pickle as pkl  # used by IAEffectLoader below (may also be re-exported by sampling_utils)
import pandas as pd
import datetime
import numpy as np
import scipy as sp
import pymc3 as pm
import patsy as pt
import theano.tensor as tt
# BUG: may throw an error for flat RVs
theano.config.compute_test_value = 'off'
class SpatioTemporalFeature(object):
def __init__(self):
self._call_ = np.frompyfunc(self.call, 2, 1)
def __call__(self, times, locations):
_times = [pd.Timestamp(d) for d in times]
return self._call_(np.asarray(_times).reshape(
(-1, 1)), np.asarray(locations).reshape((1, -1))).astype(np.float32)
class SpatioTemporalYearlyDemographicsFeature(SpatioTemporalFeature):
""" TODO:
* county data must be updated to include 2019/2020 demographic data
|> fix call
"""
def __init__(self, county_dict, group, scale=1.0):
self.dict = {
(year, county): val * scale
for county, values in county_dict.items()
for (g, year), val in values["demographics"].items()
if g == group
}
super().__init__()
def call(self, yearweekday, county):
# TODO: do this properly when data is available!
return self.dict.get((2018, county))
# return self.dict.get((yearweekday.year,county))
class SpatialEastWestFeature(SpatioTemporalFeature):
def __init__(self, county_dict):
self.dict = {
county: 1.0 if "east" in values["region"] else (
0.5 if "berlin" in values["region"] else 0.0) for county,
values in county_dict.items()}
super().__init__()
def call(self, yearweekday, county):
return self.dict.get(county)
class TemporalFourierFeature(SpatioTemporalFeature):
def __init__(self, i, t0, scale):
self.t0 = t0
self.scale = scale
self.τ = (i // 2 + 1) * 2 * np.pi
self.fun = np.sin if (i % 2) == 0 else np.cos
super().__init__()
def call(self, t, x):
return self.fun((t - self.t0) / self.scale * self.τ)
class TemporalPeriodicPolynomialFeature(SpatioTemporalFeature):
def __init__(self, t0, period, order):
self.t0 = t0
self.period = period
self.order = order
super().__init__()
def call(self, t, x):
tdelta = (t - self.t0).days % self.period
return (tdelta / self.period) ** self.order
class TemporalSigmoidFeature(SpatioTemporalFeature):
def __init__(self, t0, scale):
self.t0 = t0
self.scale = scale
super().__init__()
def call(self, t, x):
t_delta = (t - self.t0) / self.scale
return sp.special.expit(t_delta.days + (t_delta.seconds / (3600 * 24)))
class TemporalPolynomialFeature(SpatioTemporalFeature):
def __init__(self, t0, tmax, order):
self.t0 = t0
self.order = order
self.scale = (tmax - t0).days
super().__init__()
def call(self, t, x):
t_delta = (t - self.t0).days / self.scale
return t_delta ** self.order
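# Illustrative sketch (not part of the original module): SpatioTemporalFeature
# broadcasts `call` over a times x locations grid via np.frompyfunc, yielding a
# (n_times, n_locations) float32 array. The subclass and inputs are made up.
class _DemoConstantFeature(SpatioTemporalFeature):
    def call(self, t, x):
        return 1.0
# _DemoConstantFeature()(["2020-03-01", "2020-03-02"], ["county_a"]).shape == (2, 1)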
class IAEffectLoader(object):
generates_stats = False
def __init__(self, var, filenames, days, counties):
self.vars = [var]
self.samples = []
for filename in filenames:
try:
with open(filename, "rb") as f:
tmp = pkl.load(f)
except FileNotFoundError:
print("Warning: File {} not found!".format(filename))
pass
except Exception as e:
print(e)
else:
m = tmp["ia_effects"]
ds = list(tmp["predicted day"])
cs = list(tmp["predicted county"])
d_idx = np.array([ds.index(d) for d in days]).reshape((-1, 1))
c_idx = np.array([cs.index(c) for c in counties])
self.samples.append(np.moveaxis(
m[d_idx, c_idx, :], -1, 0).reshape((m.shape[-1], -1)).T)
def step(self, point):
new = point.copy()
# res = new[self.vars[0].name]
new_res = self.samples[np.random.choice(len(self.samples))]
new[self.vars[0].name] = new_res
return new
def stop_tuning(self, *args):
pass
@property
def vars_shape_dtype(self):
shape_dtypes = {}
for var in self.vars:
dtype = np.dtype(var.dtype)
shape = var.dshape
shape_dtypes[var.name] = (shape, dtype)
return shape_dtypes
class BaseModel(object):
"""
Model for disease prediction.
The model has 4 types of features (predictor variables):
* temporal (functions of time)
* spatial (functions of space, i.e. longitude, latitude)
* county_specific (functions of time and space, i.e. longitude, latitude)
* interaction effects (functions of distance in time and space relative to each datapoint)
"""
def __init__(
self,
trange,
counties,
ia_effect_filenames,
num_ia=16,
model=None,
include_ia=True,
include_demographics=True,
include_temporal=True,
include_periodic=True,
orthogonalize=False):
self.county_info = counties
self.ia_effect_filenames = ia_effect_filenames
self.num_ia = num_ia if include_ia else 0
self.include_ia = include_ia
self.include_demographics = include_demographics
self.include_temporal = include_temporal
self.include_periodic = include_periodic
self.trange = trange
"""Model for Covid-19 daily reports (RKI)
* Trend: (polynomial days/max_days) // degree: 4
* Periodic: (periodic polynomial days/7) // degree: 3
* Interactions: (sampled IA Kernel (from cases and geographical data))
* Spatiotemporal:
* Exposure: ("fixed log population to adjust growth rate slightly")
"""
self.features = {
"temporal_trend": {
"temporal_polynomial_{}".format(i): TemporalPolynomialFeature(
pd.Timestamp('2020-01-28'),
|
pd.Timestamp('2020-03-30')
|
pandas.Timestamp
|
import anndata as ad
from typing import Optional, Literal
import numpy as np
import pandas as pd
class AnnotatedData(ad.AnnData):
"""Creates annotated cellsdata (see anndata library for more information on AnnotatedData) object based around the Ca2+ matrix of the imaging trial."""
def __init__(self, X, obs, var=None, data_label=None, **kwargs):
adata_dict = {'X': X, 'obs': obs, 'var': var}
for key in [*kwargs]:
adata_dict[key] = kwargs[key]
ad.AnnData.__init__(self, **adata_dict)
self.data_label = data_label if data_label else None
print(f"Created AnnData object: \n\t{self.__repr__()}")
def __str__(self):
"extensive information about the AnnotatedData cellsdata structure"
if self.filename:
backed_at = f" backed at {str(self.filename)!r}"
else:
backed_at = ""
descr = f"Annotated Data of n_obs × n_vars = {self.n_obs} × {self.n_vars} {backed_at}"
descr += f"\navailable attributes: "
descr += f"\n\t.X (primary datamatrix) of .data_label: \n\t\t|- {str(self.data_label)}" if self.data_label else f"\n\t.X (primary datamatrix)"
descr += f"\n\t.obs (obs metadata): \n\t\t|- {str(list(self.obs.keys()))[1:-1]}"
descr += f"\n\t.var (vars metadata): \n\t\t|- {str(list(self.var.keys()))[1:-1]}"
for attr in [
".uns",
".obsm",
".varm",
".layers",
".obsp",
".varp",
]:
keys = getattr(self, attr[1:]).keys()
if len(keys) > 0:
descr += f"\n\t{attr}: \n\t\t|- {str(list(keys))[1:-1]}"
return descr
def _gen_repr(self, n_obs, n_vars) -> str: # overriding base method from AnnData
"""overrides the default anndata _gen_repr_() method for imaging cellsdata usage."""
return f"Annotated Data of n_obs (# ROIs) × n_vars (# Frames) = {n_obs} × {n_vars}"
def add_obs(self, obs_name: str, values: list):
"""adds values to the observations of an anndata object, under the key obs_name"""
assert len(values) == self.obs.shape[0], f"# of values to add doesn't match # of observations in anndata array"
self.obs[obs_name] = values
def del_obs(self, obs_name: str): # TODO
"""removes a key from observations from an anndata object, of the key obs_name"""
_ = self.obs.pop(obs_name)
def add_var(self, var_name: str, values: list):
"""adds values to the variables of an anndata object, under the key var_name"""
assert len(values) == self.var.shape[0], f"# of values to add doesn't match # of observations in anndata array"
self.var[var_name] = values
def del_var(self, obs_name: str): # TODO
"""removes a key from variables from an anndata object, of the key var_name"""
_ = self.var.pop(obs_name)
def extend_anndata(self, additional_adata: ad.AnnData, axis: Literal[0,1] = 0):
"""
:param additional_adata: an anndata object of dimensions n obs x # var or, # obs x m var (depending on which axis to extend)
:param axis:
"""
adata = ad.concat([self, additional_adata], axis=axis)
return adata
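    # Illustrative usage (shapes, keys and the data label are made up):
    #
    #   adata = AnnotatedData(X=np.random.rand(10, 100),
    #                         obs=pd.DataFrame({'roi': range(10)}),
    #                         var=pd.DataFrame({'frame': range(100)}),
    #                         data_label='dFF')
    #   adata.add_obs('group', ['a'] * 10)                   # per-ROI metadata
    #   adata.add_var('time_s', list(np.arange(100) / 30))   # per-frame metadata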
def convert_to_df(self) -> pd.DataFrame:
"""
Convert anndata object into a long-form pandas dataframe. primary purpose is to allow access to pandas and seaborn functionality more directly.
- overall seems to be working well. just need to test with a dataset with >1 obs and var keys(), and to test with the whole larger dataset.
:return: long-form pandas dataframe
"""
print(f"\n\- converting anndata cellsdata matrix to long-form pandas dataframe ... [in progress]")
cols = [self.obs_keys()[0], self.var_keys()[0]]
cols.extend(self.obs_keys()[1:])
cols.extend(self.var_keys()[1:])
        cols.append(self.data_label if self.data_label else 'data_values')
df =
|
pd.DataFrame(columns=cols)
|
pandas.DataFrame
|
#!/usr/bin/env python
from pangeamt_toolkit import Pangeanmt, Pipeline
import os
import json
import shutil
import argparse
import subprocess
import pandas as pd
class Engine:
def __init__(self):
with open("config.json", "r") as file:
self._config = json.loads(file.read())
self._src_pipeline = Pipeline(
self._config["pipeline_config"],
self._config["src_lang"],
self._config["tgt_lang"],
)
self._tgt_pipeline = Pipeline(
self._config["pipeline_config_tgt"], self._config["tgt_lang"]
)
# Returns a trained model
def train_from_table(self, p, table):
res = {"src_prep": [], "tgt_prep": []}
# Shape returns the dimensions of the DataFrame, so shape[0] is the
# number of rows.
for i in range(table.shape[0]):
if (i + 1) % 10 == 0:
print(f"Trained with {i+1} segments.")
# The attribute iat gets the value at [x, y]
src = table.iat[i, 0]
tgt = table.iat[i, 1]
src_prep = self._src_pipeline.preprocess_str(src)
tgt_prep = self._tgt_pipeline.preprocess_str(tgt)
res["src_prep"].append(src_prep)
res["tgt_prep"].append(tgt_prep)
p.train(src_prep, tgt_prep)
return p, res
def no_train_translate_from_table(self, p, table):
res = {"original": []}
# Shape returns the dimensions of the DataFrame, so shape[0] is the
# number of rows.
for i in range(table.shape[0]):
if (i + 1) % 10 == 0:
print(f"Translated {i+1} segments.")
# The attribute iat gets the value at [x, y]
seg = table.iat[i, 0]
seg_prep = self._src_pipeline.preprocess_str(seg)
translation = p.translate([seg_prep])
tgt = (" ").join(translation[0].tgt)
tgt = self._src_pipeline.postprocess_str(tgt)
res["original"].append(tgt)
return res
def translate_from_table(self, p, table, j):
res = {f"tgts_{j}": []}
# Shape returns the dimensions of the DataFrame, so shape[0] is the
# number of rows.
for i in range(table.shape[0]):
if (i + 1) % 10 == 0:
print(f"Translated {i+1} segments.")
# The attribute iat gets the value at [x, y]
seg = table.iat[i, 0]
seg_prep = self._src_pipeline.preprocess_str(seg)
translation = p.translate([seg_prep])
tgt = (" ").join(translation[0].tgt)
tgt = self._src_pipeline.postprocess_str(tgt)
res[f"tgts_{j}"].append(tgt)
return res
def _get_parser():
parser = argparse.ArgumentParser(description="Vocab learning")
parser.add_argument("dir", help="Path to extended model.")
parser.add_argument("xlsx_file", help="Xlsx file to work with.")
parser.add_argument("i", help="Number of times the model learns.")
return parser
def main(args):
# Loads the file
xl_file = pd.ExcelFile(args.xlsx_file)
for sheet in xl_file.sheet_names:
# Parses the content of the sheet to a pandas DataFrame.
table = xl_file.parse(sheet)
xl_writer =
|
pd.ExcelWriter(args.xlsx_file, engine="xlsxwriter")
|
pandas.ExcelWriter
|
import argparse
import os
from dataclasses import dataclass
from functools import lru_cache
import socket
from urllib.parse import parse_qsl, urlencode, urlparse
import flask
from cached_property import cached_property
from pathlib import Path
from typing import Dict, List, Optional, Union
import cv2
import pandas as pd
import numpy as np
import dash
from dash import Dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
from dash.exceptions import PreventUpdate
from flask import make_response
parser = argparse.ArgumentParser(
description="Run web-based ESV visualisation tool",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("esvs_pkl", type=Path, help="Path to extracted ESVs")
parser.add_argument("dataset_root", type=Path, help="Path dataset folder of videos")
parser.add_argument(
"classes_csv", type=Path, help="Path to CSV containing name,id entries"
)
parser.add_argument(
"--debug", action="store_true", help="Enable Dash debug capabilities"
)
parser.add_argument(
"--port", default=8080, type=int, help="Port for webserver to listen on"
)
parser.add_argument("--host", default="localhost", help="Host to bind to")
def load_video(video_path: Union[str, Path]) -> np.ndarray:
capture = cv2.VideoCapture(str(video_path))
frames = []
while capture.isOpened():
success, frame = capture.read()
if success:
frames.append(frame[..., ::-1]) # BGR -> RGB
else:
break
if len(frames) == 0:
raise ValueError(f"Could not load video from {video_path}")
return np.stack(frames)
@dataclass
class Result:
esvs: List[np.ndarray] # [n_frames_idx][frame_idx, class_idx]
scores: np.ndarray # [n_frames_idx, class_idx]
uid: str
label: int
sequence_idxs: List[np.ndarray] # [n_frames_idx][frame_idx]
results_idx: int
@property
def max_n_frames(self):
return max([len(s) for s in self.sequence_idxs])
class ShapleyValueResults:
def __init__(self, results):
self._results = results
@property
def uids(self) -> List[str]:
return list(self._results["uids"])
@property
def shapley_values(self) -> List[np.ndarray]:
# shapley_values[n_frames_idx][example_idx, frame_idx, class_idx]
return self._results["shapley_values"]
@property
def sequence_idxs(self) -> np.ndarray:
# sequence_idxs[n_frames_idx][example_idx]
return self._results["sequence_idxs"]
@property
def labels(self) -> np.ndarray:
return self._results["labels"]
@property
def scores(self) -> np.ndarray:
# sequence_idxs[n_frames_idx, example_idx, class_idx]
return self._results["scores"]
@property
def max_n_frames(self) -> int:
return len(self._results["scores"])
@cached_property
def available_classes(self) -> List[int]:
return sorted(np.unique(self.labels))
@cached_property
def class_counts(self) -> Dict[int, int]:
return pd.Series(self.labels).value_counts().to_dict()
@cached_property
def class_example_idxs_lookup(self) -> Dict[int, np.ndarray]:
return {
cls: np.nonzero(self.labels == cls)[0] for cls in self.available_classes
}
def __getitem__(self, idx: Union[int, str]):
if isinstance(idx, (int, np.int32, np.int64)):
example_idx = idx
elif isinstance(idx, str):
example_idx = self.uids.index(idx)
else:
raise ValueError(f"Cannot handle idx type: {idx.__class__.__name__}")
return Result(
esvs=[esvs[example_idx] for esvs in self.shapley_values],
scores=self.scores[:, example_idx],
uid=self.uids[example_idx],
label=self.labels[example_idx],
sequence_idxs=[
sequence_idxs[example_idx] for sequence_idxs in self.sequence_idxs
],
results_idx=example_idx,
)
def get_triggered_props():
ctx = dash.callback_context
return {trigger["prop_id"] for trigger in ctx.triggered}
class Visualisation:
def __init__(
self,
results: ShapleyValueResults,
class2str: Dict[int, str],
dataset_dir: Path,
title: str = "ESV Dashboard",
):
self.results = results
self.class2str = class2str
self.str2class = {v: k for k, v in class2str.items()}
self.dataset_dir = dataset_dir
self.title = title
def decode_other_classes(classes_str):
return list(map(int, classes_str.split(":")))
self.default_state = {
"n-frames": self.results.max_n_frames,
"uid": self.results.uids[0],
"selected-classes": [],
}
self.state_types = {
"uid": str,
"n-frames": int,
"selected-classes": decode_other_classes,
}
    def parse_state_from_url(self, url):
components = urlparse(url)
query_string = parse_qsl(components.query)
state = self.default_state.copy()
for k, v in query_string:
state[k] = self.state_types[k](v)
return state
def load_result(self, cls, example_idx):
return self.results[self.results.class_example_idxs_lookup[cls][example_idx]]
def attach_to_app(self, app: Dash):
def app_layout():
return html.Div(
[dcc.Location(id="url", refresh=False), self.render_layout()]
)
app.layout = app_layout
self.attach_callbacks(app)
self.attach_routes(app)
def attach_routes(self, app: Dash):
@app.server.route("/videos/<uid>")
def load_video(uid: str):
path = self.dataset_dir / f"{uid}.webm"
return flask.send_from_directory(self.dataset_dir.absolute(), f"{uid}.webm")
@app.server.route("/frames/<uid>/<int:frame_idx>")
def load_frame(uid: str, frame_idx: int):
vid = self.load_video(uid)
frame = vid[frame_idx]
success, frame_jpeg = cv2.imencode(".jpg", frame[..., ::-1])
response = make_response(frame_jpeg.tobytes())
response.headers.set("Content-Type", "image/jpeg")
response.headers.set(
"Content-Disposition", "attachment", filename=f"{uid}-{frame_idx}.jpg"
)
return response
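    # NOTE: reconstructed helper, not present in this excerpt. `load_frame` above
    # calls `self.load_video(uid)`, so a minimal method is sketched here; it
    # delegates to the module-level `load_video` and caches decoded videos
    # (presumably why functools.lru_cache is imported).
    @lru_cache(maxsize=8)
    def load_video(self, uid: str) -> np.ndarray:
        return load_video(self.dataset_dir / f"{uid}.webm")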
def get_cls_and_example_idx_for_uid(self, uid):
cls = self.results.labels[self.results.uids.index(uid)]
uids = np.array(self.results.uids)
class_uids = self.results.class_example_idxs_lookup[cls]
example_idx = list(uids[class_uids]).index(uid)
return cls, example_idx
def get_uid_from_cls_and_example_idx(self, cls, example_idx):
return np.array(self.results.uids)[self.results.class_example_idxs_lookup[cls]][
example_idx
]
def get_preds_df(self, result: Result, n_frames: int):
scores = result.scores[n_frames - 1]
classes = list(scores.argsort()[::-1][:10])
if result.label not in classes:
classes = classes[:-1] + [result.label]
entries = []
for i, cls in enumerate(classes):
class_name = (
self.class2str[cls]
.replace("something", "[...]")
.replace("Something", "[...]")
)
# We have to truncate labels on the x-axis so that they fit without all
# getting horribly cut off
max_len = 33
truncated_class_name = class_name
if len(class_name) >= max_len:
truncated_class_name = class_name[: max_len - len("...")] + "..."
entries.append(
{
"Idx": i,
"Class": class_name,
"TruncatedClass": truncated_class_name,
"ClassId": cls,
"Score": scores[cls],
}
)
return pd.DataFrame(entries)
def attach_callbacks(self, app: Dash):
@app.callback(
Output("class-dropdown", "value"),
Input("url", "href"),
)
def update_class_dropdown_value(href):
state = self.parse_state_from_url(href)
if "uid" not in state:
raise PreventUpdate
cls, _ = self.get_cls_and_example_idx_for_uid(state["uid"])
return cls
@app.callback(
Output("n-frames-slider", "value"),
Input("url", "href"),
)
def update_n_frames(href):
state = self.parse_state_from_url(href)
if "n-frames" not in state:
raise PreventUpdate
return state["n-frames"]
@app.callback(
Output("example-idx-slider", "value"),
Input("class-dropdown", "value"),
Input("url", "href"),
)
def update_example_slider_value(cls, href):
ctx = dash.callback_context
url_trigger = "url.href" in get_triggered_props()
state = self.parse_state_from_url(href)
if url_trigger and "uid" in state:
_, example_idx = self.get_cls_and_example_idx_for_uid(state["uid"])
return example_idx
return 0
@app.callback(
Output("example-idx-slider", "max"),
Output("example-idx-slider", "disabled"),
Output("example-idx-slider", "marks"),
Input("class-dropdown", "value"),
)
def update_example_slider(cls):
max_index = self.results.class_counts[cls] - 1
marks = {i: str(i) for i in range(max_index + 1)}
return max_index, max_index == 0, marks
@app.callback(
Output("model-preds-bar", "clickData"),
Output("model-preds-bar", "figure"),
Input("class-dropdown", "value"),
Input("example-idx-slider", "value"),
Input("n-frames-slider", "value"),
)
def update_scores(cls, example_idx, n_frames):
            result = self.load_result(cls, example_idx)
return None, self.plot_preds(self.get_preds_df(result, n_frames))
@app.callback(
Output("state-uid", "children"),
Input("class-dropdown", "value"),
Input("example-idx-slider", "value"),
)
def update_uid(cls, example_idx):
idx = self.results.class_example_idxs_lookup[cls][example_idx]
return self.results.uids[idx]
@app.callback(
Output("esv-scatter", "figure"),
Input("state-uid", "children"),
Input("n-frames-slider", "value"),
Input("state-alt-class", "children"),
)
def update_esvs(uid, n_frames, alt_class_str):
try:
alt_class = int(alt_class_str)
except ValueError:
alt_class = None
result = self.results[uid]
return self.plot_esvs(result, n_frames, alt_class=alt_class)
@app.callback(
Output("esv-scatter", "hoverData"), Input("n-frames-slider", "value")
)
def update_esv_scatter_hover_data(_):
return None
@app.callback(
Output("state-alt-class", "children"),
Input("model-preds-bar", "clickData"),
Input("state-uid", "children"),
)
def update_selected_classes(clickData, uid):
if "state-uid" in get_triggered_props():
return ""
if clickData is not None:
cls = clickData["points"][0]["customdata"][0]
return str(cls)
return dash.no_update
@app.callback(
Output("current-frame-container", "children"),
Input("state-uid", "children"),
Input("esv-scatter", "hoverData"),
)
def update_selected_frame(uid, hoverData):
result = self.results[uid]
if hoverData is None or "state-uid.children" in get_triggered_props():
frame_index = 0
else:
frame_index = hoverData["points"][0]["x"]
return html.Img(src=f"/frames/{result.uid}/{frame_index}")
@app.callback(
Output("video-container", "children"),
Input("state-uid", "children"),
)
def update_video(uid):
return html.Video(src=f"/videos/{uid}", loop=True, autoPlay=True)
@app.callback(
Output("url", "search"),
[
Input("example-idx-slider", "value"),
Input("class-dropdown", "value"),
Input("n-frames-slider", "value"),
],
)
def update_url_params(example_idx, cls, n_frames):
state = {
"uid": self.get_uid_from_cls_and_example_idx(cls, example_idx),
"n-frames": n_frames,
}
params = urlencode(state)
return f"?{params}"
def render_layout(self):
idx = self.results.uids.index(self.default_state["uid"])
cls = self.results.labels[idx]
available_example_idxs = list(self.results.class_example_idxs_lookup[cls])
example_idx = available_example_idxs.index(idx)
return html.Div(
[
html.Div(html.H1(self.title)),
html.Div(
[
html.Div(
[
html.Label("Class: "),
dcc.Dropdown(
id="class-dropdown",
options=[
{
"label": self.class2str[cls],
"value": cls,
}
for cls in self.results.available_classes
],
value=cls,
),
],
className="control-element",
),
html.Div(
[
html.Label("Example: "),
dcc.Slider(
id="example-idx-slider",
min=0,
max=len(available_example_idxs) - 1,
disabled=False,
value=example_idx,
),
],
className="control-element",
),
html.Div(
[
html.Label("Frames fed to model: "),
dcc.Slider(
id="n-frames-slider",
min=1,
max=self.results.max_n_frames,
marks={
i: str(i)
for i in range(1, self.results.max_n_frames + 1)
},
value=self.results.max_n_frames,
),
],
className="control-element",
),
],
className="controls",
),
html.Hr(),
html.Div(
[
html.Div(
[
html.H2("Model Predictions"),
dcc.Graph(
id="model-preds-bar",
config={"displayModeBar": False},
responsive=True,
),
],
id="model-preds-bar-container",
),
html.Div(
[
html.H2("ESV Values"),
dcc.Graph(
id="esv-scatter",
config={"displayModeBar": False},
responsive=True,
# if we don't set the initial height of the graph it
# gets a height of 0 before it is updated when
# the user clicks on an alternate class which
# refreshes the height attribute of the Graph div.
style={"height": "450px"},
),
],
id="esv-scatter-container",
),
],
id="graph-pane",
),
html.Div(
[
html.Div(
[
html.Span("Hovered Frame:"),
html.Div(
id="current-frame-container",
),
]
),
html.Div(
[
html.Span("Orignal Video:"),
html.Div(
id="video-container",
),
]
),
],
id="video-pane",
),
html.A(
target="_blank",
href="https://www.youtube.com/watch?v=zoUJi6L6z0M&feature=youtu.be",
children=html.Div(id="help-btn", children=html.Div("?")),
),
html.Div(
id="state-uid",
children=self.default_state["uid"],
style={"display": "none"},
),
html.Div(id="state-alt-class", children="", style={"display": "none"}),
],
id="visualisation",
)
def plot_esvs(self, result: Result, n_frames: int, alt_class: Optional[int] = None):
classes = [result.label]
if alt_class is not None and alt_class != result.label:
classes.append(alt_class)
entries = []
for cls in classes:
for i in range(n_frames):
entries.append(
{
"Segment": i + 1,
"Frame": result.sequence_idxs[n_frames - 1][i],
"ESV": result.esvs[n_frames - 1][i, cls],
"Class": self.class2str[cls]
+ ("" if cls != result.label else " (GT)"),
}
)
df = pd.DataFrame(entries)
figure = px.line(
df,
x="Frame",
y="ESV",
color="Class",
line_shape="spline",
)
figure.update_traces(mode="markers+lines")
figure.update_layout(
margin_r=0,
margin_b=20,
hovermode="x unified",
legend={"yanchor": "bottom", "y": 1.02, "xanchor": "left", "x": 0},
transition={"duration": 400, "easing": "cubic-in-out"},
)
figure.add_hline(y=0)
return figure
def plot_preds(self, df):
figure = px.bar(
df,
x="Idx",
y="Score",
hover_data={"Idx": False, "Class": True, "Score": True},
custom_data=["ClassId"],
labels={"Idx": ""},
)
figure.update_traces(marker_color="red")
figure.update_layout(margin_b=200, margin_r=0)
figure.update_xaxes(
tickmode="array",
tickvals=df.Idx,
ticktext=df.TruncatedClass,
tickangle=90,
automargin=True,
)
return figure
@lru_cache(maxsize=10)
def load_video(self, uid: str) -> np.ndarray:
return load_video(self.dataset_dir / f"{uid}.webm")
def get_result(self, cls: int, example_idx: int) -> Result:
idx = self.results.class_example_idxs_lookup[cls][example_idx]
return self.results[idx]
def parse_state_from_url(self, url):
components = urlparse(url)
query_string = parse_qsl(components.query)
state = dict()
for k, v in query_string:
state[k] = self.state_types[k](v)
return state
args = parser.parse_args()
dataset_dir: Path = args.dataset_root
classes = pd.read_csv(args.classes_csv, index_col="name")["id"]
class2str = {class_id: name for name, class_id in classes.items()}
results_dict = pd.read_pickle(args.esvs_pkl)
# -*- coding: utf-8 -*-
"""
Simulate elections.
Elements of an election
1. Create voter preferences
- Create voter preference distributions
- Create voter preference tolerance distribution
2. Create candidate preferences
3. Simulate voter behavior, strategy
4. Transform voter preferences into candidate scores or rankings
5. Input scores/ranks into election system.
6. Run the election.
7. Measure the results.
Object Data Transfer Model
--------------------------
Voters --> VoterGroup
Voters/VoterGroup --> Candidates
Voters, VoterGroup, Candidates --> Election
To construct models or benchmarks, start by creating object `Voters`.
`Voters` may have various properties such as preference,
voter strategy parameters, tolerance circles, etc. Define these
properties in Voters. Voters can be segregated by groups,
and each group may have different properties. `VoterGroup` is used to
define groups of several `Voters`.
After defining voters, candidates may be defined using class
`Candidate`. `Candidate` definition may be dependent on the voters population,
therefore `Candidate` accepts voters as an argument.
With the voters and candidates define, an election can be generated with
`Election`. `Election` has many subclasses which run the election.
- `BallotGenerator` takes voter and candidate information to generate honest
and tactical ballots.
- `eRunner` handles the running of specific types of elections.
- `ElectionResult` handles the storage of output data.
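A minimal usage sketch (illustrative only; argument values are arbitrary and the
election method name is an assumption, see `votesim.votemethods` for the
supported names)::
    voters = Voters(seed=0).add_random(100, ndim=2).build()
    candidates = Candidates(voters, seed=0).add_random(5).build()
    election = Election(voters=voters, candidates=candidates)
    result = election.run(etype='plurality')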
"""
import collections
import pickle
import copy
from typing import List, NamedTuple, Tuple, Dict
import numpy as np
import pandas as pd
import scipy
from scipy.stats import truncnorm
from votesim import metrics
from votesim import ballot
from votesim import votemethods
from votesim import utilities
from votesim.models import vcalcs
from votesim.models.dataclasses import (VoterData,
VoterGroupData,
CandidateData,
ElectionData,
ElectionResult,
strategy_data,
StrategyData,
)
from votesim.strategy import TacticalBallots
# from votesim.strategy import TacticalBallots, FrontRunners
__all__ = [
'Voters',
'VoterGroup',
'Candidates',
'Election'
]
# Base random seeds
VOTERS_BASE_SEED = 2
CLIMIT_BASE_SEED = 3
CANDIDATES_BASE_SEED = 4
ELECTION_BASE_SEED = 5
#import seaborn as sns
import logging
logger = logging.getLogger(__name__)
def ltruncnorm(loc, scale, size, random_state=None):
"""
Truncated normal random numbers, cut off at locations less than 0.
Parameters
-----------
loc : float
Center coordinate of gaussian distribution
scale : float
Std deviation scale
size : int
Number of random numbers to generate
random_state : None or numpy.random.RandomState
Random number seeding object, or None.
Returns
---------
out : array shaped (size)
Output samples
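Examples
--------
Illustrative only:
>>> samples = ltruncnorm(loc=1.0, scale=0.5, size=10)
>>> samples.shape
(10,)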
"""
if scale == 0:
return np.ones(size) * loc
xmin = -loc / scale
t = truncnorm(xmin, 1e6)
s = t.rvs(size=size, random_state=random_state)
s = s * scale + loc
return s
def _RandomState(seed, level=1):
"""
Create random state.
Generate multiple random statse from a single seed, by specifying
different levels for different parts of Election.
Parameters
----------
seed : int
Integer seed
level : int
Another integer seed.
"""
if seed is None:
return np.random.RandomState()
else:
return np.random.RandomState((seed, level))
class Voters(object):
"""Create simple normal distribution of voters.
Parameters
----------
seed : int or None
Integer seed for pseudo-random generation. None for random numbers.
tol : float or None
Voter preference max tolerance.
base : str
Voter rating mapping to distance, either:
- 'linear' - Linear mapping of distance to rating
- 'quadratic' - Quadratic mapping of distance to rating
- 'sqrt' - Square root mapping of distance to rating
order : int
Order of norm calculation for voter-candidate regret distance.
Attributes
----------
data : `votesim.models.dataclasses.VoterData`
Voter data
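Examples
--------
Illustrative sketch; argument values are arbitrary:
>>> voters = Voters(seed=0, tol=1.0).add_random(100, ndim=2).build()
>>> voters.data.pref.shape
(100, 2)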
"""
data: VoterData
def __init__(self, seed: int=None, tol: float=None,
base: str='linear', order: int=1):
self.init(seed, order=order)
self.set_behavior(tol=tol, base=base)
return
@utilities.recorder.record_actions(replace=True)
def init(self, seed: int, order: int):
"""Set pseudorandom seed & distance calculation order."""
self.seed = seed
self._randomstate = _RandomState(seed, VOTERS_BASE_SEED)
self._order = order
self._weights = None
return self
@utilities.recorder.record_actions(replace=True)
def set_behavior(self, tol: float=None, base: str='linear',):
"""Set voter strategy type."""
self._tol = tol
self._base = base
return self
@utilities.recorder.record_actions()
def add_random(self, numvoters, ndim=1, loc=None):
"""Add random normal distribution of voters.
Parameters
----------
numvoters : int
Number of voters to generate
ndim : int
Number of preference dimensions of population
loc : array shaped (ndim,)
Coordinate of voter centroid
"""
rs = self._randomstate
center = np.zeros(ndim)
voters = rs.normal(center, size=(numvoters, ndim))
if loc is not None:
voters = voters + loc
return self._add_voters(voters)
@utilities.recorder.record_actions()
def add_points(self, avgnum, pnum, ndim=1):
"""Add a random point with several clone voters at that point.
Parameters
----------
avgnum : int
Avg. Number of voters per unique point
pnum : int
Number of unique points
ndim : int
Number of dimensions
"""
rs = self._randomstate
center = np.zeros(ndim)
for i in range(pnum):
# coordinate of point
point = rs.normal(center, size=(1, ndim))
# number of voters at the point
voternum = ltruncnorm(1, 1, 1) * avgnum
voternum = int(voternum)
voters = np.ones((voternum, ndim)) * point
self._add_voters(voters)
return self
@utilities.recorder.record_actions()
def add(self, pref):
"""Add arbitrary voters.
Parameters
----------
pref : array shape (a, b)
Voter preferences, `a` is number of voters, `b` pref. dimensions.
"""
return self._add_voters(pref)
def _add_voters(self, pref):
"""Base function for adding 2d array of candidates to election."""
try:
pref = np.row_stack((self._pref, pref))
except (AttributeError, ValueError):
pref = np.atleast_2d(pref)
self._pref = pref
return self
def build(self):
"""Finalize Voter, construct immutable VoterData."""
self.data = VoterData(pref=self._pref,
weights=self._weights,
order=self._order,
stats=None,
tol=self._tol,
base=self._base,
)
return self
def calculate_distances(self, candidates: CandidateData):
"""Preference distances of candidates from voters for building ballots.
Parameters
----------
candidates : votesim.models.dataclasses.CandidateData
Candidate preference data
"""
pref = self.data.pref
order = self.data.order
weights = self.data.weights
distances = vcalcs.voter_distances(voters=pref,
candidates=candidates.pref,
weights=weights,
order=order)
return distances
def honest_ballots(self, candidates: CandidateData):
"""Honest ballots calculated from Candidates."""
distances = self.calculate_distances(candidates)
b = ballot.gen_honest_ballots(distances=distances,
tol=self.data.tol,
base=self.data.base)
return b
class VoterGroup(object):
"""Group together multiple voter objects & interact with candidates.
Parameters
----------
voters_list : list[Voters]
List of Voters
Attributes
----------
group : list[Voters]
Same as voters_list
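Examples
--------
Illustrative sketch:
>>> v1 = Voters(seed=0).add_random(50, ndim=2)
>>> v2 = Voters(seed=1).add_random(30, ndim=2)
>>> group = VoterGroup([v1, v2])
>>> group.data.pref.shape
(80, 2)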
"""
def __init__(self, voters_list: List[Voters]):
try:
iter(voters_list)
except Exception:
voters_list = [voters_list]
self.group = voters_list
self._build()
return
def _build(self):
"""Finalize VoterGroup, build immutable VoterGroupData."""
group_datas = tuple(v.build() for v in self.group)
orders = np.array([v.data.order for v in self.group])
if len(orders) > 0:
order = orders[0]
if not np.all(orders == orders[0]):
raise ValueError('Order of voters in group must all be same.')
else:
order = None
# data = self.group[0]
# data = data.replace(pref=self._get_pref())
# self.data = data
pref = self._get_pref()
stats = metrics.VoterStats(pref=pref,
weights=None,
order=order)
group_index = dict(enumerate(self.group_indices))
data = VoterGroupData(groups=group_datas,
pref=pref,
weights=None,
order=order,
stats=stats,
group_index=group_index,
)
self.data = data
return self
def build(self):
"""This is a dummy build and does nothing. VoterGroup is auto-built."""
return self
def _get_pref(self):
vlist = [v.data.pref for v in self.group]
return np.vstack(vlist)
def __getitem__(self, key):
return self.group[key]
@utilities.lazy_property
def group_indices(self):
"""Row indices to obtain child's voters for all children in the voter
preference and ballot arrays.
Returns
-------
slices : list of slice
Slice which returns the Voter group, indexed by group number.
"""
groups = self.group
lengths = [len(v.data.pref) for v in groups]
iarr = np.cumsum(lengths)
iarr = np.append(0, iarr)
slices = [slice(iarr[i], iarr[i + 1]) for i in range(len(iarr) - 1)]
return slices
def voter_group(vlist) -> VoterGroup:
"""Group together multiple Voters."""
if hasattr(vlist, 'group'):
return vlist
else:
return VoterGroup(vlist)
class Candidates(object):
"""Create candidates for spatial model.
Parameters
-----------
voters : `Voters` or `VoterGroup`
Voters to draw population data.
seed : int or None
Seed for random number generation.
Attributes
----------
pref : array shape (a, b)
Candidate preferences, `a` number of candidates,
`b` number of preference dimensions
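Examples
--------
Illustrative sketch; argument values are arbitrary:
>>> voters = Voters(seed=0).add_random(100, ndim=2).build()
>>> candidates = Candidates(voters, seed=0).add_random(cnum=5).build()
>>> candidates.data.pref.shape
(5, 2)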
"""
data: CandidateData
def __init__(self, voters: Voters, seed: int=None):
self._method_records = utilities.recorder.RecordActionCache()
if not hasattr(voters, '__len__'):
voters = [voters]
self.voters = voter_group(voters)
self.set_seed(seed)
return
@utilities.recorder.record_actions()
def set_seed(self, seed: int):
""" Set pseudorandom seed """
self._seed = (seed, CANDIDATES_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return self
def _add_candidates(self, candidates: np.ndarray):
"""Base function for adding 2d array of candidates to election"""
candidates = np.array(candidates)
assert candidates.ndim == 2, 'candidates array must have ndim=2'
vdata = self.voters.data
try:
candidates = np.row_stack((self._pref, candidates))
except (AttributeError, ValueError):
candidates = np.atleast_2d(candidates)
cdim = candidates.shape[1]
vdim = vdata.pref.shape[1]
condition = cdim == vdim
s = ('dim[1] of candidates (%s) '
'must be same as dim[1] (%s) of self.voters' % (cdim, vdim))
assert condition, s
self._pref = candidates
return self
def reset(self):
"""Reset candidates for a given Voters.
Delete candidate preferences and records"""
try:
self._method_records.reset()
except AttributeError:
pass
try:
del self.data
except AttributeError:
pass
return
@utilities.recorder.record_actions()
def add_random(self, cnum: int, sdev=2):
"""
Add random candidates, uniformly distributed.
Parameters
----------
cnum : int
Number of candidates for election
sdev : float
+- Width of standard deviations to set uniform candidate
generation across voter population
"""
rs = self._randomstate
std = self.voters.data.stats.pref_std
mean = self.voters.data.stats.pref_mean
ndim = std.shape[0]
candidates = rs.uniform(low = -sdev*std,
high = sdev*std,
size = (cnum, ndim)) + mean
return self._add_candidates(candidates)
@utilities.recorder.record_actions()
def add(self, candidates: np.ndarray):
"""Add 2d array of candidates to election, record actions
Parameters
----------
candidates : array shape (a, n)
Candidate preference coordinates.
- a = number of candidates
- n = number of preference dimensions
Returns
-------
out: Candidates
`self`
"""
self._add_candidates(candidates)
return self
def build(self):
"""Construct immutable CandidateData needed for simulation."""
voters = self.voters
pref = self._pref
distances = vcalcs.voter_distances(voters=voters.data.pref,
candidates=pref,
weights=voters.data.weights,
order=voters.data.order,
)
stats = metrics.CandidateStats(pref=pref,
distances=distances)
self.data = CandidateData(pref=self._pref,
distances=distances,
stats=stats)
return self
class StrategiesEmpty(object):
"""Create empty strategy; honest election."""
_strategies = []
data = ()
def __init__(self):
return
def build(self):
return self
def __len__(self):
return 0
class Strategies(object):
"""Strategy constructor for `VoterGroup`."""
def __init__(self, vgroup: VoterGroup):
self._method_records = utilities.recorder.RecordActionCache()
self.voters = voter_group(vgroup)
self.vlen = len(self.voters.group)
self._strategies = []
return
@utilities.recorder.record_actions()
def add(self,
tactics: tuple,
subset: str,
ratio: float,
underdog: int,
groupnum: int=0,
frontrunnertype: str='eliminate',):
"""Set a strategy for a specified voter group.
Parameters
----------
tactics : tuple[str]
Tuple of tactic names.
subset : str
- '' -- No subset
- 'topdog' -- Topdog voter coalition
- 'underdog' -- Underdog voter coalition
ratio : float
Ratio of strategic voters in the group from [0 to 1].
underdog : int or None
Specify the underdog candidate. Set to None to estimate best underdog.
groupnum : int, optional
Voter group number to set strategy. The default is 0.
frontrunnertype : str, optional
Strategy used to determine underdog frontrunner.
The default is 'eliminate'.
Returns
-------
Strategies
Returns `self`.
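Examples
--------
Illustrative sketch; the tactic name below is only a placeholder, consult the
votesim ballot/strategy modules for the supported tactic names:
>>> strategies = Strategies(voters).add(
...     tactics=('bury',), subset='underdog', ratio=1.0, underdog=None)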
"""
return self._set(
tactics=tactics,
subset=subset,
ratio=ratio,
underdog=underdog,
groupnum=groupnum,
frontrunnertype=frontrunnertype,
)
@utilities.recorder.record_actions()
def fill(self,
tactics: tuple,
subset: str,
ratio: float,
underdog: int,
groupnum: int,
frontrunnertype='eliminate'):
"""Set strategy for unset groups."""
locations = self.get_no_strategy()
for ii in locations:
self._set(
tactics=tactics,
subset=subset,
ratio=ratio,
underdog=underdog,
groupnum=ii,
frontrunnertype=frontrunnertype,
)
return self
def _set(self,
tactics: tuple,
subset: str,
ratio: float,
underdog: int,
groupnum: int,
frontrunnertype='eliminate',
):
group_index = self.voters.group_indices[groupnum]
strat_data = StrategyData(tactics=tactics,
subset=subset,
ratio=ratio,
underdog=underdog,
groupnum=groupnum,
index=group_index,
frontrunnertype=frontrunnertype)
self._strategies.append(strat_data)
return self
def build(self):
"""Construct static data needed to run simulation"""
if len(self.get_no_strategy()) > 0:
raise ValueError('Insufficient strategies have been defined!')
if self.has_duplicates():
raise ValueError('Duplicate strategy entries found.')
self.data = tuple(self._strategies)
return self
def get_no_strategy(self):
"""ndarray : Groups' index locations that have no strategies set."""
no_strat_locs = []
for ii, index in enumerate(self.voters.group_indices):
found = False
for strategy in self._strategies:
if np.all(index == strategy.index):
found = True
if not found:
no_strat_locs.append(ii)
return np.array(no_strat_locs)
def has_duplicates(self):
"""Make sure no duplicate group index + subset locations have been defined.
Return True if duplicates found. False otherwise."""
data = []
for strategy in self._strategies:
index = strategy.index
subset = strategy.subset
data.append((repr(index), subset))
count = collections.Counter(data)
count_values = list(count.values())
iimax = np.argmax(count_values)
if count_values[iimax] > 1:
logger.warning('Duplicate strategy found at strategy #%s', iimax)
return True
return False
def __len__(self):
return len(self._strategies)
class BallotGenerator(object):
"""
Generate ballots from voter and candidate data.
Parameters
----------
voters_list : list of Voter or VoterGroup
Voters of election
candidates : Candidates
Candidates of election
scoremax : int
Maximum score for scored ballots.
"""
tacticalballots : TacticalBallots
honest_ballot_dict : dict
def __init__(self,
voters_list: VoterGroup,
candidates: Candidates,
scoremax: int):
self.candidates = candidates
self.votergroup = voter_group(voters_list)
self.scoremax = scoremax
self._init_honest_builder()
return
def _init_honest_builder(self):
"""Honest ballot constructor for ratings, ranks, scores, and votes."""
cdata = self.candidates.data
blist = []
for voter in self.votergroup.group:
distances = voter.calculate_distances(cdata)
b = ballot.gen_honest_ballots(distances=distances,
tol=voter.data.tol,
base=voter.data.base,
maxscore=self.scoremax,)
blist.append(b)
self.honest_ballot_gen = ballot.CombineBallots(blist)
bdict = {}
bdict['rank'] = self.honest_ballot_gen.ranks
bdict['score'] = self.honest_ballot_gen.scores
bdict['rate'] = self.honest_ballot_gen.ratings
bdict['vote'] = self.honest_ballot_gen.votes
self.honest_ballot_dict = bdict
return
def get_honest_ballots(self, etype: str) -> np.ndarray:
"""Get honest ballot data.
Parameters
----------
etype : str
Election method name.
Returns
-------
out : np.ndarray
Output ballot data array
"""
btype = votemethods.get_ballot_type(etype)
return self.honest_ballot_dict[btype]
def get_ballots(self, etype, strategies=(), result=None, ballots=None):
"""Retrieve tactical ballots.
Parameters
----------
etype : str
Election method
strategies : list of `StrategyData`
Voter strategies to apply onto ballots
result : `ElectionResult`
Previous results which can be used to calculate front runner.
Returns
-------
ballots : ndarray (v, c)
New ballots
group_index : dict
Index locations of voter groups.
"""
if len(strategies) == 0:
ballots = self.get_honest_ballots(etype)
group_index = self.votergroup.data.group_index
else:
if ballots is None:
ballots = self.get_honest_ballots(etype)
if result is None:
raise ValueError('A previous honest result must be provided for tactical ballots.')
tballot_gen = TacticalBallots(etype=etype,
strategies=strategies,
result=result,
ballots=ballots)
ballots = tballot_gen.ballots
group_index = tballot_gen.group_index
# Just save this thing might be useful for debugging.
self.tacticalballots = tballot_gen
return ballots, group_index
class Election(object):
"""
Run an Election with Voters and Candidates
Parameters
------------
voters : None, Voters, VoterGroup, or list of Voters
Voters object specifying the voter preferences and behavior.
candidates : None or Candidates
Candidates object specifying candidate preferences
seed : None
THIS PARAMETER IS NOT REALLY USED FOR NOW. IGNORE!
numwinners : int >= 1
Number of winners for the election
scoremax : int
Maximum score for ballot generation
name : str
Name of election model, used to identify different benchmark models.
save_args : bool (default True)
- If True, save all parameters input into method calls. These
parameters can be used to regenerate specific elections.
- If False, only save parameters input into `self.user_data`.
Attributes
----------
result : ElectionResult
Results storage for Election.
ballotgen : BallotGenerator
VoterBallot data
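Examples
--------
Illustrative sketch; the method name 'plurality' is an assumption, see
`votesim.votemethods` for the supported names:
>>> voters = Voters(seed=0).add_random(100, ndim=2).build()
>>> candidates = Candidates(voters, seed=0).add_random(5).build()
>>> election = Election(voters=voters, candidates=candidates)
>>> result = election.run(etype='plurality')
>>> df = election.dataframe()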
"""
candidates: Candidates
voters: VoterGroup
data: ElectionData
result: ElectionResult
strategies: Strategies
ballotgen : BallotGenerator
def __init__(self,
voters: VoterGroup=None,
candidates: Candidates=None,
strategies: Strategies=None,
seed=0,
numwinners=1,
scoremax=5,
name = '',
save_args=True,
save_records=True):
self._method_records = utilities.recorder.RecordActionCache()
self.voters: VoterGroup = None
self.candidates: Candidates = None
self.ballotgen: BallotGenerator = None
self.strategies: Strategies = StrategiesEmpty()
self.save_args = save_args
self.save_records = save_records
self.init(seed, numwinners, scoremax, name)
self.set_models(voters, candidates, strategies)
self._result_calc = ElectionResultCalc(self)
return
@utilities.recorder.record_actions(replace=True)
def init(self, seed, numwinners, scoremax, name):
"""Initialize some election properties"""
self._set_seed(seed)
self.numwinners = numwinners
self.scoremax = scoremax
self.name = name
return
def set_models(
self,
voters: Voters=None,
candidates: Candidates=None,
strategies: Strategies=None,
):
"""Set new voter or candidate model.
Parameters
----------
voters : Voters or None
New voters object.
If None, use previously inputed Voters.
candidates : Candidates or None
New candidates object.
If None, use previously inputed Candidates.
strategies : `votesim.models.spatial.Strategies`
New strategies object.
If None, use the previously inputed Strategies
"""
if voters is not None:
self.voters = voter_group(voters)
if candidates is not None:
self.candidates = candidates.build()
if self.voters is not None:
self.ballotgen = BallotGenerator(
self.voters,
self.candidates,
scoremax=self.scoremax
)
if strategies is not None:
if len(strategies) > 0:
self.strategies = strategies.build()
else:
self.strategies = strategies
return
def _set_seed(self, seed):
""" Set pseudorandom seed """
if seed is None:
self._seed = None
self._randomstate = _RandomState(None)
else:
self._seed = (seed, ELECTION_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return
def user_data(self, d=None, **kwargs):
"""Record any additional data the user wishes to record.
Parameters
----------
**d : dict
Write any keys and associated data here
"""
udict = {}
udict.update(kwargs)
if d is not None:
# d is supposed to be a dictionary. Try to update our dict with it
try:
udict.update(d)
# Maybe the user is trying to create a parameter `d`
except TypeError:
udict['d'] = d
self._user_data = udict
return
def reset(self):
"""Delete election data for the current run --
voter preferences, candidate preferences, and ballots.
Clear the kind of data that can be regenerated if desired.
Do not clear statistics.
"""
self.voters.reset()
self.candidates.reset()
def delete(a):
try:
delattr(self, a)
except AttributeError:
pass
delete('winners')
delete('ties')
delete('output')
delete('vballots')
raise NotImplementedError("This function probably doesn't work.")
return
@utilities.recorder.record_actions(
replace=True,
exclude=['ballots', 'result'])
def run(self,
etype=None,
ballots=None,
result=None,
force_honest=False) -> ElectionResult:
"""Run the election using `votemethods.eRunner`.
Parameters
----------
etype : str
Election method. Either `etype` or `method` must be input.
ballots : ndarray
Initial ballots to be used in election.
result : ElectionResult
Previously computed honest election result; supplying it avoids repeating
that computation for tactical runs.
force_honest : bool
Force run of an honest election without strategy
Returns
-------
out : ElectionResult
"""
return self._run(
etype=etype,
ballots=ballots,
result=result,
force_honest=force_honest
)
def _run(self,
etype=None,
ballots=None,
result=None,
force_honest=False) -> ElectionResult:
logger.debug('Running %s', etype)
strategies = self.strategies.data
if force_honest:
strategies = ()
# Auto run an honest election if result is not available.
elif len(strategies) > 0 and result is None and ballots is None:
result = self._run(
etype=etype,
ballots=None,
result=None,
force_honest=True)
# Retrieve some tactical ballots from honest data.
ballots, group_index = self.ballotgen.get_ballots(
etype=etype,
strategies=strategies,
result=result,
ballots=ballots
)
# Generate a deterministic seed based on candidates and voters
runner = votemethods.eRunner(
etype=etype,
numwinners=self.numwinners,
ballots=ballots,
seed=self._tie_seed(),
# rstate=self._randomstate,
)
self.data = ElectionData(
ballots=runner.ballots,
winners=runner.winners,
ties=runner.ties,
group_index=group_index
)
self.result = self._result_calc.update(
runner=runner,
voters=self.voters.data,
candidates=self.candidates.data,
election=self.data
)
return self.result
def _tie_seed(self):
"""Generate pseudorandom seed for tie breaking."""
v = self.voters.data.pref[0,0] * 1000
c = self.candidates.data.pref[0,0] * 10000
return int(abs(v) + abs(c))
def rerun(self, d):
"""Re-run an election found in dataframe. Find the election
data from the dataframe index.
Parameters
----------
d : dict
Dictionary or Series of election data,
generated from self.dataseries() or self.dataframe().
Returns
-------
out : Election
Newly constructed election object with re-run parameters.
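Examples
--------
Illustrative sketch, assuming `election` has already been run at least once:
>>> record = election.dataseries()
>>> election2 = election.rerun(record)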
"""
series = d
def filterdict(d, kfilter):
new = {}
num = len(kfilter)
for k, v in d.items():
if k.startswith(kfilter):
newkey = k[num :]
new[newkey] = v
return new
filter_key = 'args.candidate.'
c_dict = filterdict(series, filter_key)
filter_key = 'args.election.'
e_dict = filterdict(series, filter_key)
filter_key = 'args.strategy.'
s_dict = filterdict(series, filter_key)
# Construct voters
vnum = len(self.voters.group)
new_voters = []
for ii in range(vnum):
filter_key = 'args.voter-%s.' % ii
v_dict = filterdict(series, filter_key)
v = type(self.voters.group[ii])()
#v = type(self.voters)()
v._method_records.reset()
v._method_records.run_dict(v_dict, v)
new_voters.append(v)
# Construct candidates
c = type(self.candidates)(voters=new_voters)
c._method_records.reset()
c._method_records.run_dict(c_dict, c)
# Construct strategies
s_dict2 = {}
for k, v in s_dict.items():
try:
if not np.isnan(v):
s_dict2[k] = v
except TypeError:
s_dict2[k] = v
slen = len(s_dict2)
if slen > 0:
s = type(self.strategies)(c.voters)
s._method_records.reset()
s._method_records.run_dict(s_dict2, s)
else:
s = None
enew = Election(voters=c.voters, candidates=c, strategies=s)
enew._method_records.run_dict(e_dict, enew)
return enew
def copy(self) -> 'Election':
"""Copy election."""
return copy.copy(self)
def save(self, name, reset=True):
"""Pickle election data.
Parameters
----------
name : str
Name of new pickle file to dump Election into.
reset : bool
If True (default), delete election data that can be regenerated.
"""
if reset:
self.reset()
with open(name, 'wb') as file1:
pickle.dump(self, file1)
return
def dataseries(self, index=None) -> pd.Series:
"""Retrieve pandas data series of output data."""
return self._result_calc.dataseries(index=index)
def dataframe(self) -> pd.DataFrame:
"""Construct data frame from results history."""
return self._result_calc.dataframe()
def append_stat(self, d: metrics.BaseStats, name='', update_docs=False):
return self._result_calc.append_stat(d=d,
name=name,
update_docs=update_docs)
def calculate_distance(voters: VoterData, candidates: CandidateData):
"""Re-calculate distance as the distance from Election may have error."""
distances = vcalcs.voter_distances(
voters=voters.pref,
candidates=candidates.pref,
weights=voters.weights,
order=voters.order,
)
return distances
class ElectionResultCalc(object):
"""
Store Election result output. Generated as attribute of Election.
This is a sort of messy back-end that does all the calculations. The
result front end is `ElectionResult`.
Parameters
----------
e : Election
Election to extract results from.
Attributes
----------
runner : :class:`~votesim.votemethods.voterunner.eRunner`
Output from election running class for the last run election.
results : dict
Results of the last run election. Key prefixes:
- 'output.*' -- Prefix for election output results
- 'args.etype' -- Election method
- 'args.voter.*' -- Voter input arguments
- 'args.election.*' -- Election input arguments
- 'args.user.*' -- User defined input arguments
Output Specification
--------------------
For each election output keys are generated as dataframes or dataseries.
- Voter parameters are specified as `args.voter-vnum.a.func.argname`
- `vnum` = Voter group number
- `a` = Method call number (a method could be called multiple times.)
- `func` = Name of the called method
- `argname` = Name of the set parameter for the method.
- Candidate parameters are specified as `args.candidate.a.func.arg`
- User parameters are specified as `args.user.name`
- `name` is the user's inputted parameter name
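For example (illustrative), a key such as `args.voter-0.0.add_random.numvoters`
would hold the `numvoters` argument of the first `add_random` call made on voter
group 0.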
"""
def __init__(self, e: Election):
self.election = e
self.save_args = e.save_args
# Store results as list of dict
self._output_history = []
return
def update(self,
runner: votemethods.eRunner,
voters: VoterData,
candidates: CandidateData,
election: ElectionData) -> ElectionResult:
"""Get election results."""
self.runner = runner
self.winners = runner.winners
self.ties = runner.ties
self.ballots = runner.ballots
self.electionStats = metrics.ElectionStats(voters=voters,
candidates=candidates,
election=election)
### Build dictionary of all arguments and output
output = {}
output.update(self._get_parameters())
output['output'] = self.electionStats.get_dict()
output = utilities.misc.flatten_dict(output, sep='.')
self.output = output
if self.election.save_records:
self._output_history.append(output)
result = ElectionResult(winners=self.winners,
ties=self.ties,
ballots=self.ballots,
runner=self.runner,
output=self.output,
output_docs=self.output_docs,
stats=self.electionStats,
scoremax=self.election.scoremax
)
return result
def _get_parameter_keys(self) -> list:
"""Retrieve election input parameter keys."""
return list(self._get_parameters().keys())
def _get_method_records(self) -> dict:
"""Retrieve records that can be used to regenerate result."""
candidates = self.election.candidates
voters = self.election.voters
strategies = self.election.strategies
election = self.election
# get voter parameters
vrecords = []
for v in voters.group:
vrecords.append(v._method_records.dict)
# get candidate parameters
crecord = candidates._method_records.dict
# get strategy parameters
if hasattr(strategies, '_method_records'):
srecord = strategies._method_records.dict
else:
srecord = {}
# get election parameters
erecord = election._method_records.dict
# Save etype and name in special parameters
params = {}
for key in erecord:
if 'run.etype' in key:
params['args.etype'] = erecord[key]
elif '.init.name' in key:
params['args.name'] = erecord[key]
# Save all method call arguments
if self.save_args:
params['args.candidate'] = crecord
if len(srecord) > 0:
params['args.strategy'] = srecord
for ii, vrecord in enumerate(vrecords):
params['args.voter-%s' % ii] = vrecord
params['args.election'] = erecord
return params
def _get_user_data(self) -> dict:
# Retrieve user data
# Determine if user data exists. If not, save default save_args
try:
userdata = self.election._user_data
if len(userdata) == 0:
userdata = {}
except AttributeError:
userdata = {}
params = {}
# Add user data to params
for key, value in userdata.items():
newkey = 'args.user.' + key
params[newkey] = value
return params
def _get_parameters(self) -> dict:
d1 = self._get_user_data()
d2 = self._get_method_records()
d1.update(d2)
return d1
# def ___get_parameters(self) -> dict:
# """Retrieve election input parameters."""
# params = {}
# candidates = self.election.candidates
# voters = self.election.voters
# election = self.election
# # get candidate parameters
# crecord = candidates._method_records.dict
# # get voter parameters
# vrecords = []
# for v in voters.group:
# vrecords.append(v._method_records.dict)
# # get election parameters
# erecord = election._method_records.dict
# # Retrieve user data
# # Determine if user data exists. If not, save default save_args
# save_args = self.save_args
# try:
# userdata = self.election._user_data
# if len(userdata) == 0:
# save_args = True
# except AttributeError:
# save_args = True
# userdata = {}
# # Add user data to params
# for key, value in userdata.items():
# newkey = 'args.user.' + key
# params[newkey] = value
# # Save etype and name in special parameters
# for key in erecord:
# if 'run.etype' in key:
# params['args.etype'] = erecord[key]
# elif '.init.name' in key:
# params['args.name'] = erecord[key]
# # Save all method call arguments
# if self.save_args or save_args:
# params['args.candidate'] = crecord
# for ii, vrecord in enumerate(vrecords):
# params['args.voter-%s' % ii] = vrecord
# params['args.election'] = erecord
# params = utilities.misc.flatten_dict(params, sep='.')
# return params
@utilities.lazy_property
def output_docs(self) -> dict:
"""Retrieve output documentation."""
docs = self.electionStats.get_docs()
docs = utilities.misc.flatten_dict(docs, sep='.')
return docs
def dataseries(self, index=None):
"""Retrieve pandas data series of output data."""
if index is None:
return pd.Series(self.output)
else:
return pd.Series(self._output_history[index])
import numpy as np
import pandas as pd
# for splitting data into train/test sets
from sklearn.model_selection import train_test_split
#LightGBM
import lightgbm as lgb
#pickle
import pickle
# load the data
df_train = pd.read_csv("train.csv")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
Compared with version 1.6.4, the update concerns the correlation coefficient calculation.
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into an array first,
# otherwise it does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into an array first,
# otherwise it does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into an array first,
# otherwise it does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into an array first,
# otherwise it does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted
return a_oo/np.abs(a_oo).max()
def improved_PCC(signal_in):
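# Pairwise coefficient |sum(x*y)| / sqrt(sum(x^2) * sum(y^2)) over all row pairs, i.e. a
# Pearson-like correlation without mean removal (cosine similarity of the raw signals);
# the final .iloc[22:44, 0:22] slice keeps what appears to be the retest-vs-test block.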
output_corr = pd.DataFrame()
for i in range(44):
row_pcc_notremovemean = []
for j in range(44):
sig_1 = signal_in.iloc[i, :]
sig_2 = signal_in.iloc[j, :]
pcc_notremovemean = np.abs(np.sum(sig_1 * sig_2) / np.sqrt(np.sum(sig_1*sig_1) * np.sum(sig_2 * sig_2)))
row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean)
output_corr = output_corr.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True)
output_corr = output_corr.iloc[22:44, 0:22]
return output_corr
###############################################################################
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')
# Mac
# df_EFR=pd.read_pickle('/Users/bruce/Documents/uOttawa/Master‘s Thesis/4.Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_avg = pd.DataFrame()
df_EFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# without window function
df_EFR_avg_t = pd.DataFrame(df_EFR_avg_t.iloc[0,:].values.reshape(1,1024)) # without window function
# implement the window function
df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))
df_EFR_avg_win = df_EFR_avg_win.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"])
df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :])
df_EFR_avg_85 = df_EFR_avg_85.reset_index(drop=True)
df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :])
df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True)
# this part was replaced by the section above, based on what was needed
'''
# average all the subjects , test and retest, different sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
# average sound levels and
# keep vowel condition and subject
df_EFR_avg_vcs = pd.DataFrame()
for i in range(176):
# combine next two rows later
df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7))
df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True)
# set the title of columns
df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
#df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"])
'''
'''
# filter by 'a vowel and 85Db'
df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')]
df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True)
df_EFR_a_85_avg = pd.DataFrame()
# average test1 and test2
for i in range(44):
df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 9606
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
# zero padding
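# With fs = 9606 Hz, padding a 1024-sample epoch out to 9606 samples gives a 1 Hz FFT bin
# spacing (fs/N), so bins 100, 200, ..., 700 land exactly on 100-700 Hz; padding df_EFR to
# 96060 samples likewise gives 0.1 Hz bins.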
# for df_EFR
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)
# rename columns
df_EFR_avg_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# df_EFR_avg_win_85
df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024]
df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:]
df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1)
df_EFR_avg_win_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# concatenate AENU
temp1 = pd.concat([df_EFR_avg_85.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1)
temp2 = pd.concat([df_EFR_avg_85.iloc[88:132, 0:1024].reset_index(drop=True), df_EFR_avg_85.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1)
df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True)
df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1)
'''
# test##############
# test(detrend)
temp_test = np.asarray(df_EFR_avg_85_data.iloc[0, 0:1024])
temp_test_detrend = signal.detrend(temp_test)
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(temp_test)
plt.subplot(2, 1, 2)
plt.plot(temp_test_detrend)
plt.show()
# the raw data is already DC removed
# test(zero padding)
temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024]
temp_EFR_2= df_EFR_withzero.iloc[0, 0:9606]
temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))])
temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/n2)[range(int(n2/2))])
plt.figure()
plt.subplot(2, 1, 1)
markers1 = [11, 21, 32, 43, 53, 64, 75]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1)
plt.xlim(0, 100)
plt.title('without zero padding')
plt.subplot(2, 1, 2)
#markers2 = [100, 200, 300, 400, 500, 600, 700]
markers2 = [99, 199, 299, 399, 499, 599, 699]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2)
plt.xlim(0, 1000)
# plt.xscale('linear')
plt.title('with zero padding')
plt.show()
# #################
'''
# Calculate the Amplitude Spectrum
# create a new dataframe with zero-padding amplitude spectrum
'''
# for df_EFR
df_as_7= pd.DataFrame()
for i in range(1408):
temp_EFR = df_EFR_avg_85_withzero.iloc[i, 0:96060]
temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))])
#df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0)
df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels on it
# filter by 'a vowel and 85Db'
df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')]
df_as_7_test1 = df_as_7_test1.reset_index(drop=True)
'''
# for df_EFR_avg_vcs_withzero
df_as_85_no0= pd.DataFrame()
df_as_85= pd.DataFrame()
df_as7_85= pd.DataFrame()
df_as_win_85= pd.DataFrame()
df_as7_win_85= pd.DataFrame()
for i in range(176):
#temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
temp_as_no0 = np.abs((np.fft.fft(df_EFR_avg_85_data.iloc[i, :]))[range(int(n/2))])
df_as_85_no0 = df_as_85_no0.append(pd.DataFrame(temp_as_no0.reshape(1,512)), ignore_index = True)
temp_as = np.abs((np.fft.fft(df_EFR_avg_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))])
df_as_85 = df_as_85.append(pd.DataFrame(temp_as.reshape(1,4803)), ignore_index = True)
df_as7_85 = df_as7_85.append(pd.DataFrame(np.array([temp_as[100], temp_as[200], temp_as[300], temp_as[400], \
temp_as[500], temp_as[600], temp_as[700]]).reshape(1,7)), ignore_index = True)
temp_as_win = np.abs((np.fft.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))])
df_as_win_85 = df_as_win_85.append(pd.DataFrame(temp_as_win.reshape(1,4803)), ignore_index = True)
df_as7_win_85 = df_as7_win_85.append(pd.DataFrame(np.array([temp_as_win[100], temp_as_win[200], temp_as_win[300], temp_as_win[400], \
temp_as_win[500], temp_as_win[600], temp_as_win[700]]).reshape(1,7)), ignore_index = True)
df_as_85_no0 = pd.concat([df_as_85_no0, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_85 = pd.concat([df_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as7_85 = pd.concat([df_as7_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_win_85 = pd.concat([df_as_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
df_as7_win_85 = pd.concat([df_as7_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
# without zero padding
df_as_85_aenu = pd.concat([df_as_85.iloc[0:44, :4803],
df_as_85.iloc[44:88, :4803].reset_index(drop=True),
df_as_85.iloc[88:132, :4803].reset_index(drop=True),
df_as_85.iloc[132:176, :4803].reset_index(drop=True)], axis=1)
df_as_85_1300_aenu = pd.concat([df_as_85.iloc[0:44, :1300],
df_as_85.iloc[44:88, :1300].reset_index(drop=True),
df_as_85.iloc[88:132, :1300].reset_index(drop=True),
df_as_85.iloc[132:176, :1300].reset_index(drop=True)], axis=1)
df_as_85_no0_1300 = df_as_85_no0.iloc[:, :139]
df_as_85_no0_aenu = pd.concat([df_as_85_no0_1300.iloc[0:44, :],
df_as_85_no0_1300.iloc[44:88, :].reset_index(drop=True),
df_as_85_no0_1300.iloc[88:132, :].reset_index(drop=True),
df_as_85_no0_1300.iloc[132:176, :].reset_index(drop=True)], axis=1)
df_as7_85_aenu = pd.concat([df_as7_85.iloc[0:44, :7],
df_as7_85.iloc[44:88, :7].reset_index(drop=True),
df_as7_85.iloc[88:132, :7].reset_index(drop=True),
df_as7_85.iloc[132:176, :7].reset_index(drop=True)], axis=1)
# for efr_aenu
df_aenu_as_85 = pd.DataFrame()
df_aenu_as7_85 = pd.DataFrame()
for i in range(44):
#temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
temp_as2 = np.abs((fftpack.fft(df_EFR_avg_85_aenu.iloc[i, 0:4096])/4096)[range(int(4096/2))])
df_aenu_as_85 = df_aenu_as_85.append(pd.DataFrame(temp_as2.reshape(1,2048)), ignore_index = True)
df_aenu_as7_85 = df_aenu_as7_85.append(pd.DataFrame(np.array([temp_as2[43], temp_as2[85], temp_as2[128], temp_as2[170], \
temp_as2[213], temp_as2[256], temp_as2[298]]).reshape(1,7)), ignore_index = True)
#df_aenu_as_85 = pd.concat([df_aenu_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
'''
# average test1 and test2
df_as_7_avg = pd.DataFrame()
for i in range(44):
df_as_7_avg1 = pd.DataFrame(df_as_7_test1.iloc[2*i: 2*i+2, 0:7].mean(axis=0).values.reshape(1,7))
df_as_7_label = pd.DataFrame(df_as_7_test1.iloc[2*i, 7:14].values.reshape(1,7))
df_as_7_avg_t = pd.concat([df_as_7_avg1, df_as_7_label], axis=1, ignore_index=True)
df_as_7_avg = df_as_7_avg.append(df_as_7_avg_t)
# set the title of columns
df_as_7_avg.columns = np.append(np.arange(7), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_as_7_avg = df_as_7_avg.sort_values(by=["Condition", "Subject"])
df_as_7_avg = df_as_7_avg.reset_index(drop=True)
'''
'''
# set a normalized AS
df_as_7_avg_data= pd.DataFrame(df_as_7_avg.iloc[:, 0:7].astype(float))
df_as_7_avg_sum= pd.DataFrame(df_as_7_avg.iloc[:, 0:7]).sum(axis=1)
df_as_7_avg_label= pd.DataFrame(df_as_7_avg.iloc[:, 7:14])
# normalize
df_as_7_avg_norm = df_as_7_avg_data.div(df_as_7_avg_sum, axis=0)
# add label
df_as_7_avg_norm = pd.concat([df_as_7_avg_norm, df_as_7_avg_label], axis=1, ignore_index=True)
'''
# normalization
df_EFR_avg_85_aenu_norm = df_EFR_avg_85_aenu.div((df_EFR_avg_85_aenu.iloc[0:4096].abs()**2).sum())
df_aenu_as_85_1300_norm = df_aenu_as_85.iloc[:, :535].div((df_aenu_as_85.iloc[:, :535].abs()**2).sum()/1300)
df_as_85_1300_aenu_norm = df_as_85_1300_aenu.div((df_as_85_1300_aenu.abs()**2).sum()/1300)
# Calculate correlation
# EFR
corr_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_aenu = df_EFR_avg_85_aenu.iloc[:, 0:4096].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_EFR_avg_85_a_t = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_e_t = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_n_t = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_u_t = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_a_re = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_e_re = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_n_re = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_u_re = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# AS
corr_as_85_a = df_as_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_e = df_as_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_n = df_as_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_u = df_as_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_a = df_as_win_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_e = df_as_win_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_n = df_as_win_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_u = df_as_win_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_aenu = df_aenu_as_85.iloc[0:44, 0:2048].T.corr(method='pearson').iloc[22:44, 0:22]
# here we use df_aenu_as_85.iloc[:, 0:535] to limit the frequency range to 0-1300 Hz
corr_as_85_aenu_1300 = df_aenu_as_85.iloc[0:44, 0:535].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_no0_aenu = df_as_85_no0_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_aenu = df_as7_85_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
corr_aenu_as7_85 = df_aenu_as7_85.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
# calculate the improved PCC matrix
corr_as_85_a_v2 = improved_PCC(df_as_85.iloc[0:44, 0:1300])
corr_as_85_e_v2 = improved_PCC(df_as_85.iloc[44:88, 0:1300])
corr_as_85_n_v2 = improved_PCC(df_as_85.iloc[88:132, 0:1300])
corr_as_85_u_v2 = improved_PCC(df_as_85.iloc[132:176, 0:1300])
corr_as_85_1300_aenu = improved_PCC(df_as_85_1300_aenu)
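# improved_PCC is defined elsewhere in this script and its exact definition is not shown here.
# As a rough sketch only (an assumption, not necessarily the author's improved_PCC), a plain
# test-retest Pearson block in the style of the .T.corr(...).iloc[22:44, 0:22] calls above:
def _test_retest_pcc_sketch(block):
    """Sketch: correlate retest trials (rows 22:44) against test trials (rows 0:22).
    Transposing makes each trial a column/variable for DataFrame.corr()."""
    full = block.T.corr(method='pearson')
    return full.iloc[22:44, 0:22]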
# df_EFR + df_aenu_AS_1300
df_aenu_sum_85 =
|
pd.concat([df_EFR_avg_85_aenu, df_aenu_as_85.iloc[:, :535]], axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
import os
import pandas as pd
import xgboost
import utils
import scoring
from sklearn.model_selection import train_test_split
# The datasets are available in CoCalc in ~/share/data/I-coopetition-muon-id/
# Test
# wget --content-disposition https://codalab.coresearch.club/my/datasets/download/dd6255a1-a14b-4276-9a2b-db7f360e01c7
# Train
# get --content-disposition https://codalab.coresearch.club/my/datasets/download/3a5e940c-2382-4716-9ff7-8fbc269b98ac
# Data preparation
columns = utils.SIMPLE_FEATURE_COLUMNS + ["id", "label", "weight", "sWeight", "kinWeight"]
DATA_PATH = "."
train = pd.read_csv(os.path.join(DATA_PATH, "train.csv.gz"), index_col="id", usecols=columns)
test = pd.read_csv(os.path.join(DATA_PATH, "test-features.csv.gz"), index_col="id", usecols=utils.SIMPLE_FEATURE_COLUMNS + ["id"])
train.head()
test.head()
train_part, validation = train_test_split(train, test_size=0.25, shuffle=True, random_state=2342234)
model = xgboost.XGBClassifier(n_jobs=-1)
model.fit(train_part.loc[:, utils.SIMPLE_FEATURE_COLUMNS].values,
train_part.label.values,
sample_weight=train_part.kinWeight.values)
validation_predictions = model.predict_proba(validation.loc[:, utils.SIMPLE_FEATURE_COLUMNS].values)[:, 1]
scoring.rejection90(validation.label.values, validation_predictions, sample_weight=validation.weight.values)
model.fit(train.loc[:, utils.SIMPLE_FEATURE_COLUMNS].values, train.label, sample_weight=train.kinWeight.values)
predictions = model.predict_proba(test.loc[:, utils.SIMPLE_FEATURE_COLUMNS].values)[:, 1]
compression_opts = dict(method='zip',
archive_name='submission.csv')
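# compression_opts is typically handed to DataFrame.to_csv; a minimal sketch of that last step
# (the "submission.zip" output path is an assumption, not taken from the original script):
submission = pd.DataFrame(data={"prediction": predictions}, index=test.index)
submission.to_csv("submission.zip", compression=compression_opts)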
|
pd.DataFrame(data={"prediction": predictions}, index=test.index)
|
pandas.DataFrame
|
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
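# e.g. with '5t' bins, data that effectively starts at 04:00:02 should still be
# bucketed at 04:00, 04:05, ... (midnight-anchored), not at 04:00:02, 04:05:02, ...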
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assert_(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assert_(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
result = ts.resample('A', kind='period')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
|
assert_series_equal(resampled, expected)
|
pandas.util.testing.assert_series_equal
|
import os
import json
import datetime
# from cs50 import SQL
import sqlite3
# from contextlib import closing
from flask import Flask, redirect, render_template, request, session, json, jsonify, Markup
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
import shutil
from PIL import Image, ImageFilter
import pandas as pd
# from sklearn.linear_model import LinearRegression, ElasticNet
# from sklearn.model_selection import train_test_split
from helpers import login_required, usd, get_weather_info, db_dictionary, category_dictionary, sleeve_list, color_dictionary, save_image, model_learning, apology
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"]
# make json not garbled
# app.config['JSON_AS_ASCII'] = False
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
dbpath = 'wwwardrobe.sqlite' # DB's path
# DB table
column_users = db_dictionary()["column_users"]
column_wardrobes = db_dictionary()["column_wardrobes"]
column_history_wear = db_dictionary()["column_history_wear"]
column_history_own = db_dictionary()["column_history_own"]
column_weather_today = db_dictionary()["column_weather_today"]
# Folder for wardrobe-images
image_path = os.path.join("static", "wardrobe_image")
# Directory of alternative image
alt_path = os.path.join(image_path, "Alternative.jpg")
# Directory of the image which portrays no wardrobe is selected
nowardrobe_path = os.path.join(image_path, "NoWardrobe.jpg")
# Folder for material
material_path = os.path.join("static", "material")
# Directory for temporary wardrobe-image
temp_directory = "temp"
@app.route("/")
@login_required
def index():
"""Show home page"""
# DB connection and cursor generation
connection = sqlite3.connect(dbpath)
# connection.isolation_level = None # uncomment to enable auto-commit
cursor = connection.cursor()
# Get user
cursor.execute("SELECT * FROM users WHERE id = :userid",
{'userid': session["user_id"]})
rows_users = cursor.fetchall() # users row
# Get weather and user information to display
username = rows_users[0][column_users["username"]] # Get user's name
location = rows_users[0][column_users["location"]] # Get user's location
my_api_key = '<KEY>' # Get API Key
dict_weather = get_weather_info(location, my_api_key) # Get weather in the location
weather = dict_weather['main'] # Today's weather
t_max = dict_weather['tmax'] # Maximum temperature
t_min = dict_weather['tmin'] # Minimum temperature
img_url_w = os.path.join("..", material_path, "weather_{}.png".format(weather)) # Image of Today's weather
# Get wardrobe-image url to display
cursor.execute("SELECT * FROM history_wear WHERE userid = :userid AND wear_date = :today;",
{"userid": session["user_id"],
"today" : str(datetime.datetime.now().date())
})
rows_hw = cursor.fetchone() # history_wear row
# Dictionary mapping wardrobe-id keys to image URLs
dict_image_url = {"wardrobeid_c": "", "wardrobeid_o": "", "wardrobeid_t": "",
"wardrobeid_i": "", "wardrobeid_b": "", "wardrobeid_s": ""
}
# Message to display
today_message = ""
notice_message = ""
print(rows_hw)
# Variable to assess whether wardrobe is selected today
have_selected = False
# Display the wardrobes selected today
# if wardrobe haven't been selected yet,
if rows_hw == None:
for key in dict_image_url.keys():
dict_image_url[key] = nowardrobe_path
today_message ="Not selected outfit today yet"
# if wardrobe have been selected,
else:
if rows_hw[column_history_wear["comfort_score"]] != None:
notice_message = "You have input comfort today."
for key in dict_image_url.keys(): # if a wardrobe isn't selected,
if rows_hw[column_history_wear[key]] == None:
dict_image_url[key] = nowardrobe_path
else: # if a wardrobe selected,
# Select DB for wardrobes to get wardrobename from wardrobeid
cursor.execute("SELECT wardrobename FROM wardrobes WHERE id = :wardrobeid;",
{"wardrobeid": rows_hw[column_history_wear[key]]
})
# Get wardrobename
rows_wn = cursor.fetchone()
# Set image-url from wardrobename
dict_image_url[key] = os.path.join("..", image_path, str(session["user_id"]), "{}.jpg".format(rows_wn[0]))
today_message = "Your outfit today"
have_selected = True
# Forget which page user is in now (for ajax)
session["nowpage"] = ""
return render_template("index.html", username=username, location=location, weather=weather, img_url_w=img_url_w,
t_max=t_max, t_min=t_min, img_urls=dict_image_url, today_message=today_message,
message=notice_message, have_selected=have_selected)
@app.route("/recordcomfort", methods=["POST"])
def recordcomfort():
"""Record comfort-score today"""
# User reached route via GET (as by clicking a link or via redirect)
if not request.method == "POST":
pass
# User reached route via POST (as by submitting a form via POST)
else:
pass
# DB connection and cursor generation
connection = sqlite3.connect(dbpath)
# connection.isolation_level = None # uncomment to enable auto-commit
cursor = connection.cursor()
# Update DB for history_wear
cursor.execute("UPDATE history_wear SET comfort_score = :t_comfort_score WHERE userid = :t_userid;",
{"t_comfort_score": int(request.form.get("comfort")),
"t_userid" : int(session["user_id"])
})
connection.commit() # DB commit
return redirect("/") # Display home page
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via GET (as by clicking a link or via redirect)
if not request.method == "POST":
return render_template("login.html") # Display login form
# User reached route via POST (as by submitting a form via POST)
else:
# Exception handling
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username") # テスト
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password") # テスト
# DB connection and cursor generation
connection = sqlite3.connect(dbpath)
# connection.isolation_level = None # uncomment to enable auto-commit
cursor = connection.cursor()
# Query DB for username
cursor.execute("SELECT * FROM users WHERE username = :username",
{'username': request.form.get("username")})
# Ensure username exists and password is correct
rows = cursor.fetchall()
if len(rows) != 1 or not check_password_hash(rows[0][column_users["hash"]], request.form.get("password")):
return apology("invalid username and/or password", 403) # テスト?
# Remember which user has logged in
session["user_id"] = rows[0][column_users["id"]]
session["user_location"] = rows[0][column_users["location"]]
# The value where user is from beforehand
session["pagefrom"] = ""
# Store weather-information today ------------------------------------
cursor.execute("SELECT * FROM users WHERE username = :username",
{'username': request.form.get("username")})
rows_users = cursor.fetchall()
location = rows_users[0][column_users["location"]] # Get user's location
cursor.execute("SELECT * FROM weather_today WHERE location = :t_location;",
{'t_location': location})
rows_weather = cursor.fetchall() # Get weather-information
today = str(datetime.datetime.now().date()) # Get today as string value
my_api_key = '<KEY>' # Get API Key
dict_weather = get_weather_info(location, my_api_key) # Get weather in the location
weather = dict_weather['main'] # Today's weather
t_max = dict_weather['tmax'] # Maximum temperature
t_min = dict_weather['tmin'] # Minimum temperature
humidity = 0 # TODO: not implemented yet # Average humidity
print(rows_weather)
if len(rows_weather) == 0: # If there is no information on that "location",
cursor.execute("INSERT INTO weather_today({0}, {1}, {2}, {3}, {4}, {5}) VALUES (:t_l, :t_d, :t_w, :t_tmax, :t_tmin, :t_h);".format(
"location", "date", "weather", "temperature_max", "temperature_min", "humidity"),
{"t_l": location, "t_d": today, "t_w": weather, "t_tmax": t_max, "t_tmin": t_min, "t_h": humidity})
else:
if today != rows_weather[0][column_weather_today['date']]: # If no information is about today,
cursor.execute("UPDATE weather_today SET {0} = :t_d, {1} = :t_w, {2} = :t_tmax, {3} = :t_tmin, {4} = :t_h WHERE {5} = :t_l;".format(
"date", "weather", "temperature_max", "temperature_min", "humidity", "location"),
{"t_l": location, "t_d": today, "t_w": weather, "t_tmax": t_max, "t_tmin": t_min, "t_h": humidity})
else:
pass
connection.commit() # DB commit
# Redirect user to home page
return redirect("/")
@app.route("/logout")
def logout():
"""Log user out"""
session.clear() # Forget any user_id
return redirect("/") # Redirect user to login form
@app.route("/register", methods=["GET"])
def register():
"""Entry information to register"""
locations = [] # Get the JSON content
locations = ["Tokyo", "Shanghai", "Bangkok"] # テスト
return render_template("register.html", locations=locations) # complete registering
@app.route("/registered", methods=["GET", "POST"])
def registered():
"""Try to register"""
# User reached route via GET (as by clicking a link or via redirect)
if not request.method == "POST":
return redirect("/") # Redirect user to login form
# User reached route via POST (as by submitting a form via POST)
else:
# DB connection and cursor generation
connection = sqlite3.connect(dbpath)
# connection.isolation_level = None # uncomment to enable auto-commit
cursor = connection.cursor()
username = request.form.get("username")
location = request.form.get("location")
gender = request.form.get("gender")
password = request.form.get("password")
# Get today as string value
dt = str(datetime.datetime.now())
regist_date = dt[:dt.find(".")]
# Select DB for users
cursor.execute("SELECT * FROM users WHERE username = :username",
{'username': username
})
checkname = cursor.fetchall()
# Exception handling
if len(checkname) > 0:
return redirect("/") # Username is taken
if location == "--Select location--":
return redirect("/") # location is invalid
if password != request.form.get("confirm"):
return redirect("/") # passwords in disagreement
# Insert DB for wardrobes
cursor.execute("INSERT INTO users(username, location, gender, hash, regist_date) VALUES(:t_username, :t_location, :t_gender, :t_hash, :t_regist_date)",
{'t_username': username,
't_location': location,
't_gender': gender,
't_hash': generate_password_hash(password),
't_regist_date': regist_date
}) # Store the submitted values in the DB.
connection.commit() # DB commit
cursor.execute("SELECT * FROM users WHERE username = :username",
{'username': username
})
row = cursor.fetchall()
# Make a folder to hold user's wardrobe-image
image_id_path = os.path.join(image_path, str(row[0][column_users["id"]]))
if not os.path.exists(image_id_path):
os.mkdir(image_id_path)
os.mkdir(os.path.join(image_id_path, temp_directory))
return render_template("registered.html")
@app.route("/recommend", methods=["GET", "POST"])
@login_required
def recommend():
"""Recommend user's best cloth today"""
# User reached route via GET (as by clicking a link or via redirect)
if not request.method == "POST":
pass
# User reached route via POST (as by submitting a form via POST)
else:
pass
# DB connection and cursor generation
connection = sqlite3.connect(dbpath)
# connection.isolation_level = None # uncomment to enable auto-commit
cursor = connection.cursor()
# Prepare training data for machine learning --------------------------------------
# Get wardrobe-image url to display
cursor.execute("SELECT * FROM history_wear WHERE userid = :userid AND NOT comfort_score = '';",
{"userid": session["user_id"]
})
rows_hw = cursor.fetchall() # history_wear row
# Generate list of "history_wear" (wardrobeid is converted to warmscore AND wear_date is removed)
list_hw = [] # List about a record of history_wear
list_hws = [] # List about records of history_wear
for num in range(len(rows_hw)):
for i in ['wardrobeid_c', 'wardrobeid_o', 'wardrobeid_t', 'wardrobeid_i', 'wardrobeid_b', 'wardrobeid_s']:
# Select DB same as wardrobe-id.
cursor.execute("SELECT * FROM wardrobes WHERE id = :wardrobeid;",
{"wardrobeid": rows_hw[num][column_history_wear[i]]
})
rows_wr = cursor.fetchall() # wardrobes row
if rows_wr == []: # If rows_wr is empty,
warmscore = 0
else:
list_wr = [] # wardrobes list (may be used later)
list_wr.append(rows_wr) # may be used later
warmscore = rows_wr[0][column_wardrobes['warmscore']] # Get warmscore
list_hw.append(warmscore) # Store in a list
for i in ['temperature_max', 'temperature_min', 'comfort_score']:
list_hw.append(round(rows_hw[num][column_history_wear[i]], 0))
list_hws.append(list_hw) # Store in a list
list_hw = [] # Set default value
# Part of Machine Learning -----------------------------------------
# Convert list of "history_wear" to pd.DataFrame of "history_wear"
feature_names = ["warmscore_c", "warmscore_o", "warmscore_t",
"warmscore_i", "warmscore_b", "warmscore_s",
"temperature_max", "temperature_min", "comfort_score"]
data = pd.DataFrame(list_hws, columns=feature_names)
model = model_learning(data)
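# Note: model_learning is imported from helpers.py and its body is not shown in this file.
# As an assumption-only sketch, it presumably fits a regressor that maps the warmscore and
# temperature columns onto comfort_score, e.g.:
#   from sklearn.linear_model import LinearRegression
#   def model_learning(data):
#       X = data.drop(columns=["comfort_score"])
#       y = data["comfort_score"]
#       return LinearRegression().fit(X, y)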
# Part of Machine Predicting -------------------------------------------
# Select DB for wardrobes
cursor.execute("SELECT * FROM wardrobes WHERE userid = :t_userid AND inuse = 1;",
{"t_userid": session["user_id"]
})
rows_wr = cursor.fetchall()
dict_category = category_dictionary() # Dictionary about wardrobe's category
dict_wr_category = {"-Outer Category-": [], "-Tops Category-": [],
"-Inner Category-": [], "-Bottoms Category-": []}
combi_content = {} # wardrobe-combination on category
combi_score = {} # score on wardrobe-combination on category
num = 0 # for index of "combi_content"
iter_num_o = 0 # iteration offset so '-Outer Category-' pairs are enumerated without duplicates (MECE)
iter_num_b = 0 # iteration offset so '-Bottoms Category-' pairs are enumerated without duplicates (MECE)
__len_combi__ = 20000
for i in range(__len_combi__):
combi_content[i] = {}
# Generate dictionary on category
for i in rows_wr:
for key, value in dict_category.items():
if i[column_wardrobes['category']] in value: # If category is same,
dict_wr_category[key].append(i[column_wardrobes['id']])
break
else:
pass
# Append NULL to dictionary on category
for key in dict_wr_category.keys():
dict_wr_category[key].append(None)
# Generate wardrobe-combination on category
len_o = len(dict_wr_category['-Outer Category-']) # Length of dictionary
len_b = len(dict_wr_category['-Bottoms Category-']) # Length of dictionary
for c in range(0, len_o):
for o in range(iter_num_o, len_o):
bool_co = c == len_o - 1 and o == len_o - 1
if bool_co or c != o: # the same wardrobe cannot appear twice unless both slots are None
for t in dict_wr_category['-Tops Category-']:
for i in dict_wr_category['-Inner Category-']:
iter_num_b = 0 # Reset start of counting
for b in range(0, len_b):
for s in range(iter_num_b, len_b):
bool_bs = b == len_b - 1 and s == len_b - 1
if b != s or bool_bs: # the same wardrobe cannot appear twice unless both slots are None
combi_content[num]['wardrobeid_c'] = dict_wr_category['-Outer Category-'][c]
combi_content[num]['wardrobeid_o'] = dict_wr_category['-Outer Category-'][o]
combi_content[num]['wardrobeid_t'] = t
combi_content[num]['wardrobeid_i'] = i
combi_content[num]['wardrobeid_b'] = dict_wr_category['-Bottoms Category-'][b]
combi_content[num]['wardrobeid_s'] = dict_wr_category['-Bottoms Category-'][s]
num += 1 # Count dictionary-length
if num >= __len_combi__:
combi_content[num] = {}
else:
pass
iter_num_b += 1 # Shift start of counting
iter_num_o += 1 # Shift start of counting
# Generate predicted values on wardrobe-combination
__best_score__ = 5 # Best comfort score
for i in range(num):
combi_score[i] = [0,0,0,0,0,0] # Set default value
for key, value in combi_content.items(): # Search in the dictionaries about wardrobe-combination
for k, v in value.items(): # Search in the dictionary about wardrobe-combination
for i in rows_wr: # Search in the list about wardrobes-DB
if v == i[column_wardrobes['id']]: # If two wardrobe-id are same
combi_score[key][column_history_wear[k] - column_history_wear['wardrobeid_c']] = i[column_wardrobes['warmscore']] # Store warmscore
break
# Append temperature
location = session["user_location"] # Get user's location
cursor.execute("SELECT * FROM weather_today WHERE location = :t_location;",
{'t_location': location})
rows_weather = cursor.fetchall()
for key, value in combi_score.items():
combi_score[key].extend([rows_weather[0][column_weather_today["temperature_max"]],
rows_weather[0][column_weather_today["temperature_min"]]
])
# # If you want to run this in another environment (e.g. a Jupyter notebook)
# path_w = "combi_score.py"
# generate_function_to_text("generate_dict_score", path_w, combi_score)
# Generate pandas-dataframe from dictionary
data_warmscore =
|
pd.DataFrame.from_dict(combi_score, orient='index', columns=feature_names[0:7+1])
|
pandas.DataFrame.from_dict
|
"""
Benchmark for ANTs
see:
* http://stnava.github.io/ANTs
* https://sourceforge.net/projects/advants/
* https://github.com/stnava/ANTsDoc/issues/1
INSTALLATION:
See: https://brianavants.wordpress.com/2012/04/13/updated-ants-compile-instructions-april-12-2012/
* Do NOT download the binary code, there is an issue:
- https://sourceforge.net/projects/advants/files/ANTS/ANTS_Latest
- https://github.com/ANTsX/ANTs/issues/733
* Compile from source::
git clone git://github.com/stnava/ANTs.git
mkdir antsbin
cd antsbin
ccmake ../ANTs
make -j$(nproc)
Discussion
----------
I. believes he found the "problem" and indeed it has to do with the file format we use (JPEG).
I. converts the kidney images to 8-bit and then .NII.GZ and the whole pipeline works fine.
.. note:: For the available parameters/options, run `antsRegistration --help`
1) Convert images to 8-bit using Fiji (this is only because I didn't see any ITK format to store RGB images).
2) Convert the 8-bit images to .nii.gz (using ANTs script `ConvertImagePixelType`)::
ConvertImagePixelType \
Rat_Kidney_PanCytokeratin-8bit.png \
Rat_Kidney_PanCytokeratin.nii.gz \
1
3) Register images using `antsRegistrationSyN.sh`::
antsRegistrationSyN.sh \
-d 2 \
-m Rat_Kidney_PanCytokeratin.nii.gz \
-f Rat_Kidney_HE.nii.gz \
-j 1 \
-t s \
-o output \
> stdout-reg.txt 2> stderr-reg.txt
4) Apply transform to points::
antsApplyTransformsToPoints \
-d 2 \
-i Rat_Kidney_PanCytokeratin.csv \
-o testPointsHE.csv \
-t [ output0GenericAffine.mat, 1 ] \
-t output1InverseWarp.nii.gz
Usage
-----
Run the basic ANT registration with original parameters::
python bm_experiments/bm_ANTs.py \
-t ./data-images/pairs-imgs-lnds_anhir.csv \
-d ./data-images \
-o ./results \
--path_ANTs $HOME/Applications/antsbin/bin \
--path_config ./configs/ANTs_SyN.txt
.. note:: it was necessary to use our own compiled version
Copyright (C) 2017-2019 <NAME> <<EMAIL>>
"""
import glob
import logging
import os
import shutil
import sys
import pandas as pd
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.benchmark import ImRegBenchmark
from birl.utilities.data_io import (
convert_image_from_nifti,
convert_image_to_nifti_gray,
load_config_args,
load_landmarks,
save_landmarks,
)
from birl.utilities.experiments import exec_commands
from bm_experiments import bm_comp_perform
class BmANTs(ImRegBenchmark):
""" Benchmark for ANTs
no automated run test because this method requires manual compilation of ANTs
EXAMPLE
-------
>>> from birl.utilities.data_io import create_folder, update_path
>>> path_out = create_folder('temp_results')
>>> fn_path_conf = lambda n: os.path.join(update_path('configs'), n)
>>> path_csv = os.path.join(update_path('data-images'), 'pairs-imgs-lnds_mix.csv')
>>> params = {'path_table': path_csv,
... 'path_out': path_out,
... 'nb_workers': 2,
... 'unique': False,
... 'path_ANTs': '.',
... 'path_config': '.'}
>>> benchmark = BmANTs(params)
>>> benchmark.EXECUTE_TIMEOUT
10800
>>> benchmark.run() # doctest: +SKIP
>>> shutil.rmtree(path_out, ignore_errors=True)
"""
#: timeout for executing single image registration
EXECUTE_TIMEOUT = 3 * 60 * 60 # default = 3 hour
#: required experiment parameters
REQUIRED_PARAMS = ImRegBenchmark.REQUIRED_PARAMS + ['path_config']
#: executable for performing image registration
EXEC_REGISTRATION = 'antsRegistration'
#: executable for performing image transformation
EXEC_TRANSFORM_IMAGE = 'antsApplyTransforms'
#: executable for performing landmarks transformation
EXEC_TRANSFORM_POINTS = 'antsApplyTransformsToPoints'
#: command for executing the image registration
COMMAND_REGISTER = '%(antsRegistration)s \
--dimensionality 2 \
%(config)s \
--output [%(output)s/trans]'
#: command for executing the warping image
COMMAND_WARP_IMAGE = '%(antsApplyTransforms)s \
--dimensionality 2 \
--input %(img_source)s \
--output %(output)s/%(img_name)s.nii \
--reference-image %(img_target)s \
--transform %(transfs)s \
--interpolation Linear'
#: command for executing the warping landmarks
COMMAND_WARP_POINTS = '%(antsApplyTransformsToPoints)s \
--dimensionality 2 \
--input %(path_points)s \
--output %(output)s/%(pts_name)s.csv \
--transform %(transfs)s'
#: column name of temporary Nifty reference image
COL_IMAGE_REF_NII = ImRegBenchmark.COL_IMAGE_REF + ' Nifty'
#: column name of temporary Nifty noving image
COL_IMAGE_MOVE_NII = ImRegBenchmark.COL_IMAGE_MOVE + ' Nifty'
def _prepare(self):
""" prepare BM - copy configurations """
logging.info('-> copy configuration...')
self._copy_config_to_expt('path_config')
# this is not possible since the executables can be in std path
# REQUIRED_EXECUTABLES = (self.EXEC_REGISTRATION,
# self.EXEC_TRANSFORM_IMAGE,
# self.EXEC_TRANSFORM_POINTS)
# path_execs = [os.path.join(self.params['path_ANTs'], execute)
# for execute in REQUIRED_EXECUTABLES]
# assert all(os.path.isfile(p) for p in path_execs), \
# 'Some executables are missing: %r' \
# % [p for p in path_execs if not os.path.isfile(p)]
p_ants = self.params.get('path_ANTs', '')
if p_ants and os.path.isdir(p_ants):
logging.info('using local executions from: %s', p_ants)
def _exec_update(executable):
is_path = p_ants and os.path.isdir(p_ants)
return os.path.join(p_ants, executable) if is_path else executable
self.exec_register = _exec_update(self.EXEC_REGISTRATION)
self.exec_transf_img = _exec_update(self.EXEC_TRANSFORM_IMAGE)
self.exec_transf_pts = _exec_update(self.EXEC_TRANSFORM_POINTS)
def _prepare_img_registration(self, item):
""" prepare the experiment folder if it is required,
eq. copy some extra files
:param dict item: dictionary with regist. params
:return dict: the same or updated registration info
"""
logging.debug('.. generate command before registration experiment')
# set the paths for this experiment
path_dir = self._get_path_reg_dir(item)
path_im_ref, path_im_move, _, _ = self._get_paths(item)
# Convert images to Nifty
try: # catching issue with too large images
item[self.COL_IMAGE_REF_NII] = convert_image_to_nifti_gray(path_im_ref, path_dir)
except Exception:
logging.exception('Converting: %s', path_im_ref)
return
try: # catching issue with too large images
item[self.COL_IMAGE_MOVE_NII] = convert_image_to_nifti_gray(path_im_move, path_dir)
except Exception:
logging.exception('Converting: %s', path_im_move)
return
return item
def _generate_regist_command(self, item):
""" generate the registration command(s)
:param dict item: dictionary {str: str|float} with registration params
:return str|list(str): the execution commands
"""
path_dir = self._get_path_reg_dir(item)
config = load_config_args(self.params['path_config'])
config = config % {'target-image': item[self.COL_IMAGE_REF_NII], 'source-image': item[self.COL_IMAGE_MOVE_NII]}
cmd = self.COMMAND_REGISTER % {'config': config, 'antsRegistration': self.exec_register, 'output': path_dir}
return cmd
def _extract_warped_image_landmarks(self, item):
""" get registration results - warped registered images and landmarks
:param dict item: dictionary {str: value} with registration params
:return dict: paths to results
"""
path_dir = self._get_path_reg_dir(item)
_, _, _, path_lnds_move = self._get_paths(item)
name_im_move, _ = os.path.splitext(os.path.basename(path_lnds_move))
name_lnds_move, _ = os.path.splitext(os.path.basename(path_lnds_move))
# simplified version of landmarks
lnds = load_landmarks(path_lnds_move)
path_lnds_warp = os.path.join(path_dir, name_lnds_move + '.csv')
# https://github.com/ANTsX/ANTs/issues/733#issuecomment-472049427
|
pd.DataFrame(lnds * -1, columns=['x', 'y'])
|
pandas.DataFrame
|
from unittest import mock
import pytest
import requests
try:
import pandas as pd
has_pandas = True
except ImportError:
has_pandas = False
from civis.response import (
CivisClientError, PaginatedResponse, _response_to_json,
convert_response_data_type, Response
)
def _create_mock_response(data, headers):
mock_response = mock.MagicMock(spec=requests.Response)
mock_response.json.return_value = data
mock_response.headers = headers
mock_response.status_code = 200
return mock_response
def _create_empty_response(code, headers):
mock_response = mock.MagicMock(spec=requests.Response)
mock_response.status_code = code
mock_response.content = b''
mock_response.headers = headers
return mock_response
def _make_paginated_response(path, params):
results = [
[
{'id': 1, 'name': 'job_1'},
{'id': 2, 'name': 'job_2'},
{'id': 3, 'name': 'job_3'},
],
[
{'id': 4, 'name': 'job_4'},
{'id': 5, 'name': 'job_5'},
],
[]
]
mock_endpoint = mock.MagicMock()
mock_endpoint._make_request.side_effect = [
_create_mock_response(result, {}) for result in results
]
mock_endpoint._return_type = 'snake'
paginator = PaginatedResponse(path, params, mock_endpoint)
return paginator, mock_endpoint
def test_pagination():
path = '/objects'
params = {'param': 'value'}
paginator, mock_endpoint = _make_paginated_response(path, params)
# No API calls made yet.
mock_endpoint._make_request.assert_not_called()
all_data = []
for indx, obj in enumerate(paginator):
assert obj['id'] == indx + 1
all_data.append(obj)
# Test lazy evaluation. Should only make one call up until the
# first item of the second page is needed.
if indx < 3:
mock_endpoint._make_request.assert_called_once_with(
'GET', path, dict(params, **{'page_num': 1}))
else:
mock_endpoint._make_request.assert_called_with(
'GET', path, dict(params, **{'page_num': 2}))
# One extra call is made. Pagination is stopped since the response is
# empty.
assert mock_endpoint._make_request.call_count == 3
assert len(all_data) == 5
def test_iterator_interface():
# Make sure that the PaginatedResponse implements `next` as expected
paginator, _ = _make_paginated_response('/objects', {'param': 'value'})
assert next(paginator)['id'] == 1
assert next(paginator)['id'] == 2
assert next(paginator)['id'] == 3
assert next(paginator)['id'] == 4
assert next(paginator)['id'] == 5
with pytest.raises(StopIteration):
next(paginator)
def test_response_to_json_no_error():
raw_response = _create_mock_response({'key': 'value'}, None)
assert _response_to_json(raw_response) == {'key': 'value'}
def test_response_to_no_content_snake():
# Test empty response handling for codes where we're likely to see them.
for code in [202, 204, 205]:
raw_response = _create_empty_response(code, {'header1': 'val1'})
data = convert_response_data_type(raw_response, return_type='snake')
assert isinstance(data, Response)
assert data.json_data is None
assert data.headers == {'header1': 'val1'}
def test_response_to_json_parsing_error():
raw_response = mock.MagicMock()
raw_response.json.side_effect = ValueError('Invalid json')
with pytest.raises(CivisClientError) as excinfo:
_response_to_json(raw_response)
assert 'Unable to parse JSON from response' in str(excinfo.value)
def test_convert_data_type_raw_unparsed():
response = _create_mock_response({}, {'header1': 'val1'})
data = convert_response_data_type(response, return_type='raw')
assert isinstance(data, requests.Response)
assert data.headers == {'header1': 'val1'}
def test_convert_data_type_raw_parsed():
response = {'foo': 'bar'}
data = convert_response_data_type(response, return_type='raw')
assert isinstance(data, dict)
assert data == {'foo': 'bar'}
@pytest.mark.skipif(not has_pandas, reason='pandas not installed')
def test_convert_data_type_pandas_series():
response = _create_mock_response({'foo': 'bar'}, None)
data = convert_response_data_type(response, return_type='pandas')
assert isinstance(data, pd.Series)
assert data.equals(pd.Series({'foo': 'bar'}))
@pytest.mark.skipif(not has_pandas, reason='pandas not installed')
def test_convert_data_type_pandas_df():
response = _create_mock_response([{'foo': 'bar'}], None)
data = convert_response_data_type(response, return_type='pandas')
assert isinstance(data, pd.DataFrame)
assert data.equals(
|
pd.DataFrame.from_records([{'foo': 'bar'}])
|
pandas.DataFrame.from_records
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 17:44:58 2018
@author: Yang
"""
from bs4 import BeautifulSoup
import pandas as pd
# prepare the table
import sqlite3
from sqlite3 import Error
conn = sqlite3.connect("hurricanes.db")
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS Hurricanes;")
cur = conn.cursor()
cur.execute("""
create table Hurricanes
(Year text,
tropical_storms text,
hurricanes text,
major_hurricanes text,
deaths text,
damage text,
notes text)
"""
)
conn.commit()
cur.close()
conn.close()
soup = BeautifulSoup( open("Atlantic hurricane season - Wikipedia.html",'r',encoding='utf-8'), "html.parser")
tables = soup(['table'])
for t,table in enumerate(tables):
headers = [header.text.replace("\n","") for header in table.find_all('th')]
print (headers)
rows = []
df_ = pd.DataFrame(columns=headers)
conn = sqlite3.connect("hurricanes.db")
for i,row in enumerate(table.find_all('tr')):
rows.append( [ str(val.text.encode('utf8')) for val in row.find_all('td')])
#rows.append( [val.text.encode('utf8') for val in row.find_all('td')])
rows[i] = [x.replace("b'","") for x in rows[i]]
rows[i] = [x.replace('b"',"") for x in rows[i]]
rows[i] = [x.replace('\\n"',"") for x in rows[i]]
rows[i] = [x.replace("\\n'","") for x in rows[i]]
rows[i] = [x.replace("'","") for x in rows[i]]
rows[i] = [x.replace("\\xe2\\x80\\x93","-") for x in rows[i]]
rows[i] = [x.replace("\\xe2\\x80\\xa2","-") for x in rows[i]]
rows[i] = [x.replace("\\xe2\\x89\\xa5",">=") for x in rows[i]]
rows[i] = [x.replace("\\xc2\\xa0"," ") for x in rows[i]]
if i>=1:
df_.loc[i] = rows[i]
year = storms = hurricanes = mhurricanes = deaths = damage = notes = ""
if "Year" in df_: year = str(df_.loc[:,'Year'][i])
if "Number oftropical storms" in df_: storms = str(df_.loc[:,'Number oftropical storms'][i])
if "Number ofhurricanes" in df_: hurricanes = str(df_.loc[:,'Number ofhurricanes'][i])
if "Number ofmajor hurricanes" in df_: mhurricanes = str(df_.loc[:,'Number ofmajor hurricanes'][i])
if "Deaths" in df_: deaths = str(df_.loc[:,'Deaths'][i])
if "DamageUSD" in df_: damage = str(df_.loc[:,'DamageUSD'][i])
if "Notes" in df_: notes = str(df_.loc[:,'Notes'][i])
new_row = [year, storms, hurricanes, mhurricanes, deaths, damage, notes]
sql = "insert into Hurricanes values ({0})".format(str(new_row)).replace('[','').replace("]","")
cur = conn.cursor()
cur.execute(sql)
print( sql )
conn.commit()
conn.close()
conn = sqlite3.connect("hurricanes.db")
df =
|
pd.read_sql_query("select * from Hurricanes;", conn)
|
pandas.read_sql_query
|
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras_preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import Xception
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
# Constants
DATA_DATE = '2020-05-17'
BATCH_SIZE = 32
IMG_HEIGHT = 224
IMG_WIDTH = 224
train_dir = '/Users/brianelinsky/Dropbox/ActiveProjects/modeling_projects/catdog/data/2020-05-17/train'
test_dir = '/Users/brianelinsky/Dropbox/ActiveProjects/modeling_projects/catdog/data/2020-05-17/test1'
# Get train filenames
train_filenames = os.listdir(train_dir)
# Get test filenames
test_filenames = os.listdir(test_dir)
test_filenames = sorted(test_filenames, key=lambda x: float(x[:-4]))
# Label each training image as dog or cat
categories = []
for filename in train_filenames:
category = filename.split('.')[0]
if category == 'dog':
categories.append('dog')
else:
categories.append('cat')
# Create training dataframe of filenames and labels
train_df = pd.DataFrame({
'filename': train_filenames,
'category': categories
})
# Create test dataframe for test filenames
test_df = pd.DataFrame({
'filename': test_filenames
})
# Split training data into train and validate datasets
train_df, validate_df = train_test_split(train_df, test_size=0.15, random_state=22)
# Calculate dataset sizes
train_data_count = len(train_df)
validate_data_count = len(validate_df)
test_data_count = len(test_df)
# Load a pre-trained CNN
conv_base = Xception(weights='imagenet',
include_top=False,
input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
print(conv_base.summary())
# Create generator to rescale images by 1/255
datagen = ImageDataGenerator(rescale=1. / 255)
def extract_features(df, sample_count):
features = np.zeros(shape=(sample_count, 7, 7, 2048))
labels = np.zeros(shape=sample_count)
generator = datagen.flow_from_dataframe(
df,
train_dir,
x_col='filename',
y_col='category',
target_size=(IMG_HEIGHT, IMG_WIDTH), # Resize images to target
color_mode='rgb',
class_mode='binary',
batch_size=BATCH_SIZE
)
i = 0
for inputs_batch, labels_batch in generator:
features_batch = conv_base.predict(inputs_batch)
features[i * BATCH_SIZE: (i + 1) * BATCH_SIZE] = features_batch
labels[i * BATCH_SIZE: (i + 1) * BATCH_SIZE] = labels_batch
i += 1
if i * BATCH_SIZE >= sample_count:
break
return features, labels
def extract_test_features(df, sample_count):
features = np.zeros(shape=(sample_count, 7, 7, 2048))
generator = datagen.flow_from_dataframe(
df,
test_dir,
x_col='filename',
target_size=(IMG_HEIGHT, IMG_WIDTH), # Resize images to target
color_mode='rgb',
class_mode=None,
shuffle=False,
batch_size=BATCH_SIZE
)
i = 0
for inputs_batch in generator:
features_batch = conv_base.predict(inputs_batch)
features[i * BATCH_SIZE: (i + 1) * BATCH_SIZE] = features_batch
i += 1
if i * BATCH_SIZE >= sample_count:
break
return features
# Use pre-trained CNN to extract features for train and validation datasets
train_features, train_labels = extract_features(train_df, train_data_count)
validation_features, validation_labels = extract_features(validate_df, validate_data_count)
# Extract features from test dataset
test_features = extract_test_features(test_df, test_data_count)
# Flatten train, test, and validate features so they can be fed into a neural network
train_features = np.reshape(train_features, (train_data_count, 7 * 7 * 2048))
validation_features = np.reshape(validation_features, (validate_data_count, 7 * 7 * 2048))
test_features = np.reshape(test_features, (test_data_count, 7 * 7 * 2048))
# CREATE 2x2 FACTORIAL DESIGN
def create_model(hasDropout):
model = Sequential()
model.add(Dense(256, activation='relu', input_dim=7 * 7 * 2048))
if hasDropout:
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
return model
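# Reading aid: the 2x2 factorial design announced above crosses dropout (yes/no) with training
# duration (30 vs. 10 epochs); the four models created below are the four cells of that design.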
model_dropout_30_epochs = create_model(True)
model_dropout_10_epochs = create_model(True)
model_no_dropout_30_epochs = create_model(False)
model_no_dropout_10_epochs = create_model(False)
models = [model_dropout_30_epochs, model_dropout_10_epochs, model_no_dropout_30_epochs, model_no_dropout_10_epochs]
for model in models:
model.compile(optimizer='RMSprop', loss='binary_crossentropy', metrics=['acc'])
def fit_model(model, num_epochs):
return model.fit(train_features, train_labels, epochs=num_epochs,
validation_data=(validation_features, validation_labels))
# Fit Models
history_dropout_30 = fit_model(model_dropout_30_epochs, 30)
history_dropout_10 = fit_model(model_dropout_10_epochs, 10)
history_no_dropout_30 = fit_model(model_no_dropout_30_epochs, 30)
history_no_dropout_10 = fit_model(model_no_dropout_10_epochs, 10)
def plot_learning_curve(history, name):
|
pd.DataFrame(history.history)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
results_df =
|
pd.read_csv('./csvs/results_from_mongo.csv')
|
pandas.read_csv
|
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pds
from datetime import datetime as dt
import datetime
import plotly.graph_objs as go
import plotly.express as px
from app import app, template
update_date = dt(2020, 3, 31)
d_trust = pds.read_csv('assets/d_trust.csv')
d_trust['hospitaladmissiondate'] = pds.to_datetime(d_trust['hospitaladmissiondate'], errors='coerce')
icu_forecast = pds.read_csv('assets/icu_risk_forecast.csv')
icu_forecast['hospitaladmissiondate'] = pds.to_datetime(icu_forecast['hospitaladmissiondate'], errors='coerce')
death_forecast = pds.read_csv('assets/death_risk_forecast.csv')
death_forecast['hospitaladmissiondate'] = pds.to_datetime(death_forecast['hospitaladmissiondate'], errors='coerce')
d_patient_age = pds.read_csv('assets/d_patient_age.csv')
d_patient_age['hospitaladmissiondate'] =
|
pds.to_datetime(d_patient_age['hospitaladmissiondate'], errors='coerce')
|
pandas.to_datetime
|
from .sourcehooks import SourceHook
import pandas as pd
from .lrseg import Lrseg
class Agency(SourceHook):
def __init__(self, sourcedata=None, metadata=None):
""" Methods for querying CAST data related to Agencies """
SourceHook.__init__(self, sourcedata=sourcedata, metadata=metadata)
self.lrseg = Lrseg(sourcedata=sourcedata, metadata=metadata)
def all_names(self, astype=pd.Series):
return self.grab_sourcetbl_column(tbl='TblAgency', col='agencycode', astype=astype)
def all_ids(self, astype=pd.Series):
return self.grab_sourcetbl_column(tbl='TblAgency', col='agencyid', astype=astype)
def ids_from_names(self, agencycodes=None):
"""
Args:
agencycodes (list, pd.Series, or pd.DataFrame):
Returns:
same type as input
"""
return self._map_using_sourcetbl(agencycodes, tbl='TblAgency',
fromcol='agencycode', tocol='agencyid')
def agencycodes_from_fullnames(self, fullnames=None):
"""
Args:
            fullnames (list, pd.Series, or pd.DataFrame):
Returns:
same type as input
"""
return self._map_using_sourcetbl(fullnames, tbl='TblAgency',
fromcol='agencyfullname', tocol='agencycode')
def ids_from_fullnames(self, fullnames=None):
"""
Args:
fullnames (list, pd.Series, or pd.DataFrame):
Returns:
same type as input
"""
return self._map_using_sourcetbl(fullnames, tbl='TblAgency',
fromcol='agencyfullname', tocol='agencyid')
def fullnames_from_ids(self, ids=None):
"""
Args:
ids (list, pd.Series, or pd.DataFrame):
Returns:
same type as input
"""
return self._map_using_sourcetbl(ids, tbl='TblAgency',
fromcol='agencyid', tocol='agencyfullname')
def append_agencyid_to_lrsegids(self, lrsegids=None):
return self.append_column_to_table(lrsegids, sourcetbl='TblLandRiverSegmentAgency',
commoncol='lrsegid', appendcol='agencyid')
def agencycodes_from_lrsegnames(self, lrsegnames=None):
if not isinstance(lrsegnames, list):
lrsegnames = lrsegnames.tolist()
# self.__ids_from_names(idtype='lrseg', names=lrsegnames)
lrsegids = self.lrseg.ids_from_names(names=lrsegnames)
return self.agencycodes_from_lrsegids(lrsegids=lrsegids)
def agencycodes_from_lrsegids(self, lrsegids=None):
backtolist = False
if isinstance(lrsegids, list):
backtolist = True
lrsegids =
|
pd.DataFrame(lrsegids, columns=['lrsegid'])
|
pandas.DataFrame
|
__version__ = '0.1.3'
__maintainer__ = '<NAME> 31.12.2019'
__contributors__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>'
__birthdate__ = '31.12.2019'
__status__ = 'dev' # options are: dev, test, prod
#----- imports & packages ------
if __package__ is None or __package__ == '':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(__file__))))
import pprint
import pandas as pd
import numpy as np
import warnings
from pathlib import Path
from zipfile import ZipFile
class DataParser:
def __init__(self, configDict: dict, datasetID: str, loadEncrypted=False):
"""
        Basic class for parsing a mobility survey trip data set. Currently, both German travel surveys, MiD 2008 and
        MiD 2017, are pre-configured and one of the two can be given (default: MiD 2017).
The data set can be provided from an encrypted file on a server in which case the link to the ZIP-file as well
as a link to the file within the ZIP-file have to be supplied in the globalConfig and a password has to be
supplied in the parseConfig.
Columns relevant for the EV simulation are selected from the entirety of the data and renamed to VencoPy
internal variable names given in the dictionary parseConfig['dataVariables'] for the respective survey data set.
Manually configured exclude, include, greaterThan and smallerThan filters are applied as they are specified in
parseConfig. For some columns, raw data is transferred to human readable strings and respective columns are
added. Pandas timestamp columns are synthesized from the given trip start and trip end time information.
:param configDict: A dictionary containing multiple yaml config files
:param datasetID: Currently, MiD08 and MiD17 are implemented as travel survey data sets
:param loadEncrypted: If True, load an encrypted ZIP file as specified in parseConfig
"""
self.parseConfig = configDict['parseConfig']
self.globalConfig = configDict['globalConfig']
self.localPathConfig = configDict['localPathConfig']
self.datasetID = self.checkDatasetID(datasetID, self.parseConfig)
self.rawDataPath = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['tripsDataRaw']
self.subDict = {}
self.rawData = None
self.data = None
self.__filterDict = {}
self.columns = self.compileVariableList()
self.filterDictNameList = ['include', 'exclude', 'greaterThan', 'smallerThan']
self.updateFilterDict()
print('Parsing properties set up')
if loadEncrypted:
print(f"Starting to retrieve encrypted data file from "
f"{self.globalConfig['pathAbsolute']['encryptedZipfile']}")
self.loadEncryptedData(pathToZip=Path(self.globalConfig['pathAbsolute']['encryptedZipfile']) /
self.globalConfig['files'][self.datasetID]['encryptedZipFileB2'],
pathInZip=self.globalConfig['files'][self.datasetID]['tripDataZipFileRaw'])
else:
print(f"Starting to retrieve local data file from {self.rawDataPath}")
self.loadData()
def updateFilterDict(self) -> None:
"""
Internal function to parse the filter dictionary of a specified data set from parseConfig.yaml
:return: None
"""
self.__filterDict[self.datasetID] = self.parseConfig['filterDicts'][self.datasetID]
        self.__filterDict[self.datasetID] = {iKey: iVal for iKey, iVal in self.__filterDict[self.datasetID].items()
                                             if iVal is not None}
def checkDatasetID(self, datasetID: str, parseConfig: dict) -> str:
"""
General check if data set ID is defined in parseConfig.yaml
:param datasetID: list of strings declaring the datasetIDs to be read in
:param parseConfig: A yaml config file holding a dictionary with the keys 'pathRelative' and 'pathAbsolute'
        :return: Returns the validated datasetID string of the mobility data set
"""
availableDatasetIDs = parseConfig['dataVariables']['datasetID']
assert datasetID in availableDatasetIDs, \
f'Defined datasetID {datasetID} not specified under dataVariables in parseConfig. Specified datasetIDs ' \
f'are {availableDatasetIDs}'
return datasetID
def compileVariableList(self) -> list:
"""
Clean up the replacement dictionary of raw data file variable (column) names. This has to be done because some
variables that may be relevant for the analysis later on are only contained in one raw data set while not
        contained in another one. E.g. whether a trip was an intermodal trip was only assessed in the MiD 2017 while it
        wasn't in the MiD 2008. This has to be mirrored by the filter dict for the respective data set.
:return: List of variables
"""
listIndex = self.parseConfig['dataVariables']['datasetID'].index(self.datasetID)
variables = [val[listIndex] if not val[listIndex] == 'NA' else 'NA' for key, val in
self.parseConfig['dataVariables'].items()]
variables.remove(self.datasetID)
self.removeNA(variables)
return variables
def removeNA(self, variables: list):
"""
Removes all strings that can be capitalized to 'NA' from the list of variables
:param variables: List of variables of the mobility dataset
:return: Returns a list with non NA values
"""
vars = [iVar.upper() for iVar in variables]
counter = 0
for idx, iVar in enumerate(vars):
if iVar == 'NA':
del variables[idx - counter]
counter += 1
def loadData(self):
"""
        Loads data specified in self.rawDataPath and stores it in self.rawData. Raises an exception if an invalid suffix
is specified in self.rawDataPath. READ IN OF CSV HAS NOT BEEN EXTENSIVELY TESTED BEFORE BETA RELEASE.
:return: None
"""
# Future releases: Are potential error messages (.dta not being a stata file even as the ending matches)
# readable for the user? Should we have a manual error treatment here?
if self.rawDataPath.suffix == '.dta':
self.rawData = pd.read_stata(self.rawDataPath, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
# This has not been tested before the beta release
elif self.rawDataPath.suffix == '.csv':
self.rawData = pd.read_csv(self.rawDataPath)
else:
            raise Exception(f"Data type {self.rawDataPath.suffix} not yet specified. Available types so far are .dta and "
                            f".csv")
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def loadEncryptedData(self, pathToZip, pathInZip):
"""
Since the MiD data sets are only accessible by an extensive data security contract, VencoPy provides the
possibility to access encrypted zip files. An encryption password has to be given in parseConfig.yaml in order
to access the encrypted file. Loaded data is stored in self.rawData
:param pathToZip: path from current working directory to the zip file or absolute path to zipfile
:param pathInZip: Path to trip data file within the encrypted zipfile
:return: None
"""
with ZipFile(pathToZip) as myzip:
if '.dta' in pathInZip:
self.rawData = pd.read_stata(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')),
convert_categoricals=False, convert_dates=False, preserve_dtypes=False)
else: # if '.csv' in pathInZip:
self.rawData = pd.read_csv(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')), sep=';', decimal=',')
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def selectColumns(self):
"""
Function to filter the rawData for only relevant columns as specified by parseConfig and cleaned in
        self.compileVariableList(). Stores the subset of data in self.data
:return: None
"""
self.data = self.rawData.loc[:, self.columns]
def harmonizeVariables(self):
"""
Harmonizes the input data variables to match internal VencoPy names given as specified in the mapping in
parseConfig['dataVariables']. So far mappings for MiD08 and MiD17 are given. Since the MiD08 doesn't provide
        a combined household and person unique identifier, it is synthesized from the two IDs.
:return: None
"""
replacementDict = self.createReplacementDict(self.datasetID, self.parseConfig['dataVariables'])
dataRenamed = self.data.rename(columns=replacementDict)
if self.datasetID == 'MiD08':
dataRenamed['hhPersonID'] = (dataRenamed['hhID'].astype('string') +
dataRenamed['personID'].astype('string')).astype('int')
self.data = dataRenamed
print('Finished harmonization of variables')
def createReplacementDict(self, datasetID: str, dictRaw: dict) -> dict:
"""
Creates the mapping dictionary from raw data variable names to VencoPy internal variable names as specified
in parseConfig.yaml for the specified data set.
:param datasetID: list of strings declaring the datasetIDs to be read in
:param dictRaw: Contains dictionary of the raw data
:return: Dictionary with internal names as keys and raw data column names as values.
"""
if datasetID in dictRaw['datasetID']:
listIndex = dictRaw['datasetID'].index(datasetID)
return {val[listIndex]: key for (key, val) in dictRaw.items()}
else:
raise ValueError(f'Data set {datasetID} not specified in parseConfig variable dictionary.')
def convertTypes(self):
"""
        Convert raw column types to predefined python types as specified in parseConfig['inputDTypes'][datasetID]. This
        is mainly done for performance reasons, but also to avoid index values of type int being cast to float.
The function operates only on self.data and writes back changes to self.data
:return: None
"""
# Filter for dataset specific columns
conversionDict = self.parseConfig['inputDTypes'][self.datasetID]
keys = {iCol for iCol in conversionDict.keys() if iCol in self.data.columns}
self.subDict = {key: conversionDict[key] for key in conversionDict.keys() & keys}
self.data = self.data.astype(self.subDict)
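    # Illustrative sketch (hypothetical config entry, not taken from a real parseConfig.yaml):
    # parseConfig['inputDTypes']['MiD17'] could e.g. map {'tripStartHour': 'int8', 'tripStartMinute': 'int8'};
    # convertTypes only applies the keys that actually occur in self.data.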
def returnDictBottomValues(self, baseDict: dict, lst: list = []) -> list:
"""
Returns a list of all dictionary values of the last dictionary level (the bottom) of baseDict. The parameter
lst is used as an interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, is used as interface to next recursion
:return: Returns a list with all the bottom dictionary values
"""
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomValues(iVal, lst)
else:
if iVal is not None:
lst.append(iVal)
return lst
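    # Illustrative sketch (hypothetical values): for a nested dict such as
    # {'include': {'tripMode': [1, 2]}, 'greaterThan': {'tripDistance': [0]}}
    # returnDictBottomValues returns only the innermost values, i.e. [[1, 2], [0]].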
def checkFilterDict(self):
"""
        Checks if all values of the filter dictionaries are of type list. The elements inside those lists are currently
        not type-checked (e.g. via all(map(self.__checkStr, val))). Conditionally triggers an assert.
:return: None
"""
assert all(isinstance(val, list) for val in self.returnDictBottomValues(self.__filterDict[self.datasetID])), \
f'All values in filter dictionaries have to be lists, but are not'
def returnDictBottomKeys(self, baseDict: dict, lst: list = None) -> list:
"""
        Returns all lowest-level (bottom) keys of baseDict as a list. The parameter lst is used as
interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, used as interface between recursion levels
:return: Returns a list with all the bottom level dictionary keys
"""
if lst is None:
lst = []
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomKeys(iVal, lst)
else:
if iVal is not None:
lst.append(iKey)
return lst
def filter(self):
"""
Wrapper function to carry out filtering for the four filter logics of including, excluding, greaterThan and
smallerThan. If a filterDict is defined with a different key, a warning is thrown. The function operates on
self.data class-internally.
:return: None
"""
print(f'Starting filtering, applying {len(self.returnDictBottomKeys(self.__filterDict[self.datasetID]))} filters.')
ret = pd.DataFrame(index=self.data.index)
# Future releases: as discussed before we could indeed work here with a plug and pray approach.
# we would need to introduce a filter manager and a folder structure where to look for filters.
# this is very similar code than the one from ioproc. If we want to go down this route we should
# take inspiration from the code there. It was not easy to get it right in the first place. This
# might be easy to code but hard to implement correctly.
for iKey, iVal in self.__filterDict[self.datasetID].items():
if iKey == 'include':
ret = ret.join(self.setIncludeFilter(iVal, self.data.index))
elif iKey == 'exclude':
ret = ret.join(self.setExcludeFilter(iVal, self.data.index))
elif iKey == 'greaterThan':
ret = ret.join(self.setGreaterThanFilter(iVal, self.data.index))
elif iKey == 'smallerThan':
ret = ret.join(self.setSmallerThanFilter(iVal, self.data.index))
else:
warnings.warn(f'A filter dictionary was defined in the parseConfig with an unknown filtering key. '
f'Current filtering keys comprise include, exclude, smallerThan and greaterThan.'
f'Continuing with ignoring the dictionary {iKey}')
self.data = self.data[ret.all(axis='columns')]
self.filterAnalysis(ret)
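    # Minimal sketch of a filter dictionary as read from parseConfig.yaml (column names are
    # hypothetical, the four keys are the supported filter logics handled above):
    #   include:     {tripIsIntermodal: [0, 1]}
    #   exclude:     {tripState: [99]}
    #   greaterThan: {tripDistance: [0]}
    #   smallerThan: {tripDistance: [1000]}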
def setIncludeFilter(self, includeFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for include filter dict from parseConfig.yaml
:param includeFilterDict: Dictionary of include filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a data frame with individuals using car as a mode of transport
"""
incFilterCols = pd.DataFrame(index=dataIndex, columns=includeFilterDict.keys())
for incCol, incElements in includeFilterDict.items():
incFilterCols[incCol] = self.data[incCol].isin(incElements)
return incFilterCols
def setExcludeFilter(self, excludeFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for exclude filter dict from parseConfig.yaml
:param excludeFilterDict: Dictionary of exclude filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a filtered data frame with exclude filters
"""
exclFilterCols = pd.DataFrame(index=dataIndex, columns=excludeFilterDict.keys())
for excCol, excElements in excludeFilterDict.items():
exclFilterCols[excCol] = ~self.data[excCol].isin(excElements)
return exclFilterCols
def setGreaterThanFilter(self, greaterThanFilterDict: dict, dataIndex):
"""
Read-in function for greaterThan filter dict from parseConfig.yaml
:param greaterThanFilterDict: Dictionary of greater than filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return:
"""
greaterThanFilterCols = pd.DataFrame(index=dataIndex, columns=greaterThanFilterDict.keys())
for greaterCol, greaterElements in greaterThanFilterDict.items():
greaterThanFilterCols[greaterCol] = self.data[greaterCol] >= greaterElements.pop()
if len(greaterElements) > 0:
warnings.warn(f'You specified more than one value as lower limit for filtering column {greaterCol}.'
f'Only considering the last element given in the parseConfig.')
return greaterThanFilterCols
def setSmallerThanFilter(self, smallerThanFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for smallerThan filter dict from parseConfig.yaml
:param smallerThanFilterDict: Dictionary of smaller than filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a data frame of trips covering a distance of less than 1000 km
"""
smallerThanFilterCols = pd.DataFrame(index=dataIndex, columns=smallerThanFilterDict.keys())
for smallerCol, smallerElements in smallerThanFilterDict.items():
smallerThanFilterCols[smallerCol] = self.data[smallerCol] <= smallerElements.pop()
if len(smallerElements) > 0:
warnings.warn(f'You specified more than one value as upper limit for filtering column {smallerCol}.'
f'Only considering the last element given in the parseConfig.')
return smallerThanFilterCols
def filterAnalysis(self, filterData: pd.DataFrame):
"""
Function supplies some aggregate info of the data after filtering to the user Function does not change any
class attributes
:param filterData:
:return: None
"""
lenData = sum(filterData.all(axis='columns'))
boolDict = {iCol: sum(filterData[iCol]) for iCol in filterData}
print(f'The following values were taken into account after filtering:')
pprint.pprint(boolDict)
        print(f"All filters combined yielded a total of {lenData} rows that were taken into account.")
        print(f'This corresponds to {lenData / len(filterData) * 100} percent of the original data.')
def filterConsistentHours(self):
"""
Filtering out records where starting hour is after end hour but trip takes place on the same day.
These observations are data errors.
:return: No returns, operates only on the class instance
"""
if self.datasetID == 'MiD17' or self.datasetID == 'MiD08':
dat = self.data
self.data = dat.loc[(dat['tripStartClock'] <= dat['tripEndClock']) | (dat['tripEndNextDay'] == 1), :]
# If we want to get rid of tripStartClock and tripEndClock (they are redundant variables)
# self.data = dat.loc[pd.to_datetime(dat.loc[:, 'tripStartHour']) <= pd.to_datetime(dat.loc[:, 'tripEndHour']) |
# (dat['tripEndNextDay'] == 1), :]
def addStrColumnFromVariable(self, colName: str, varName: str):
"""
        Replaces each occurrence of a MiD/KiD variable e.g. 1,2,...,7 for weekdays with an explicitly mapped string e.g.
'MON', 'TUE',...,'SUN'.
:param colName: Name of the column in self.data where the explicit string info is stored
:param varName: Name of the VencoPy internal variable given in config/parseConfig['dataVariables']
:return: None
"""
self.data.loc[:, colName] \
= self.data.loc[:, varName].replace(self.parseConfig['Replacements'][self.datasetID][varName])
def addStrColumns(self, weekday=True, purpose=True):
"""
Adds string columns for either weekday or purpose.
:param weekday: Boolean identifier if weekday string info should be added in a separate column
:param purpose: Boolean identifier if purpose string info should be added in a separate column
:return: None
"""
if weekday:
self.addStrColumnFromVariable(colName='weekdayStr', varName='tripStartWeekday')
if purpose:
self.addStrColumnFromVariable(colName='purposeStr', varName='tripPurpose')
def composeTimestamp(self, data: pd.DataFrame = None,
colYear: str = 'tripStartYear',
colWeek: str = 'tripStartWeek',
colDay: str = 'tripStartWeekday',
colHour: str = 'tripStartHour',
colMin: str = 'tripStartMinute',
colName: str = 'timestampStart') -> np.datetime64:
"""
:param data: a data frame
:param colYear: year of start of a particular trip
:param colWeek: week of start of a particular trip
:param colDay: weekday of start of a particular trip
:param colHour: hour of start of a particular trip
:param colMin: minute of start of a particular trip
        :param colName: Name of the resulting timestamp column
:return: Returns a detailed time stamp
"""
data[colName] = pd.to_datetime(data.loc[:, colYear], format='%Y') + \
pd.to_timedelta(data.loc[:, colWeek] * 7, unit='days') + \
pd.to_timedelta(data.loc[:, colDay], unit='days') + \
pd.to_timedelta(data.loc[:, colHour], unit='hour') + \
pd.to_timedelta(data.loc[:, colMin], unit='minute')
# return data
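    # Worked sketch of the composition above (hypothetical values): year=2017, week=10, weekday=3,
    # hour=8, minute=15 gives pd.to_datetime('2017', format='%Y') -> 2017-01-01, plus 70 days (week*7),
    # plus 3 days, plus 8 h and 15 min -> Timestamp('2017-03-15 08:15:00').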
def composeStartAndEndTimestamps(self):
"""
:return: Returns start and end time of a trip
"""
self.composeTimestamp(data=self.data) # Starting timestamp
self.composeTimestamp(data=self.data, # Ending timestamps
colHour='tripEndHour',
colMin='tripEndMinute',
colName='timestampEnd')
def updateEndTimestamp(self):
"""
        Adds one day to timestampEnd for trips that end on the following day.
        :return: None
"""
endsFollowingDay = self.data['tripEndNextDay'] == 1
self.data.loc[endsFollowingDay, 'timestampEnd'] = self.data.loc[endsFollowingDay,
'timestampEnd'] + pd.offsets.Day(1)
def updateEndTimestamps(self) -> np.datetime64:
"""
        :return: None; wrapper that updates the end timestamps via updateEndTimestamp
"""
self.updateEndTimestamp()
def harmonizeVariablesGenericIdNames(self):
"""
"""
self.data['genericID'] = self.data[str(self.parseConfig['IDVariablesNames'][self.datasetID])]
print('Finished harmonization of ID variables')
def process(self):
"""
Wrapper function for harmonising and filtering the dataset.
"""
self.selectColumns()
self.harmonizeVariables()
self.convertTypes()
self.checkFilterDict()
self.filter()
self.filterConsistentHours()
self.addStrColumns()
self.composeStartAndEndTimestamps()
self.updateEndTimestamps()
self.harmonizeVariablesGenericIdNames()
print('Parsing completed')
class ParseMiD(DataParser):
# Inherited data class to differentiate between abstract interfaces such as vencopy internal
# variable namings and data set specific functions such as filters etc. Currently not used (06/14/2021)
pass
class ParseKiD(DataParser):
# Inherited data class to differentiate between abstract interfaces such as vencopy internal
# variable namings and data set specific functions such as filters etc.
def __init__(self, configDict: dict, datasetID: str):
super().__init__(configDict, datasetID)
def loadData(self):
        rawDataPathTrips = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['tripsDataRaw']
        rawDataPathVehicles = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['vehiclesDataRaw']
rawDataTrips = pd.read_stata(rawDataPathTrips, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
rawDataVehicles = pd.read_stata(rawDataPathVehicles, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
rawDataVehicles.set_index('k00', inplace=True)
rawData = rawDataTrips.join(rawDataVehicles, on='k00')
self.rawData = rawData
print(f'Finished loading {len(self.rawData)} rows of raw data of type .dta')
def addStrColumns(self, weekday=True, purpose=True):
"""
Adds string columns for either weekday or purpose.
:param weekday: Boolean identifier if weekday string info should be added in a separate column
:param purpose: Boolean identifier if purpose string info should be added in a separate column
:return: None
"""
# from tripStartDate retrieve tripStartWeekday, tripStartWeek, tripStartYear, tripStartMonth, tripStartDay
# from tripStartClock retrieve tripStartHour, tripStartMinute
# from tripEndClock retrieve tripEndHour, tripEndMinute
self.data['tripStartDate'] =
|
pd.to_datetime(self.data['tripStartDate'], format='%d.%m.%Y')
|
pandas.to_datetime
|
import unittest
import platform
import random
import string
import platform
import pandas as pd
import numpy as np
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs, count_parfor_OneDs,
count_array_OneDs, dist_IR_contains, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba.config import IS_32BITS
@hpat.jit
def inner_get_column(df):
# df2 = df[['A', 'C']]
# df2['D'] = np.ones(3)
return df.A
COL_IND = 0
class TestDataFrame(unittest.TestCase):
def test_create1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_create_cond1(self):
def test_impl(A, B, c):
if c:
df = pd.DataFrame({'A': A})
else:
df = pd.DataFrame({'A': B})
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.ones(n)
B = np.arange(n) + 1.0
c = 0
pd.testing.assert_series_equal(hpat_func(A, B, c), test_impl(A, B, c))
c = 2
pd.testing.assert_series_equal(hpat_func(A, B, c), test_impl(A, B, c))
@unittest.skip('Implement feature to create DataFrame without column names')
def test_create_without_column_names(self):
def test_impl():
df = pd.DataFrame([100, 200, 300, 400, 200, 100])
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
def test_unbox1(self):
def test_impl(df):
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
pd.testing.assert_series_equal(hpat_func(df), test_impl(df))
@unittest.skip("needs properly refcounted dataframes")
def test_unbox2(self):
def test_impl(df, cond):
n = len(df)
if cond:
df['A'] = np.arange(n) + 2.0
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
pd.testing.assert_series_equal(hpat_func(df.copy(), True), test_impl(df.copy(), True))
pd.testing.assert_series_equal(hpat_func(df.copy(), False), test_impl(df.copy(), False))
@unittest.skip('Implement feature to create DataFrame without column names')
def test_unbox_without_column_names(self):
def test_impl(df):
return df
df = pd.DataFrame([100, 200, 300, 400, 200, 100])
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_box1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_box2(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'bb', 'ccc']})
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
@unittest.skip("pending df filter support")
def test_box3(self):
def test_impl(df):
df = df[df.A != 'dd']
return df
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_box_categorical(self):
def test_impl(df):
df['A'] = df['A'] + 1
return df
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3],
'B': pd.Series(['N', 'Y', 'Y'],
dtype=pd.api.types.CategoricalDtype(['N', 'Y']))})
pd.testing.assert_frame_equal(hpat_func(df.copy(deep=True)), test_impl(df))
def test_box_dist_return(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df
hpat_func = hpat.jit(distributed={'df'})(test_impl)
n = 11
hres, res = hpat_func(n), test_impl(n)
self.assertEqual(count_array_OneDs(), 3)
self.assertEqual(count_parfor_OneDs(), 2)
dist_sum = hpat.jit(
lambda a: hpat.distributed_api.dist_reduce(
a, np.int32(hpat.distributed_api.Reduce_Type.Sum.value)))
dist_sum(1) # run to compile
np.testing.assert_allclose(dist_sum(hres.A.sum()), res.A.sum())
np.testing.assert_allclose(dist_sum(hres.B.sum()), res.B.sum())
def test_len1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.random.ranf(n)})
return len(df)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_shape1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.random.ranf(n)})
return df.shape
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_column_getitem1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df['A'].values
return Ac.sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
def test_column_list_getitem1(self):
def test_impl(df):
return df[['A', 'C']]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_filter1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df[df.A > .5]
return df1.B.sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_filter2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df.loc[df.A > .5]
return np.sum(df1.B)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_filter3(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df.iloc[(df.A > .5).values]
return np.sum(df1.B)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_iloc1(self):
def test_impl(df, n):
return df.iloc[1:n].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc2(self):
def test_impl(df, n):
return df.iloc[np.array([1, 4, 9])].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc3(self):
def test_impl(df):
return df.iloc[:, 1].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
@unittest.skip("TODO: support A[[1,2,3]] in Numba")
def test_iloc4(self):
def test_impl(df, n):
return df.iloc[[1, 4, 9]].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc5(self):
# test iloc with global value
def test_impl(df):
return df.iloc[:, COL_IND].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_loc1(self):
def test_impl(df):
return df.loc[:, 'B'].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_iat1(self):
def test_impl(n):
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
return df.iat[3, 1]
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_iat2(self):
def test_impl(df):
return df.iat[3, 1]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
self.assertEqual(hpat_func(df), test_impl(df))
def test_iat3(self):
def test_impl(df, n):
return df.iat[n - 1, 1]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
self.assertEqual(hpat_func(df, n), test_impl(df, n))
def test_iat_set1(self):
def test_impl(df, n):
df.iat[n - 1, 1] = n**2
return df.A # return the column to check column aliasing
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
df2 = df.copy()
pd.testing.assert_series_equal(hpat_func(df, n), test_impl(df2, n))
def test_iat_set2(self):
def test_impl(df, n):
df.iat[n - 1, 1] = n**2
return df # check df aliasing/boxing
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
df2 = df.copy()
pd.testing.assert_frame_equal(hpat_func(df, n), test_impl(df2, n))
def test_set_column1(self):
# set existing column
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df['A'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column_reflect4(self):
# set existing column
def test_impl(df, n):
df['A'] = np.arange(n)
hpat_func = hpat.jit(test_impl)
n = 11
df1 = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df2 = df1.copy()
hpat_func(df1, n)
test_impl(df2, n)
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(df1, df2, check_dtype=do_check)
def test_set_column_new_type1(self):
# set existing column with a new type
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n) + 3.0})
df['A'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column2(self):
# create new column
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n) + 1.0})
df['C'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column_reflect3(self):
# create new column
def test_impl(df, n):
df['C'] = np.arange(n)
hpat_func = hpat.jit(test_impl)
n = 11
df1 = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df2 = df1.copy()
hpat_func(df1, n)
test_impl(df2, n)
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(df1, df2, check_dtype=do_check)
def test_set_column_bool1(self):
def test_impl(df):
df['C'] = df['A'][df['B']]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
df2 = df.copy()
test_impl(df2)
hpat_func(df)
pd.testing.assert_series_equal(df.C, df2.C)
def test_set_column_reflect1(self):
def test_impl(df, arr):
df['C'] = arr
return df.C.sum()
hpat_func = hpat.jit(test_impl)
n = 11
arr = np.random.ranf(n)
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
hpat_func(df, arr)
self.assertIn('C', df)
np.testing.assert_almost_equal(df.C.values, arr)
def test_set_column_reflect2(self):
def test_impl(df, arr):
df['C'] = arr
return df.C.sum()
hpat_func = hpat.jit(test_impl)
n = 11
arr = np.random.ranf(n)
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df2 = df.copy()
np.testing.assert_almost_equal(hpat_func(df, arr), test_impl(df2, arr))
def test_df_values1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.values
hpat_func = hpat.jit(test_impl)
n = 11
np.testing.assert_array_equal(hpat_func(n), test_impl(n))
def test_df_values2(self):
def test_impl(df):
return df.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_df_values_parallel1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.values.sum()
hpat_func = hpat.jit(test_impl)
n = 11
np.testing.assert_array_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_df_apply(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)})
B = df.apply(lambda r: r.A + r.B, axis=1)
return df.B.sum()
n = 121
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_df_apply_branch(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)})
B = df.apply(lambda r: r.A < 10 and r.B > 20, axis=1)
return df.B.sum()
n = 121
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_df_describe(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32),
'B': np.arange(n)})
#df.A[0:1] = np.nan
return df.describe()
hpat_func = hpat.jit(test_impl)
n = 1001
hpat_func(n)
# XXX: test actual output
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_sort_values(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.B.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n), 'B': np.arange(n), 'C': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_copy(self):
def test_impl(df):
df2 = df.sort_values('A')
return df2.B.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n), 'B': np.arange(n), 'C': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_single_col(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.A.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_single_col_str(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.A.values
n = 1211
random.seed(2)
str_vals = []
for _ in range(n):
k = random.randint(1, 30)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals.append(val)
df = pd.DataFrame({'A': str_vals})
hpat_func = hpat.jit(test_impl)
self.assertTrue((hpat_func(df.copy()) == test_impl(df)).all())
def test_sort_values_str(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.B.values
n = 1211
random.seed(2)
str_vals = []
str_vals2 = []
for i in range(n):
k = random.randint(1, 30)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals.append(val)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals2.append(val)
df = pd.DataFrame({'A': str_vals, 'B': str_vals2})
# use mergesort for stability, in str generation equal keys are more probable
sorted_df = df.sort_values('A', inplace=False, kind='mergesort')
hpat_func = hpat.jit(test_impl)
self.assertTrue((hpat_func(df) == sorted_df.B.values).all())
def test_sort_parallel_single_col(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
# TODO: better parallel sort test
def test_impl():
df = pd.read_parquet('kde.parquet')
df.sort_values('points', inplace=True)
res = df.points.values
return res
hpat_func = hpat.jit(locals={'res:return': 'distributed'})(test_impl)
save_min_samples = hpat.hiframes.sort.MIN_SAMPLES
try:
hpat.hiframes.sort.MIN_SAMPLES = 10
res = hpat_func()
self.assertTrue((np.diff(res) >= 0).all())
finally:
# restore global val
hpat.hiframes.sort.MIN_SAMPLES = save_min_samples
def test_df_isna1(self):
'''Verify DataFrame.isna implementation for various types of data'''
def test_impl(df):
return df.isna()
hpat_func = hpat.jit(test_impl)
# TODO: add column with datetime values when test_series_datetime_isna1 is fixed
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0],
'B': [np.inf, 5, np.nan, 6],
'C': ['aa', 'b', None, 'ccc'],
'D': [None, 'dd', '', None]})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_str1(self):
'''Verifies DataFrame.astype implementation converting various types to string'''
def test_impl(df):
return df.astype(str)
hpat_func = hpat.jit(test_impl)
# TODO: add column with float values when test_series_astype_float_to_str1 is fixed
df = pd.DataFrame({'A': [-1, 2, 11, 5, 0, -7],
'B': ['aa', 'bb', 'cc', 'dd', '', 'fff']
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_float1(self):
'''Verifies DataFrame.astype implementation converting various types to float'''
def test_impl(df):
return df.astype(np.float64)
hpat_func = hpat.jit(test_impl)
# TODO: uncomment column with string values when test_series_astype_str_to_float64 is fixed
df = pd.DataFrame({'A': [-1, 2, 11, 5, 0, -7],
# 'B': ['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'],
'C': [3.24, 1E+05, -1, -1.3E-01, np.nan, np.inf]
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_int1(self):
'''Verifies DataFrame.astype implementation converting various types to int'''
def test_impl(df):
return df.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 6
# TODO: uncomment column with string values when test_series_astype_str_to_int32 is fixed
df = pd.DataFrame({'A': np.ones(n, dtype=np.int64),
'B': np.arange(n, dtype=np.int32),
# 'C': ['-1', '2', '3', '0', '-7', '99'],
'D': np.arange(float(n), dtype=np.float32)
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_sort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
# TODO: better parallel sort test
def test_impl():
df = pd.read_parquet('kde.parquet')
df['A'] = df.points.astype(np.float64)
df.sort_values('points', inplace=True)
res = df.A.values
return res
hpat_func = hpat.jit(locals={'res:return': 'distributed'})(test_impl)
save_min_samples = hpat.hiframes.sort.MIN_SAMPLES
try:
hpat.hiframes.sort.MIN_SAMPLES = 10
res = hpat_func()
self.assertTrue((np.diff(res) >= 0).all())
finally:
# restore global val
hpat.hiframes.sort.MIN_SAMPLES = save_min_samples
def test_itertuples(self):
def test_impl(df):
res = 0.0
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.ones(n, np.int64)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_itertuples_str(self):
def test_impl(df):
res = ""
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 3
df = pd.DataFrame({'A': ['aa', 'bb', 'cc'], 'B': np.ones(n, np.int64)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_itertuples_order(self):
def test_impl(n):
res = 0.0
df = pd.DataFrame({'B': np.arange(n), 'A': np.ones(n, np.int64)})
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_itertuples_analysis(self):
"""tests array analysis handling of generated tuples, shapes going
through blocks and getting used in an array dimension
"""
def test_impl(n):
res = 0
df = pd.DataFrame({'B': np.arange(n), 'A': np.ones(n, np.int64)})
for r in df.itertuples():
if r[1] == 2:
A = np.ones(r[1])
res += len(A)
return res
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
@unittest.skipIf(platform.system() == 'Windows', "Attribute 'dtype' are different int64 and int32")
def test_df_head1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.head(3)
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n))
def test_pct_change1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.pct_change(3)
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n))
def test_mean1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.mean()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_median1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': 2 ** np.arange(n), 'B': np.arange(n) + 1.0})
return df.median()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_std1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.std()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_var1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.var()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_max1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.max()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_min1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.min()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_sum1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.sum()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_prod1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.prod()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_count1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.count()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_df_fillna1(self):
def test_impl(df):
return df.fillna(5.0)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_fillna_str1(self):
def test_impl(df):
return df.fillna("dd")
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_fillna_inplace1(self):
def test_impl(A):
A.fillna(11.0, inplace=True)
return A
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
df2 = df.copy()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df2))
def test_df_reset_index1(self):
def test_impl(df):
return df.reset_index(drop=True)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_reset_index_inplace1(self):
def test_impl():
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
df.reset_index(drop=True, inplace=True)
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
def test_df_dropna1(self):
def test_impl(df):
return df.dropna()
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df)
pd.testing.assert_frame_equal(out, h_out)
def test_df_dropna2(self):
def test_impl(df):
return df.dropna()
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df)
pd.testing.assert_frame_equal(out, h_out)
def test_df_dropna_inplace1(self):
# TODO: fix error when no df is returned
def test_impl(df):
df.dropna(inplace=True)
return df
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
df2 = df.copy()
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df2)
pd.testing.assert_frame_equal(out, h_out)
def test_df_dropna_str1(self):
def test_impl(df):
return df.dropna()
df = pd.DataFrame({'A': [1.0, 2.0, 4.0, 1.0], 'B': ['aa', 'b', None, 'ccc']})
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df)
pd.testing.assert_frame_equal(out, h_out)
def test_df_drop1(self):
def test_impl(df):
return df.drop(columns=['A'])
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_drop_inplace2(self):
        # test dropping after setting the column
def test_impl(df):
df2 = df[['A', 'B']]
df2['D'] = np.ones(3)
df2.drop(columns=['D'], inplace=True)
return df2
df = pd.DataFrame({'A': [1, 2, 3], 'B': [2, 3, 4]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_drop_inplace1(self):
def test_impl(df):
df.drop('A', axis=1, inplace=True)
return df
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
df2 = df.copy()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df2))
def test_isin_df1(self):
def test_impl(df, df2):
return df.isin(df2)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2 = pd.DataFrame({'A': np.arange(n), 'C': np.arange(n)**2})
df2.A[n // 2:] = n
pd.testing.assert_frame_equal(hpat_func(df, df2), test_impl(df, df2))
@unittest.skip("needs dict typing in Numba")
def test_isin_dict1(self):
def test_impl(df):
vals = {'A': [2, 3, 4], 'C': [4, 5, 6]}
return df.isin(vals)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_isin_list1(self):
def test_impl(df):
vals = [2, 3, 4]
return df.isin(vals)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_append1(self):
def test_impl(df, df2):
return df.append(df2, ignore_index=True)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2 = pd.DataFrame({'A': np.arange(n), 'C': np.arange(n)**2})
df2.A[n // 2:] = n
pd.testing.assert_frame_equal(hpat_func(df, df2), test_impl(df, df2))
def test_append2(self):
def test_impl(df, df2, df3):
return df.append([df2, df3], ignore_index=True)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2 = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2.A[n // 2:] = n
df3 = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
pd.testing.assert_frame_equal(
hpat_func(df, df2, df3), test_impl(df, df2, df3))
def test_concat_columns1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2], axis=1)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([4, 5])
S2 = pd.Series([6., 7.])
# TODO: support int as column name
pd.testing.assert_frame_equal(
hpat_func(S1, S2),
test_impl(S1, S2).rename(columns={0: '0', 1: '1'}))
def test_var_rename(self):
# tests df variable replacement in hiframes_untyped where inlining
# can cause extra assignments and definition handling errors
# TODO: inline freevar
def test_impl():
df =
|
pd.DataFrame({'A': [1, 2, 3], 'B': [2, 3, 4]})
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/17 0017 16:42
# @Author : Hadrianl
# @File : mysql_to_mongodb.py
# @License : (C) Copyright 2013-2017, 凯瑞投资
import pymongo
import pymysql
import pandas as pd
import datetime as dt
import json
MongoDB_client = pymongo.MongoClient('mongodb://localhost:27017/')
MysqlDB = pymysql.connect(input('Host:>'), input('User:>'), input('Password:>'), charset='utf8')
sp = MongoDB_client.SP
sp_future_min = sp.get_collection('sp_future_min')
last_date_stamp = sp_future_min.find_one(projection=['date_stamp'], sort=[('date_stamp', pymongo.DESCENDING)])['date_stamp']
last_date = dt.datetime.fromtimestamp(last_date_stamp).strftime('%Y-%m-%d 00:00:00')
cursor = MysqlDB.cursor(pymysql.cursors.DictCursor)
cursor.execute(f'SELECT * FROM carry_investment.wh_same_month_min where datetime >= "{last_date}"')
data = cursor.fetchall()
d =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import os
''' Break up a CSV for all states into an individual CSV for each state.
Return a dict of state_name:file_name
'''
def get_state(bigcsv: str) -> iter:
all_states =
|
pd.read_csv(bigcsv,usecols=['State'],squeeze=True)
|
pandas.read_csv
|
import gc
from pathlib import Path
from tqdm import tqdm
import skvideo
import skvideo.io
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from moviepy.editor import *
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision import transforms
__all__ = ['CreateOptim', 'save_checkpoint',
'preprocess_df', 'preprocess_df_audio']
def plot_losses(train=[], test=[], path=''):
""" Plotting function for training/val losses
Parameters
----------
train : list
Training losses over training
test : list
Test losses over training
path : str
Path for output plot
"""
epochs = [x for x in range(len(train))]
fig = plt.figure(figsize=(5, 5))
sns.lineplot(x=epochs, y=train, label='Train')
sns.lineplot(x=epochs, y=test, label='Test')
plt.legend(loc='upper right')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.savefig('{}/Loss.jpeg'.format(path))
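# Minimal usage sketch for plot_losses (illustrative only; the loss lists and
# the output directory below are assumed names, not produced by this module):
#
#   train_hist = [1.2, 0.9, 0.7]
#   val_hist = [1.3, 1.0, 0.8]
#   plot_losses(train=train_hist, test=val_hist, path='plots')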
def CreateOptim(parameters, lr=0.001, betas=(0.5, 0.999), weight_decay=0,
factor=0.2, patience=5, threshold=1e-03, eps=1e-08):
""" Creates optimizer and associated learning rate scheduler for a model
    Parameters
    ----------
    parameters : torch parameters
        Pytorch network parameters for the associated optimizer and scheduler
lr : float
Learning rate for optimizer
betas : 2-tuple(floats)
Betas for optimizer
weight_decay : float
Weight decay for optimizer regularization
factor : float
Factor by which to reduce learning rate on Plateau
patience : int
Patience for learning rate scheduler
Returns
-------
optimizer : torch.optim
optimizer for model
scheduler : ReduceLROnPlateau
scheduler for optimizer
"""
optimizer = optim.Adam(parameters, lr=lr, betas=(
0.5, 0.999), weight_decay=weight_decay)
scheduler = ReduceLROnPlateau(
optimizer, mode='min', factor=0.2, patience=patience,
threshold=threshold, eps=eps, verbose=True)
return optimizer, scheduler
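# Illustrative sketch of how the optimizer/scheduler pair is meant to be wired
# into a training loop (`model`, `train_one_epoch`, and `validate` are assumed
# names, not defined in this module):
#
#   optimizer, scheduler = CreateOptim(model.parameters(), lr=1e-3)
#   for epoch in range(num_epochs):
#       train_loss = train_one_epoch(model, optimizer)
#       val_loss = validate(model)
#       scheduler.step(val_loss)  # ReduceLROnPlateau steps on the monitored metric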
def save_checkpoint(model, description, filename='checkpoint.pth.tar'):
""" Saves input state dict to file
Parameters
----------
state : dict
State dict to save. Can include parameters from model, optimizer, etc.
as well as any other elements.
is_best : bool
If true will save current state dict to a second location
filename : str
File name for save
Returns
-------
"""
state = {
'architecture': str(model),
'description': description,
'model': model.state_dict()
}
torch.save(state, filename)
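# Illustrative round-trip for the checkpoint format written above (`model` is
# an assumed, already-constructed torch module):
#
#   save_checkpoint(model, description='baseline run', filename='ckpt.pth.tar')
#   state = torch.load('ckpt.pth.tar', map_location='cpu')
#   model.load_state_dict(state['model'])
#   print(state['description'])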
def preprocess_df(df=None, mtcnn=None, path=None, outpath=None,
target_n_frames=60, frame_rate=10, mini_batch=15,
n_seconds=5, start_at_end=True, debug=False):
""" Preprocessing script for deep fake challenge. Subsamples, videos,
isolates faces and saves frames.
Parameters
----------
df : pd.DataFrame
Dataframe with video metadata
mtcnn : torch.Module
Facial detection module from facenet-python (https://github.com/timesler/facenet-pytorch)
path : str
Path to directory with DFDC data
outpath : str
Destination for preprocessed frames
target_n_frames : int
Target number of frames to extract
frame_rate : int
Number of frames per second to process
    mini_batch : int
        Mini batch size for preprocessing steps (protects against memory overflow)
n_seconds : int
        Number of seconds of video to load (speeds up optimization)
debug : bool
Debug switch to test memory leak
Returns
-------
faces_dataframe : pd.DataFrame
Dataframe of preprocessed data
"""
def split(my_list, n):
""" Splits list into lists of length n
        Parameters
----------
my_list : list
List to subdivide
n : int
Max length of desired sub-lists
Returns
final : list
List of sub-lists of length n
"""
final = [my_list[i * n:(i + 1) * n]
for i in range((len(my_list) + n - 1) // n)]
return final
def process_min_batch_reverse(batch=None, start_index=0):
""" Pre-process and save a mini_batch of frames
Parameters
----------
batch : list(torch.tensor)
List with frames to preprocess
start_index : int
            Number of previously saved frames in the video
Returns
-------
end_index : int
Number of saved frames at end of this mini-batch
"""
with torch.no_grad():
faces, probs = mtcnn(batch, return_prob=True)
saved_frames = 0
faces_probs = []
for ii, face in enumerate(faces):
if face is None or probs[ii] < 0.95:
continue
if start_index-saved_frames < 0:
break
faces_probs.append(probs[ii])
imface = to_pil(face/2 + 0.5)
imface.save('{}/frame_{}.png'.format(dest,
start_index-saved_frames))
del imface
saved_frames += 1
del faces
return saved_frames, faces_probs
def process_min_batch(batch=None, start_index=0):
""" Pre-process and save a mini_batch of frames
Parameters
----------
batch : list(torch.tensor)
List with frames to preprocess
start_index : int
            Number of previously saved frames in the video
Returns
-------
end_index : int
Number of saved frames at end of this mini-batch
"""
with torch.no_grad():
faces, probs = mtcnn(batch, return_prob=True)
saved_frames = 0
faces_probs = []
for ii, face in enumerate(faces):
if face is None or probs[ii] < 0.95:
continue
faces_probs.append(probs[ii])
imface = to_pil(face/2 + 0.5)
imface.save('{}/frame_{}.png'.format(dest,
saved_frames+start_index))
del imface
saved_frames += 1
del faces
return saved_frames, faces_probs
frame_skip = 30//frame_rate
to_pil = transforms.ToPILImage()
pbar = tqdm(total=len(df))
faces_dataframe = []
for idx in range(len(df)):
pbar.update(1)
entry = df.iloc[idx]
this_entry = {'split': entry['split'], 'File': entry['File'],
'label': entry['label'], 'frames': 0,
                      'probabilities': []}
try:
filename = '{}/{}/{}'.format(path, entry['split'], entry['File'])
dest = '{}/{}/{}/'.format(outpath, entry['split'], entry['File'])
Path(dest).mkdir(parents=True, exist_ok=True)
try:
videodata = skvideo.io.vread(filename, (n_seconds)*30)
except RuntimeError:
videodata = skvideo.io.vread(filename)
except:
continue
if start_at_end:
frames = [to_pil(x) for x in videodata[::-frame_skip]]
frames_batches = split(frames, mini_batch)
else:
frames = [to_pil(x) for x in videodata[0::frame_skip]]
frames_batches = split(frames, mini_batch)
probabilities = []
if start_at_end:
n_frames = target_n_frames
for batch in frames_batches:
if n_frames < 0:
break
t_frames, t_probs = process_min_batch_reverse(
batch, n_frames)
n_frames -= t_frames
probabilities += t_probs
n_frames = target_n_frames
else:
n_frames = 0
for batch in frames_batches:
if n_frames >= target_n_frames:
break
t_frames, t_probs = process_min_batch(batch, n_frames)
n_frames += t_frames
probabilities += t_probs
this_entry['frames'] = n_frames
this_entry['probabilities'] = probabilities
del frames, videodata
except:
pass
faces_dataframe.append(this_entry)
del entry, this_entry
#
if debug:
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
print(type(obj), obj.size())
except:
pass
pbar.close()
return
|
pd.DataFrame(faces_dataframe)
|
pandas.DataFrame
|
from io import StringIO
import numpy as np
import pytest
from pandas import DataFrame, concat, read_csv
import pandas._testing as tm
class TestInvalidConcat:
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, {}, [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
|
concat([df1, obj])
|
pandas.concat
|
from spines.built_in_exceptions.built_in_exceptions import FileLoadingError
from spines.pandas_wrapper.data_frame import DataFrameWrapper as dfw
import pandas as pd
import numpy as np
def load_data(path, file_type='csv', sheet_name=0, **kwargs) -> pd.DataFrame:
"""
    Load standard input data from disk into memory as a DataFrame.
file_type: csv, excel
"""
df = pd.DataFrame()
if file_type == 'csv':
try:
df =
|
pd.read_csv(path, **kwargs)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from graphysio.dialogs import DlgListChoice
from graphysio.readdata.baseclass import BaseReader
from graphysio.structures import PlotData
try:
import pyedflib
except ImportError:
is_available = False
else:
is_available = True
class EdfReader(BaseReader):
is_available = is_available
def askUserInput(self):
filepath = str(self.userdata['filepath'])
edf = pyedflib.EdfReader(filepath)
signals = {}
for i in range(edf.signals_in_file):
h = edf.getSignalHeader(i)
signals[h['label']] = i
edf.close()
def cb(colnames):
self.userdata['columns'] = [signals[lbl] for lbl in colnames]
colnames = list(signals.keys())
dlgchoice = DlgListChoice(colnames, 'Open EDF', 'Choose curves to load')
dlgchoice.dlgdata.connect(cb)
dlgchoice.exec_()
def __call__(self) -> PlotData:
filepath = str(self.userdata['filepath'])
edf = pyedflib.EdfReader(filepath)
beginns = edf.getStartdatetime().timestamp() * 1e9
nsamplesPerChannel = edf.getNSamples()
signals = []
for i in self.userdata['columns']:
h = edf.getSignalHeader(i)
fs = h['sample_rate']
n = nsamplesPerChannel[i]
endns = beginns + n * 1e9 / fs
idx = np.linspace(beginns, endns, num=n, dtype=np.int64)
s = pd.Series(edf.readSignal(i), index=idx, name=h['label'])
signals.append(s)
edf.close()
if not signals:
return None
df =
|
pd.concat(signals, axis=1)
|
pandas.concat
|
def get_patent_fields_list():
"""Scrape patent fields that are retrievable from PatentsView API"""
import requests
from bs4 import Tag, NavigableString, BeautifulSoup
import pandas as pd
url = "http://www.patentsview.org/api/patent.html"
page = requests.get(url)
l = []
l_fieldnames = []
soup = BeautifulSoup(page.text, 'lxml')
table = soup.find(class_='table table-striped documentation-fieldlist')
table_rows = table.find_all('tr')
counter=0
for tr in table_rows:
if counter ==0:
th = tr.find_all('th')
row = [tr.text for tr in th]
l.append(row)
counter+=1
elif counter>0:
td = tr.find_all('td')
row = [tr.text for tr in td]
l.append(row)
for row in l[1:]:
l_fieldnames.append(row[0])
return l_fieldnames
def get_patent_fields_df():
"""Scrape and return possible fields for patents endpoint
of PatentsView API"""
import requests
from bs4 import Tag, NavigableString, BeautifulSoup
import pandas as pd
url = "http://www.patentsview.org/api/patent.html"
page = requests.get(url)
table = []
soup = BeautifulSoup(page.text, 'lxml')
    field_table = soup.find(class_='table table-striped documentation-fieldlist')  # keep the soup Tag separate from the row list in `table`
    table_rows = field_table.find_all('tr')
counter = 0
for tr in table_rows:
if counter == 0:
th = tr.find_all('th')
row = [tr.text for tr in th]
table.append(row)
counter += 1
elif counter > 0:
td = tr.find_all('td')
row = [tr.text for tr in td]
table.append(row)
df =
|
pd.DataFrame(table)
|
pandas.DataFrame
|
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index([f"%{c}" for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_series_equal(s, expected)
df = DataFrame(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[["a", "a"]]
assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
# repr with int on 32-bit/windows
if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (
repr(list(df.itertuples(name=None)))
== "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
)
tup = next(df.itertuples(name="TestName"))
assert tup._fields == ("Index", "a", "b")
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == "TestName"
df.columns = ["def", "return"]
tup2 = next(df.itertuples(name="TestName"))
assert tup2 == (0, 1, 4)
assert tup2._fields == ("Index", "_1", "_2")
df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
assert isinstance(tup3, tuple)
if PY37:
assert hasattr(tup3, "_fields")
else:
assert not hasattr(tup3, "_fields")
# GH 28282
df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}])
result_254_columns = next(df_254_columns.itertuples(index=False))
assert isinstance(result_254_columns, tuple)
assert hasattr(result_254_columns, "_fields")
df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}])
result_255_columns = next(df_255_columns.itertuples(index=False))
assert isinstance(result_255_columns, tuple)
# Dataframes with >=255 columns will fallback to regular tuples on python < 3.7
if PY37:
assert hasattr(result_255_columns, "_fields")
else:
assert not hasattr(result_255_columns, "_fields")
def test_sequence_like_with_categorical(self):
# GH 7839
# make sure can iterate
df = DataFrame(
{"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
)
df["grade"] = Categorical(df["raw_grade"])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.items():
str(s)
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
|
tm.assert_almost_equal(arr, expected)
|
pandas._testing.assert_almost_equal
|
# -*- encoding: utf-8 -*-
import functools
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import scipy.sparse
import sklearn.utils
from sklearn import preprocessing
from sklearn.compose import make_column_transformer
class InputValidator:
"""
Makes sure the input data complies with Auto-sklearn requirements.
Categorical inputs are encoded via a Label Encoder, if the input
is a dataframe.
This class also perform checks for data integrity and flags the user
via informative errors.
"""
def __init__(self) -> None:
self.valid_pd_enc_dtypes = ['category', 'bool']
# If a dataframe was provided, we populate
# this attribute with the column types from the dataframe
# That is, this attribute contains whether autosklearn
# should treat a column as categorical or numerical
# During fit, if the user provided feature_types, the user
# constrain is honored. If not, this attribute is used.
self.feature_types = None # type: Optional[List[str]]
# Whereas autosklearn performed encoding on the dataframe
# We need the target encoder as a decoder mechanism
self.feature_encoder = None
self.target_encoder = None
self.enc_columns = [] # type: List[int]
# During consecutive calls to the validator,
# track the number of outputs of the targets
# We need to make sure y_train/y_test have the
# same dimensionality
self._n_outputs = None
# Add support to make sure that the input to
# autosklearn has consistent dtype through calls.
# That is, once fitted, changes in the input dtype
# are not allowed
self.features_type = None # type: Optional[type]
self.target_type = None # type: Optional[type]
def register_user_feat_type(self, feat_type: Optional[List[str]],
X: Union[pd.DataFrame, np.ndarray]) -> None:
"""
Incorporate information of the feature types when processing a Numpy array.
        If feature types are provided while using a pd.DataFrame, this utility errors
out, explaining to the user this is contradictory.
"""
if hasattr(X, "iloc") and feat_type is not None:
raise ValueError("When providing a DataFrame to Auto-Sklearn, we extract "
"the feature types from the DataFrame.dtypes. That is, "
"providing the option feat_type to the fit method is not "
"supported when using a Dataframe. Please make sure that the "
"type of each column in your DataFrame is properly set. "
"More details about having the correct data type in your "
"DataFrame can be seen in "
"https://pandas.pydata.org/pandas-docs/stable/reference"
"/api/pandas.DataFrame.astype.html")
elif feat_type is None:
# Nothing to register. No feat type is provided
# or the features are not numpy/list where this is required
return
# Some checks if feat_type is provided
if len(feat_type) != X.shape[1]:
raise ValueError('Array feat_type does not have same number of '
'variables as X has features. %d vs %d.' %
(len(feat_type), X.shape[1]))
if not all([isinstance(f, str) for f in feat_type]):
raise ValueError('Array feat_type must only contain strings.')
for ft in feat_type:
if ft.lower() not in ['categorical', 'numerical']:
raise ValueError('Only `Categorical` and `Numerical` are '
'valid feature types, you passed `%s`' % ft)
# Here we register proactively the feature types for
# Processing Numpy arrays
self.feature_types = feat_type
def validate(
self,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.DataFrame, np.ndarray],
is_classification: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Wrapper for feature/targets validation
Makes sure consistent number of samples within target and
features.
"""
X = self.validate_features(X)
y = self.validate_target(y, is_classification)
if X.shape[0] != y.shape[0]:
raise ValueError(
"The number of samples from the features X={} should match "
"the number of samples from the target y={}".format(
X.shape[0],
y.shape[0]
)
)
return X, y
def validate_features(
self,
X: Union[pd.DataFrame, np.ndarray],
) -> np.ndarray:
"""
Wrapper around sklearn check_array. Translates a pandas
Dataframe to a valid input for sklearn.
"""
# Make sure that once fitted, we don't allow new dtypes
if self.features_type is None:
self.features_type = type(X)
if self.features_type != type(X):
raise ValueError("Auto-sklearn previously received features of type {} "
"yet the current features have type {}. Changing the dtype "
"of inputs to an estimator is not supported.".format(
self.features_type,
type(X)
)
)
# Do not support category/string numpy data. Only numbers
if hasattr(X, "dtype") and not np.issubdtype(X.dtype.type, np.number):
raise ValueError(
"When providing a numpy array to Auto-sklearn, the only valid "
"dtypes are numerical ones. The provided data type {} is not supported."
"".format(
X.dtype.type,
)
)
# Pre-process dataframe to make them numerical
# Also, encode numpy categorical objects
if hasattr(X, "iloc") and not scipy.sparse.issparse(X):
# Pandas validation provide extra user information
X = self._check_and_encode_features(X)
if scipy.sparse.issparse(X):
X.sort_indices()
# sklearn check array will make sure we have the
# correct numerical features for the array
# Also, a numpy array will be created
X = sklearn.utils.check_array(
X,
force_all_finite=False,
accept_sparse='csr'
)
return X
def validate_target(
self,
y: Union[pd.DataFrame, np.ndarray],
is_classification: bool = False,
) -> np.ndarray:
"""
Wrapper around sklearn check_array. Translates a pandas
Dataframe to a valid input for sklearn.
"""
# Make sure that once fitted, we don't allow new dtypes
if self.target_type is None:
self.target_type = type(y)
if self.target_type != type(y):
raise ValueError("Auto-sklearn previously received targets of type {} "
"yet the current target has type {}. Changing the dtype "
"of inputs to an estimator is not supported.".format(
self.target_type,
type(y)
)
)
# Target data as sparse is not supported
if scipy.sparse.issparse(y):
raise ValueError("Unsupported target data provided"
"Input targets to auto-sklearn must not be of "
"type sparse. Please convert the target input (y) "
"to a dense array via scipy.sparse.csr_matrix.todense(). "
)
# No Nan is supported
if np.any(
|
pd.isnull(y)
|
pandas.isnull
|
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
        # xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = DataFrame(
np.arange(6.0).reshape(3, 2),
columns=list("AB"),
index=date_range("1/1/2000", periods=3, freq="1H"),
)
expected = df.copy()
expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
dt2 =
|
Timestamp("20130101 10:00:00")
|
pandas.Timestamp
|
#
# * Chapter 05, Series
#%%
# * import libs
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
np.random.seed(12345)
# %%
# * Create Pandas Series
obj = Series([4, 7, -5, 3])
obj
# %%
# * check Series value and index
# ? Series is a one-dimensional array-like object, containing (1) a sequence of
# ? values, (2) an associated array of data labels, called *index*
obj.values
# %%
obj.index
# %%
# * create a Series with a designated index
obj2 = Series([4,7,-5,3], index=['d','b','a','c'])
obj2
# %%
obj2.index
# %%
obj2['d']
#%%
obj2[['a', 'b', 'c']]
# %%
# * Using numpy functions/operations will preserve the index-value links
obj3 = obj2[obj2 > 0]
obj3
# %%
obj3 = obj2*2
obj3
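# %%
# * the same index preservation holds for numpy ufuncs
# ? (illustrative extra example, assuming obj2 as defined above)
np.exp(obj2)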
# %%
# * we can think of a Series as an ordered dictionary
'b' in obj2
# %%
'f' in obj2
# %%
# * Actually, we can create Series from a python dict
# * the keys of the dict become the Series index, and the values become the Series values
obj3 = Series({'a':123, 'b': 456, 'c': 789})
obj3
# %%
# * When creating a Series from a dict, the index can be overridden
letters = ['x','y','z', 'a', 'b', 'c']
obj4 =
|
Series({'a':123, 'b': 456, 'c': 789}, index=letters)
|
pandas.Series
|
# generate features
import networkx as nx
import pandas as pd
import numpy as np
from networkx.algorithms import node_classification
import time
from collections import Counter
from utils import normalize_features
def dayday_feature(data, n_class=2, label_most_common_1=19, flag_unlabel=0):
t1 = time.time()
data = data.copy()
x = data['fea_table'].copy()
num_nodes = x.shape[0]
nodes_all = list(x.index)
df = data['edge_file'].copy()
max_weight = max(df['edge_weight'])
df.rename(columns={'edge_weight': 'weight'}, inplace=True)
degree_in_1st = np.zeros(num_nodes)
degree_out_1st = np.zeros(num_nodes)
weight_in_1st = np.zeros(num_nodes)
weight_out_1st = np.zeros(num_nodes)
for source, target, weight in df.values:
source = int(source)
target = int(target)
degree_in_1st[target] += 1
degree_out_1st[source] += 1
weight_in_1st[target] += weight
weight_out_1st[source] += weight
degree_1st_diff = degree_in_1st - degree_out_1st
weight_1st_diff = weight_in_1st - weight_out_1st
features_1 = np.concatenate([
degree_in_1st.reshape(-1, 1),
degree_out_1st.reshape(-1, 1),
weight_in_1st.reshape(-1, 1),
weight_out_1st.reshape(-1, 1),
degree_1st_diff.reshape(-1, 1),
weight_1st_diff.reshape(-1, 1)
], axis=1)
features_in_1st = pd.DataFrame({"node_index": np.arange(num_nodes), "degree_in_1st": degree_in_1st, "weight_in_1st": weight_in_1st})
df_degree_in_1st =
|
pd.merge(left=df, right=features_in_1st, left_on="src_idx", right_on="node_index", how="left")
|
pandas.merge
|
from __future__ import division
import glob
import os
from datetime import timedelta
from multiprocessing import Process
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sunpy.visualization.colormaps as cm
class AstroMap:
"""
    This class is able to download and use fits files for light curves.
"""
def __init__(
self,
global_save_path,
timestamp,
wavelength,
fits_folder,
n_row_col,
n_processes,
relevantCoords,
exposure,
sigma,
showPreview=False,
):
"""
        To initialize an AstroMap object, we require a main save path and the parameters below.
:param global_save_path: path for reference for rest of functions. Saving here and below.
:param fits_folder: Folder to fits files (if existing)
:param timestamp: Start and endtime to take into account
:param wavelength: Relevant wavelength
:param n_row_col: Number of rows and columns to splice array into
:param n_processes: Number of processes to use
"""
self.global_save_path = global_save_path
self.fits_folder = fits_folder
self.wvlnth = wavelength
self.nrowcol = n_row_col
self.n_cpus = n_processes
self.timestamp = timestamp
self.relevantCoords = relevantCoords
self.exposure = exposure
self.complete_lcurves = []
self.sigma = sigma
self.showPreview = showPreview
if not self.fits_folder:
print("No fits folder given")
@staticmethod
def translate_number(input_region, n_by_n):
"""
        Take an input region index and a grid size n_by_n, and return the two-dimensional position of that region in the array
:param input_region: Which number we require
:param n_by_n: Dimensions of array
:return: Returns height, length at which box is located
"""
array = np.arange(0, n_by_n**2, 1)
array = np.split(array, n_by_n)
array = np.vstack(array)
return [
np.where(array == input_region)[0][0],
np.where(array == input_region)[1][0],
]
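    # Worked example (illustrative): with n_by_n = 3 the region numbers are laid
    # out as [[0, 1, 2], [3, 4, 5], [6, 7, 8]], so translate_number(5, 3)
    # returns [1, 2], i.e. row 1, column 2 of the grid.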
def download_all(self, down_dir):
"""
Simple script that downloads fits into download folder
:param down_dir: Download directory for the fits
:return:
"""
from Scripts.Imports.Data_Fetch.Fido_Download import run_fido
run_fido(
start=self.timestamp[0],
end=self.timestamp[1],
down_dir=down_dir,
wavelengths=self.wvlnth,
)
self.fits_folder = down_dir
def create_column_plots(self, figsize=(10, 10)):
"""
        This function combines each saved frame image with its light curves and saves composite figures under the global save path
        :param figsize: Figure size for each composite matplotlib figure
:return: Saves complete images to a given folder
"""
from matplotlib import gridspec as gs
def proc_multi(split_indices):
"""
Function created for multiprocessing of getting images and lcurves together
:param split_indices: Which indexes to use
:return: Saves figures into given path
"""
for i in split_indices:
curr_time = timearr[i]
for numpycol in range(lcurve_data.shape[0]):
fig = plt.figure(figsize=figsize)
for numpyrow in range(lcurve_data.shape[1]):
# Save each of the images in their own folder -
folder_image = (
f"{self.global_save_path}{numpycol:02d}_{numpyrow:02d}"
)
curr_aia = plt.imread(f"{folder_image}/{i:03d}.png")
curr_lcurve = lcurve_data[numpyrow, numpycol, :]
plt.subplot(grs[numpyrow, 0])
plt.imshow(curr_aia)
plt.axis("off")
# Plot lightcurve and corresponding column
plt.subplot(grs[numpyrow, 1])
ax = plt.gca()
plt.plot(timearr,
curr_lcurve,
linewidth=2,
color="black")
ax.axvline(
x=curr_time,
color="black",
linestyle="--",
alpha=0.2,
linewidth=2,
)
plt.tight_layout()
combo_path = f"{self.global_save_path}Total_combined/"
os.makedirs(combo_path, exist_ok=True)
plt.savefig(f"{combo_path}{numpycol:02d}_{i:03d}.png")
plt.close(fig)
print(f"Done index : {i}")
# Get total images, data and time arrays
combo_images = sorted(glob.glob(f"{self.global_save_path}Combo/*.png"))
if combo_images == []:
raise ValueError(
f"No images were found in {self.global_save_path}Combo/. "
f"/n Please use the astro_plot function first")
lcurve_data = np.load(f"{self.global_save_path}lcurves.npy")
time_npy = np.load(f"{self.global_save_path}times.npy")
tdf =
|
pd.DataFrame({"Time": time_npy})
|
pandas.DataFrame
|
from collections import defaultdict
from datetime import datetime
from itertools import product
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
array,
concat,
merge,
)
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
import pandas.core.common as com
from pandas.core.sorting import (
decons_group_index,
get_group_index,
is_int64_overflow_possible,
lexsort_indexer,
nargsort,
)
class TestSorting:
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame(
{
"A": A,
"B": B,
"C": A,
"D": B,
"E": A,
"F": B,
"G": A,
"H": B,
"values": np.random.randn(2500),
}
)
lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"])
rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"])
left = lg.sum()["values"]
right = rg.sum()["values"]
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()["values"]
for k, v in expected.items():
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values})
grouped = data.groupby(["a", "b", "c", "d"])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list("abcde"))
df["jim"], df["joe"] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list("abcde"))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df["jim"], df["joe"]):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list("abcde"))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype="f8")
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=["jim", "joe"], index=mi)
return res.sort_index()
tm.assert_frame_equal(gr.mean(), aggr(np.mean))
tm.assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [np.nan] * 5 + list(range(100)) + [np.nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype="O")
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind="mergesort", ascending=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(
items2, kind="mergesort", ascending=False, na_position="first"
)
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge:
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G1"])
df2 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G2"])
# it works!
result = merge(df1, df2, how="outer")
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG"))
left["left"] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ["right"]
right.index = np.arange(len(right))
right["right"] *= -1
out = merge(left, right, how="outer")
assert len(out) == len(left)
tm.assert_series_equal(out["left"], -out["right"], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
tm.assert_series_equal(out["left"], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ["left", "right", "outer", "inner"]:
tm.assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how="left", sort=False)
tm.assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how="left", sort=False)
tm.assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(
np.random.randint(low, high, (n, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(
np.random.randint(low, high, (n // 2, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left["left"] = np.random.randn(len(left))
right["right"] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list("ABCDEFG")).iterrows():
ldict[idx].append(row["left"])
for idx, row in right.set_index(list("ABCDEFG")).iterrows():
rdict[idx].append(row["right"])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(
k
+ (
lv,
rv,
)
)
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(
k
+ (
np.nan,
rv,
)
)
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list("ABCDEFG")
tm.assert_frame_equal(
df[kcols].copy(), df[kcols].sort_values(kcols, kind="mergesort")
)
out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])
out = align(out)
jmask = {
"left": out["left"].notna(),
"right": out["right"].notna(),
"inner": out["left"].notna() & out["right"].notna(),
"outer": np.ones(len(out), dtype="bool"),
}
for how in ["left", "right", "outer", "inner"]:
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == "outer"
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
tm.assert_frame_equal(
frame, align(res), check_dtype=how not in ("right", "outer")
)
def test_decons():
def testit(codes_list, shape):
group_index = get_group_index(codes_list, shape, sort=True, xnull=True)
codes_list2 = decons_group_index(group_index, shape)
for a, b in zip(codes_list, codes_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
codes_list = [
np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64),
]
testit(codes_list, shape)
shape = (10000, 10000)
codes_list = [
np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5),
]
testit(codes_list, shape)
class TestSafeSort:
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype="object")
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
def test_codes(self, verify):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
codes = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
# na_sentinel
codes = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_codes = safe_sort(values, codes, na_sentinel=99, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
codes = []
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
@pytest.mark.parametrize("na_sentinel", [-1, 99])
def test_codes_out_of_bound(self, na_sentinel):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
# out of bound indices
codes = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_codes = safe_sort(values, codes, na_sentinel=na_sentinel)
expected_codes = np.array(
[3, na_sentinel, na_sentinel, 2, 0, 3, na_sentinel, 4], dtype=np.intp
)
|
tm.assert_numpy_array_equal(result, expected)
|
pandas._testing.assert_numpy_array_equal
|
from pandas import DataFrame
import numpy as np
from pandas.core.reshape import melt, convert_dummies
import pandas.util.testing as tm
def test_melt():
df = tm.makeTimeDataFrame()[:10]
df['id1'] = (df['A'] > 0).astype(int)
df['id2'] = (df['B'] > 0).astype(int)
molten1 =
|
melt(df)
|
pandas.core.reshape.melt
|
# -*- cding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import pymysql
import datetime
import glob
'''
Program description:
1. Read AIS data from the MySQL database to local storage and save it in a pandas DataFrame;
2. Read CSV files from local disk so the data can be processed;
3. Use datetime functions to convert the timestamps stored in the database into concrete times, for easier reading;
4. Clean the trajectory data and reconstruct it, outputting trajectory data that meets the requirements;
5. Produce the final trajectory motion-feature data keyed by unique MMSI, for later use in deep learning.
'''
# Statement for connecting to the database
# dbconn = pymysql.connect(host = '127.0.0.1',user = 'root', passwd='<PASSWORD>',db= 'ais_dynamic',charset = 'utf8')
# SQL query statement
# sqlcmd = "select * from ais_dynamic.ais_dynamic limit 100"
# Read data from a CSV file for processing
# ais_file= pd.read_csv(r'C:\Users\cege-user\Desktop\dataset-ais\1-1000000-ais.csv',header = 0,sep = ' ',names = list('Record_Datetime','MMSI','Longitude','Latitude','Direction',
# 'Heading','Speed','Status','ROT','Position_Accuracy','UTC_Hour',
# 'UTC_Minute','UTC_Second','Message_ID','Rec_Datetime','Source_ID'))
ais_file1 = pd.read_csv(r'D:\Data store file\dataset-ais\1-1000000-ais.csv')
ais_file2 = pd.read_csv(r'D:\Data store file\dataset-ais\1000001-2000000-ais.csv')
ais_file3 = pd.read_csv(r'D:\Data store file\dataset-ais\2000001-3000000.csv')
ais_file4 = pd.read_csv(r'D:\Data store file\dataset-ais\3000001-4000000.csv')
ais_file5 = pd.read_csv(r'D:\Data store file\dataset-ais\4000001-5000000.csv')
ais_file6 = pd.read_csv(r'D:\Data store file\dataset-ais\5000001-6000000.csv')
ais_file7 =
|
pd.read_csv(r'D:\Data store file\dataset-ais\6000001-669370.csv')
|
pandas.read_csv
|
import pickle
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce
import sys
import time
from sklearn.decomposition import PCA
from sklearn import cluster as sklearn_clustering
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import data_extract as dext
from heimat import reco
import settings
import cache_manager as cmng
import portals_urls
import data_cleaning
pca_u, G = None, None
pca_a, A_star = None, None
A, U, MAP, txtclf = None, None, None, None
M = None
CLF_MLP = None
CLF_DBSCAN = None
D, PAPERS_LIST = None, None
UVT = None
papers_total, objects_articles_dict, objects_df = cmng.load_work()
pca_G_ncomponents = settings.pca_G_ncomponents
pca_A_ncomponents = settings.pca_A_ncomponents
mlp_iter = settings.mlp_iter
funksvd_iter = settings.funksvd_iter
funksvd_latent_features = settings.funksvd_latent_features
pd.set_option("max_rows", 50)
np.random.seed()
def dist_em(xs1, xs2):
euklid = np.linalg.norm(xs1 - xs2)
manhattan = sum(abs(e - s) for s, e in zip(xs1, xs2))
return euklid, manhattan
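# Quick illustrative check of dist_em on two small vectors (assumed inputs):
#
#   dist_em(np.array([0.0, 0.0]), np.array([3.0, 4.0]))
#   -> (5.0, 7.0)   # euclidean norm = 5, manhattan = |3 - 0| + |4 - 0| = 7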
def show_articles_by_group(group=0):
"""
Shows paper_id corresponding to objects in some particular group
:param group:
:return:
"""
global U
r = U[U.group == group]
articles = []
for paper_id in r['OBJID'].values:
articles.extend(MAP[paper_id])
for paper_id in list(set(articles)):
print("--------------------------------------------------")
dext.get_paper(paper_id)
def show_first3_components(matrix, title="", start_at_index=0):
"""
:param matrix: G or A_star matrices
:param title:
:param start_at_index: Depending on whether matrix is G or A_star, start_at_index differs (1, respectively 0)
:return:
"""
plt.figure(figsize=(10, 8))
ax = plt.axes(projection='3d')
i, j, k = [start_at_index + t for t in range(0, 3)]
ax.scatter3D(matrix[:, i], matrix[:, j], matrix[:, k], s=8, cmap='Greens', edgecolors='k')
if title:
plt.title(title)
plt.show()
plt.close()
time.sleep(1)
def gen_matrix_G(ncomp=25):
"""
matrix G of principal components for the object representation
- generates the PCA form of matrix U
- adds the OBJID value on the first column
:param ncomp:
:return:
"""
global pca_u, G, U
print("\n[x] PCA for matrix G:")
pca_u = PCA(n_components=ncomp)
U_matrix = U[list(filter(lambda x: x not in ["OBJID", "group"], U.columns))]
G = pca_u.fit_transform(U_matrix.fillna(U_matrix.mean()).values)
G = np.append(U['OBJID'].values.reshape(U.shape[0], 1), G, axis=1)
print("[x] Explained variance ratio:")
print(pca_u.explained_variance_ratio_)
print("[x] Singular values:")
print(pca_u.singular_values_)
print("[x] Sum of variance:")
print(np.sum(pca_u.explained_variance_ratio_))
show_first3_components(G, title="First 3 principal components for G", start_at_index=1)
def gen_matrix_A_star(ncomp=25):
"""
matrix A* of principal components for the article representation
- generates the PCA form of matrix U
- adds the OBJID value on the first column
:param ncomp:
:return:
"""
global pca_a, A_star
print("\n[x] PCA for matrix A:")
pca_a = PCA(n_components=ncomp)
A_star = pca_a.fit_transform(A.fillna(A.mean()).values[:, 1:])
A_star = np.append(A['paper_id'].values.reshape(A_star.shape[0], 1), A_star, axis=1)
print("[x] Explained variance ratio:")
print(pca_a.explained_variance_ratio_)
print("[x] Singular values:")
print(pca_a.singular_values_)
print("[x] Sum of variance:")
print(np.sum(pca_a.explained_variance_ratio_))
show_first3_components(A_star, title="First 3 principal components for A_star", start_at_index=1)
def get_indexes_articles_in_df(objid):
"""
MAP contains the mapping between astronomical object ids and the paper ids
returns the indexes in matrix A of object with objid
:param objid:
:return:
"""
global A, MAP
res = []
for paper_id in MAP[objid]:
record = A[A.paper_id == paper_id].index.values.tolist()
if len(record) != 0:
res.append(record[0])
else:
# ignoring for the moment if a paper id couldn't be found
# (probably there was an exception at download phase)
pass
return res
def gen_matrix_M(balance_factor=3):
"""
- construct matrix M by combining values from G and A_star
    - since a brute force approach would require too much time and would lead to an overly unbalanced training set
decided to build up by factor of 3 (balance_factor):
- a portion of data is "as is", thus object data in G corresponds to data in A_star (by MAP)
- a portion of data (3 times bigger) is "simulated" and contains objects to articles that are not associated
- target value is set to 1 if association is given, otherwise 0
:param balance_factor:
:return:
"""
global G, U, A_star, A
M = []
y = []
print("Building matrix M, this will take a while .. ")
for i in range(0, G.shape[0]):
if i != 0 and i % int(0.1 * G.shape[0]) == 0:
print("%.2f" % (100 * i / G.shape[0]) + "% of objects")
r1 = G[i, 1:].tolist()
object_id = U.values[i, 0]
indexes_associations = get_indexes_articles_in_df(object_id)
indexes_non_associations = list(filter(lambda k: k not in indexes_associations, range(A.shape[0])))
indexes_non_associations = pd.Series(indexes_non_associations).sample(
len(indexes_associations) * balance_factor).tolist()
for j in indexes_associations + indexes_non_associations:
r2 = A_star[j, 1:].tolist()
M.append(r1 + r2)
y.append(1 if j in indexes_associations else 0)
M = np.array(M)
return M, y
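# Hedged sketch of how M and y are typically consumed downstream (the split
# ratio and classifier settings here are assumptions, not the project's exact
# configuration):
#
#   M, y = gen_matrix_M(balance_factor=3)
#   x_train, x_test, y_train, y_test = train_test_split(M, y, test_size=0.2)
#   clf = MLPClassifier(max_iter=mlp_iter).fit(x_train, y_train)
#   print(confusion_matrix(y_test, clf.predict(x_test)))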
def gen_matrix_Mi(i):
"""
    Generates matrix Mi, i.e. the portion of matrix M for the astronomical object OBJID found at index i in G.
    This is done by taking the object's record from G and combining it with every record from A_star,
    so that the probability P(Association | Gi, A_star) can be calculated for all papers in A_star.
:param i:
:return:
"""
global U, G, A, A_star
Mi = []
yi = []
    r1 = G[i, 1:].tolist()
    object_id = U.values[i, 0].encode("utf-8")
    articles_found_related = dext.objects_articles_dict[object_id]
    for j in range(0, A_star.shape[0]):
        r2 = A_star[j, 1:].tolist()
        article_id = A.values[j, 0]
        target_value = int(article_id in articles_found_related)
Mi.append(
r1 + r2
)
yi.append(target_value)
Mi = np.array(Mi)
return Mi, yi
def get_confusion_matrix_stats(cm, i):
"""
Given a Confusion Matrix cm, calculates precision, recall and F1 scores
:param cm: confusion matrix
    :param i: index of the class for which the statistics are calculated
:return: three statistics: precision, recall and the F1-Score
"""
    tp = cm[i, i]
    fp = np.sum(cm[:, i]) - tp  # column sum: predicted as class i but true label differs (sklearn: rows = true labels)
    fn = np.sum(cm[i, :]) - tp  # row sum: true label is class i but predicted as another class
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1_score = 2 * (precision * recall) / (precision + recall)
return precision, recall, f1_score
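# Worked example for get_confusion_matrix_stats above (illustrative numbers only): with
# sklearn's convention of rows = true labels and columns = predictions, cm = [[50, 10], [5, 35]]
# and i = 1 give tp = 35, fp = 10, fn = 5, hence precision = 35/45 ~ 0.78, recall = 35/40 ~ 0.88
# and F1 ~ 0.82.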
def check_mlp(x, y):
global CLF_MLP
print("+++++++++++++++++++++++++++++++++++++")
labels_zuordnung_mlp = CLF_MLP.classes_
beispiel_mlp_x = x
beispiel_mlp_y = y
y_true = np.array(beispiel_mlp_y)
y_pred = np.array([labels_zuordnung_mlp[np.argmax(t)] for t in CLF_MLP.predict_proba(beispiel_mlp_x)])
accuracy = (y_pred == y_true).mean()
cm = confusion_matrix(y_true, y_pred, labels=labels_zuordnung_mlp)
if True:
print("Labels:", labels_zuordnung_mlp)
print("Confusion Matrix:")
print(cm)
for i in range(0, len(cm)):
precision, recall, f1_score = get_confusion_matrix_stats(cm, i)
print("Label {} - precision {}, recall {}, f1_score {}: ".format(
i, np.round(precision, 2), np.round(recall, 2), np.round(f1_score, 2)
))
print("precision:", accuracy)
print("+++++++++++++++++++++++++++++++++++++")
def show_object_details(object_id, article_indexes, pred_df=None, topk=10):
"""
Shows associated papers for an object id according to predicted article_indexes
# U expands categorical variables, so it has a dimension larger than dext.objects_df
:param object_id:
:param article_indexes:
:param pred_df:
:param topk:
:return:
"""
global A
print("""
\nObject with ID: {}
""".format(object_id))
if pred_df is not None:
print("[x] Predicted articles in pred_df:")
print(pred_df)
objid = object_id.encode("utf-8")
url = "http://skyserver.sdss.org/dr16/en/tools/explore/Summary.aspx?id={}".format(
object_id
)
print("[x] You can check the SkyServer Explore page at: ")
print(url, "\n")
print("[x] Compact form from original object pandas dataframe (objects_df as in data_extract.py):")
print(dext.objects_df[dext.objects_df.OBJID == objid].transpose())
print("\n[x] Showing maximum Top-{}:".format(topk))
for k in range(0, min(len(article_indexes), topk)):
print("*************************************************************************************")
if pred_df is not None:
print(pred_df.iloc[k])
j = article_indexes[k]
dext.get_paper(paper_id=A.paper_id.iloc[j])
input(".....")
def apply_mlp(object_id=None):
"""
    uses the trained MLP classifier to calculate the probability P(Bij | ui, aj) for one object_id ui and all aj
    - builds matrix Mi, i.e. the portion of the general matrix M that belongs to this object, and scores every row
:param object_id:
:return:
"""
global U, G, CLF_MLP
if object_id is None:
i = pd.Series(range(0, G.shape[0])).sample(10).iloc[5] # index of object id in matrices G, U
object_id = U.OBJID.iloc[i]
else:
i = U[U.OBJID == object_id].index.values.tolist()[-1]
print("\n[x] Object ID:", object_id)
Mi, yi = gen_matrix_Mi(i)
Mi = pd.DataFrame(Mi)
print("[x] The portion of M matrix, corresponding to | ui | aj |, with j in [0, A_star.shape[0]]: ")
print(Mi)
preds = [np.round(t[1], 2) for t in CLF_MLP.predict_proba(Mi.values)]
# print("\n[x] Predictions:")
# print(preds)
pred_df = pd.DataFrame(
{
"article_index": Mi.index.values.tolist(),
"mlp_proba": preds,
"associated": yi
}
)
pred_df = pred_df.sort_values(by="mlp_proba", ascending=False)
pred_df = pred_df[pred_df.mlp_proba > 0.5]
pred_df = pred_df.reset_index(drop=True)
print("\n[x] Summarised with a threshold for probabilty of 50%, that is P(Bij | ui, aj) > 0.5:")
print(pred_df)
articles_indexes = pred_df.article_index.values.tolist()
print("")
return object_id, articles_indexes, pred_df
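# Typical interactive use (mirrors apply_clf_mlp below), once the matrices and CLF_MLP exist:
#   object_id, articles_indexes, pred_df = apply_mlp(object_id="M 79")
#   show_object_details(object_id, articles_indexes, pred_df)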
def data_extraction():
"""
with module dext original data is accessible: papers_total, objects_articles_dict, objects_df
:return:
"""
print("[x] Extracting data and creating matrices A, U and dictionary map MAP .. ")
dext.run()
A, U, MAP, txtclf = dext.load_matrices()
return A, U, MAP, txtclf
####################### Constructing Matrix M and MLP model #######################
def construct_G_Astar_M_matrices():
"""
uses above methods to construct training data M by combining G and A_star matrices
:return:
"""
global G, A_star, M, pca_A_ncomponents, pca_G_ncomponents
print("[x] Generating PCA projections of:"
"\n- matrices U (matrix G of astronomical objects)"
"\n- and A (matrix A_star of related papers)")
gen_matrix_G(ncomp=pca_G_ncomponents)
    # TODO: automatically increase pca_A_ncomponents if the explained variance drops below a threshold (e.g. 0.85)
gen_matrix_A_star(ncomp=pca_A_ncomponents)
print("\n[x] Generating matrix M out of two parts "
"| ui | aj | target {1 if related, 0 otherwise} ")
M, y = gen_matrix_M()
M = pd.DataFrame(M)
target_col = M.shape[1]
M[target_col] = y
p = 100 * M[target_col].sum() / M.shape[0]
print("The percentage of articles that are related directly (found at NED) at about: {}%".format(
np.round(p, 2)
))
print("[x] Done. Head(10):")
print(M.head(10))
print("")
time.sleep(5)
def do_model_mlp():
"""
perform modeling using MLP on constructed matrix M
:return:
"""
global M, CLF_MLP, labels_mlp
print("\n[x] Performing MLP modeling with balancing by choosing combinations objects (matrix G) "
"to articles (matrix A_star)"
"\n(target == 1) and three times those not related")
X = M.copy()
indx = X.index.values.tolist()
np.random.shuffle(indx)
X = X.loc[indx]
X, Y = X.values[:, :-1], X.values[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
mlp = MLPClassifier(max_iter=mlp_iter, verbose=True, early_stopping=False)
CLF_MLP = mlp.fit(X_train, y_train)
labels_mlp = CLF_MLP.classes_
print("[x] Validation of MLP classifier results on test data:")
check_mlp(x=X_test, y=y_test)
input("enter to continue ...")
print("")
def apply_clf_mlp(object_id="M 79", show_details=False):
"""
applies trained CLF_MLP model on one object
:param object_id:
:return:
"""
    # example prediction using the MLP classifier to calculate the probability of association to any paper
print("\n[x] Applying model MLP to object: {}".format(object_id))
object_id, articles_indexes, pred_df = apply_mlp(object_id=object_id) # * sig Ori
if show_details:
print("\n[x] Example prediction:")
show_object_details(object_id, articles_indexes, pred_df)
####################### Constructing Matrix D, Clustering and FunkSVD models #######################
def perform_object_optimal_clustering():
"""
performs a search for clustering with DBSCAN to be able to construct a reduced form of a "user-item" matrix
:return:
"""
global CLF_DBSCAN, U, G
print("\n[x] Choosing an optimal parameter for DBSCAN object cluster classifier .. ")
list_dist = []
    for i in range(0, G.shape[0] - 1):  # pairwise distances between objects, i.e. rows of G
        for j in range(i + 1, G.shape[0]):
euclidean, _ = dist_em(G[i, 1:], G[j, 1:])
list_dist.append(euclidean)
number_of_clusters = []
data_noise_list = []
distribution_data_list = []
    param = np.linspace(0.01, pd.Series(list_dist)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 08:23:04 2020
@author: nigo0024
"""
# %% Export Readable hs_settings/df_grid
import os
import pandas as pd
from scripts.analysis import sip_functs_analysis as sip_f
base_dir_results = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta'
df_grid = pd.read_csv(os.path.join(base_dir_results, 'msi_2_hs_settings.csv'))
df_grid_readable = sip_f.rename_scenarios(df_grid)
df_grid_readable.to_csv(os.path.join(base_dir_results, 'msi_2_hs_settings_short.csv'), index=False)
# %% Violin plots1
import os
import pandas as pd
from scripts.analysis import sip_functs_analysis as sip_f
base_dir_results = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta'
base_dir_out = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_metafigures'
fnames = ['msi_2_biomass_kgha_R2.csv', 'msi_2_nup_kgha_R2.csv', 'msi_2_tissue_n_pct_R2.csv']
df_grid = pd.read_csv(os.path.join(base_dir_results, 'msi_2_hs_settings.csv'))
df_grid = sip_f.rename_scenarios(df_grid)
f_full = [os.path.join(base_dir_results, f) for f in fnames]
for f in f_full:
df = pd.read_csv(f)
df = df_grid.merge(df, left_index=True, right_on='grid_idx')
df_filter = sip_f.sip_results_filter(df, model='Lasso')
for n_feats in range(1, 20, 3):
base_name = os.path.splitext(os.path.split(f)[-1])[0]
fig = sip_f.plot_violin_by_scenario(df_filter, base_name, y=str(n_feats))
fig.savefig(os.path.join(base_dir_out, '{0}_{1}.png'.format(os.path.splitext(os.path.split(f)[-1])[0], n_feats)), dpi=300)
# %% Find optimum accuracy
import os
import pandas as pd
from scripts.analysis import sip_functs_analysis as sip_f
base_dir_results = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta'
base_dir_out = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_metafigures'
fnames = ['msi_2_biomass_kgha_R2.csv', 'msi_2_nup_kgha_R2.csv', 'msi_2_tissue_n_pct_R2.csv']
df_grid = pd.read_csv(os.path.join(base_dir_results, 'msi_2_hs_settings.csv'))
df_grid = sip_f.rename_scenarios(df_grid)
f_full = [os.path.join(base_dir_results, f) for f in fnames]
subset = ['msi_run_id', 'grid_idx', 'response_label', 'feature_set',
'model_name', 'objective_f', 'n_feats_opt', 'value']
sort_order = ['response_label', 'feature_set', 'model_name', 'objective_f',
'grid_idx']
df_out = None
for response in ['biomass_kgha', 'nup_kgha', 'tissue_n_pct']:
for objective_f in ['R2', 'MAE', 'RMSE']:
f = os.path.join(base_dir_results, 'msi_2_{0}_{1}.csv'.format(response, objective_f))
df = pd.read_csv(f)
df.rename(columns={'extra_feats': 'feature_set'}, inplace=True)
df['objective_f'] = objective_f
if objective_f in ['MAE', 'RMSE']:
df['n_feats_opt'] = df[map(str, range(1, 51))].idxmin(axis=1)
df['value'] = df[map(str, range(1, 51))].min(axis=1)
else:
df['n_feats_opt'] = df[map(str, range(1, 51))].idxmax(axis=1)
df['value'] = df[map(str, range(1, 51))].max(axis=1)
df = df.astype({'n_feats_opt': 'int'})
        df_out_temp = df_grid.merge(df[subset], left_index=True, right_on='grid_idx')
if df_out is None:
df_out = df_out_temp.copy()
else:
df_out = df_out.append(df_out_temp)
df_out.sort_values(by=sort_order, inplace=True)
df_out.to_csv(os.path.join(base_dir_results, 'msi_2_n_feats_opt.csv'), index=False)
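# %% Toy illustration (made-up numbers) of the n_feats_opt logic above: with one column per
# feature count, idxmin returns the column label at which each row's error is smallest and
# min returns the corresponding value.
_df_demo = pd.DataFrame({'1': [20.1, 16.3], '2': [18.4, 15.9], '3': [19.0, 16.1]})
_n_feats_at_min = _df_demo[['1', '2', '3']].idxmin(axis=1)  # '2' for both rows
_value_at_min = _df_demo[['1', '2', '3']].min(axis=1)       # 18.4 and 15.9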
# %% Build Spotpy "results" table
import numpy as np
import os
import pandas as pd
from scripts.analysis import sip_functs_analysis as sip_f
base_dir_results = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta'
df_opt = pd.read_csv(os.path.join(base_dir_results, 'msi_2_n_feats_opt.csv'))
cols = ['like1', 'like2', 'like3', 'pardir_panels', 'parcrop', 'parclip', 'parsmooth', 'parbin',
'parsegment', 'simulation_rmse', 'simulation_mae', 'simulation_r2', 'chain']
options = [[response, feature] for response in ['biomass_kgha', 'nup_kgha', 'tissue_n_pct']
for feature in ['reflectance', 'derivative_1', 'derivative_2']]
for response, feature in options:
df_spotpy = pd.DataFrame(data=[], columns=cols)
df_opt_filter1 = df_opt[(df_opt['response_label'] == response) &
(df_opt['feature_set'] == feature)]
df_opt_filter1 = df_opt_filter1.sort_values(['objective_f', 'dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
df_spotpy['like1'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'RMSE']['value'])
df_spotpy['like2'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'MAE']['value'])
df_spotpy['like3'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'R2']['value'])
df_spotpy['simulation_rmse'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'RMSE']['value'])
df_spotpy['simulation_mae'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'MAE']['value'])
df_spotpy['simulation_r2'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'R2']['value'])
# df_spotpy['simulation_0'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'RMSE'].index)
# df_spotpy['simulation_1'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'MAE'].index)
# df_spotpy['simulation_2'] = list(df_opt_filter1[df_opt_filter1['objective_f'] == 'R2'].index)
    df_opt_filter2 = df_opt_filter1[df_opt_filter1['objective_f'] == 'RMSE']
df_spotpy['pardir_panels'] = list(df_opt_filter2['dir_panels'])
df_spotpy['parcrop'] = list(df_opt_filter2['crop'])
df_spotpy['parclip'] = list(df_opt_filter2['clip'])
df_spotpy['parsmooth'] = list(df_opt_filter2['smooth'])
df_spotpy['parbin'] = list(df_opt_filter2['bin'])
df_spotpy['parsegment'] = list(df_opt_filter2['segment'])
df_opt_filter2.loc[df_opt_filter2['model_name'] == 'Lasso', 'model_idx'] = 0
df_opt_filter2.loc[df_opt_filter2['model_name'] == 'PLSRegression', 'model_idx'] = 1
df_spotpy['chain'] = list(df_opt_filter2['model_idx'])
df_spotpy_int = df_spotpy.copy()
levels_dict = {}
for param in ['pardir_panels', 'parcrop', 'parclip', 'parsmooth', 'parbin', 'parsegment']:
labels, levels = pd.factorize(df_spotpy[param])
df_spotpy_int.loc[:, param] = labels
levels_dict[param] = list(levels)
levels_dict[param + '_idx'] = list(np.unique(labels))
df_spotpy_int.to_csv(
os.path.join(base_dir_results, 'spotpy',
'results_{0}_{1}.csv'.format(response, feature)),
index=False)
with open(os.path.join(base_dir_results, 'spotpy', 'README_params.txt'), 'w') as f:
for k in levels_dict:
f.write('{0}: {1}\n'.format(str(k), str(levels_dict[k])))
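# %% Small pd.factorize illustration (toy labels, not from the results): factorize maps each
# unique category to an integer code and returns the codes plus the levels, which is how the
# scenario columns above are converted for the spotpy-style results table.
_labels_demo, _levels_demo = pd.factorize(pd.Series(['closest', 'all', 'closest']))
# _labels_demo -> array([0, 1, 0]); _levels_demo -> Index(['closest', 'all'], dtype='object')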
# %% Histogram - get data
import numpy as np
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.ticker as mtick
base_dir_results = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta'
base_dir_spotpy = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta\spotpy'
df_opt = pd.read_csv(os.path.join(base_dir_results, 'msi_2_n_feats_opt.csv'))
options = [[response, feature, obj] for response in ['biomass_kgha', 'nup_kgha', 'tissue_n_pct']
for feature in ['reflectance', 'derivative_1', 'derivative_2']
for obj in ['MAE', 'RMSE', 'R2']]
units_print = {
    'biomass_kgha': r'kg ha$^{-1}$',
'nup_kgha': r'kg ha$^{-1}$',
'tissue_n_pct': '%'}
for response, feature, obj in options:
df_opt_filter = df_opt[
(df_opt['response_label'] == response) &
(df_opt['feature_set'] == feature) &
(df_opt['objective_f'] == obj)]
break
# Choose response, feature set, and cost function manually
response = 'nup_kgha'
feature = 'reflectance'
obj = 'RMSE'
df_opt_filter = df_opt[
(df_opt['response_label'] == response) &
(df_opt['feature_set'] == feature) &
(df_opt['objective_f'] == obj)]
sns.set_style("whitegrid")
grids_all = [(r, c) for r in range(2) for c in range(3)]
grids = [(r, c) for r in range(1) for c in range(3)]
# grids_bottom = [(r, c) for r in range(1,2) for c in range(3)]
scenario_options = { # These indicate the legend labels
'dir_panels': {
'name': 'Reflectance panels',
'closest': 'Closest panel',
'all': 'All panels (mean)'},
'crop': {
'name': 'Crop',
'plot_bounds': 'By plot boundary',
'crop_buf': 'Edges cropped'},
'clip': {
'name': 'Clip',
'none': 'No spectral clipping',
'ends': 'Ends clipped',
'all': 'Ends + H2O and O2 absorption'},
'smooth': {
'name': 'Smooth',
'none': 'No spectral smoothing',
'sg-11': 'Savitzky-Golay smoothing'},
'bin': {
'name': 'Bin',
'none': 'No spectral binning',
'sentinel-2a_mimic': 'Spectral "mimic" - Sentinel-2A',
'bin_20nm': 'Spectral "bin" - 20 nm'},
'segment': {
'name': 'Segment',
'none': 'No segmenting',
'ndi_upper_50': 'NDVI > 50th',
'ndi_lower_50': 'NDVI < 50th',
'mcari2_upper_50': 'MCARI2 > 50th',
'mcari2_lower_50': 'MCARI2 < 50th',
'mcari2_upper_90': 'MCARI2 > 90th',
'mcari2_in_50-75': '50th > MCARI2 < 75th',
'mcari2_in_75-95': '75th > MCARI2 < 95th',
'mcari2_upper_90_green_upper_75': 'MCARI2 > 90th; green > 75th'},
}
scenario_options_top = {k: scenario_options[k] for k in ['dir_panels', 'crop', 'clip']}
scenario_options_bottom = {k: scenario_options[k] for k in ['smooth', 'bin', 'segment']}
# %% Curate stats for cumulative density plots
classes_rmse = [14, 15, 16, 17, 18, 19]
classes_n_feats = [0, 5, 10, 15, 20, 25]
array_props = {'models': np.empty([2, len(classes_rmse)])}
cols = ['model_name', 'metric', 'class_min_val', 'proportion']
df_props_model = None
for i_model, model in enumerate(['Lasso', 'PLSRegression']):
df_model = df_opt_filter[df_opt_filter['model_name'] == model]
for i_classes, i_min_val in enumerate(range(len(classes_rmse))):
min_val = classes_rmse[i_min_val]
prop = (len(df_model[df_model['value'].between(min_val, min_val+1)]) / len(df_model)) * 100
array_props['models'][i_model, i_classes] = prop
data = [model, 'rmse', min_val, prop]
if df_props_model is None:
df_props_model = pd.DataFrame([data], columns=cols)
else:
df_props_model = df_props_model.append(pd.DataFrame([data], columns=cols))
min_val2 = classes_n_feats[i_min_val]
prop2 = (len(df_model[df_model['n_feats_opt'].between(min_val2, min_val2+5)]) / len(df_model)) * 100
array_props['models'][i_model, i_classes] = prop2
data2 = [model, 'n_feats_opt', min_val2, prop2]
if df_props_model is None:
df_props_model = pd.DataFrame([data2], columns=cols)
else:
df_props_model = df_props_model.append(pd.DataFrame([data2], columns=cols))
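# %% Quick sanity check of the proportion logic above (toy values): Series.between is
# inclusive on both ends, so each class counts the values falling in [min_val, min_val + 1].
_s_demo = pd.Series([14.2, 14.8, 15.1, 16.7, 18.3])
_prop_demo = (_s_demo.between(14, 15).sum() / len(_s_demo)) * 100  # 40.0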
# %% Calculate mean and median of all RMSE values
df_opt_filter['value'].describe()
# %% A1. Plot MSE train and validation score vs number of features
import matplotlib.patches as mpatches
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from ast import literal_eval
def plot_and_fill_std(df, ax, palette, legend, objective='mae'):
df_wide = df[['feat_n', 'score_train_' + objective, 'score_test_' + objective]].apply(pd.to_numeric).set_index('feat_n')
if df_wide['score_train_' + objective].iloc[1] < 0:
df_wide[['score_train_' + objective, 'score_test_' + objective]] = df_wide[['score_train_' + objective, 'score_test_' + objective]] * -1
x_feats = df_wide.index
ax = sns.lineplot(data=df_wide[['score_train_' + objective, 'score_test_' + objective]], ax=ax, palette=palette, legend=legend)
ax.lines[0].set_linewidth(1)
ax.lines[0].set_linestyle('-')
ax.lines[1].set_linestyle('-')
return ax
def plot_secondary(df, ax, palette, legend, objective='r2'):
df_wide = df[['feat_n', 'score_train_' + objective, 'score_test_' + objective]].apply(pd.to_numeric).set_index('feat_n')
if df_wide['score_train_' + objective].iloc[1] < 0:
df_wide[['score_train_' + objective, 'score_test_' + objective]] = df_wide[['score_train_' + objective, 'score_test_' + objective]] * -1
ax2 = ax.twinx()
ax2 = sns.lineplot(data=df_wide[['score_train_' + objective, 'score_test_' + objective]], ax=ax2, palette=palette, legend=legend)
ax2.lines[0].set_linewidth(1)
ax2.lines[0].set_linestyle('--')
ax2.lines[1].set_linestyle('--')
ax2.grid(False)
return ax, ax2
plt.style.use('seaborn-whitegrid')
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
fontsize = 12
fontcolor = '#464646'
colors = ['#f0bf5d', '#68de78']
palette = sns.color_palette("mako_r", 2)
fig, ax = plt.subplots(1, 1, figsize=(4, 3), dpi=300)
fig.subplots_adjust(
top=0.89,
bottom=0.158,
left=0.16,
right=0.836)
df_las = pd.read_csv(r'G:\BBE\AGROBOT\Shared Work\_migrate\hs_process_results\results\msi_2_results\msi_2_521\nup_kgha\reflectance\testing\msi_2_521_nup_kgha_test-scores-lasso.csv')
d = {}
with open(r'G:\BBE\AGROBOT\Shared Work\_migrate\hs_process_results\results\msi_2_results\msi_2_521\nup_kgha\reflectance\reflectance_README.txt') as f:
for i, l in enumerate(f):
if i >= 6:
band, wl = l.split(': ')
d[int(band)] = float(wl)
feat_n_opt = df_las[df_las['score_test_rmse'] == df_las['score_test_rmse'].min()]['feat_n'].values[0]
feats_opt = literal_eval(df_las[df_las['score_test_rmse'] == df_las['score_test_rmse'].min()]['feats'].values[0])
wls_opt = [d[b] for b in feats_opt]
assert len(wls_opt) == feat_n_opt  # sanity check: one wavelength per selected feature
print([round(wl) for wl in wls_opt])
# [399, 405, 417, 516, 571, 682, 705, 721, 723, 735, 764, 781, 811, 815, 824, 826, 848, 850, 856, 863]
objective = 'rmse'
ax1 = plot_and_fill_std(df_las, ax, palette, legend=False, objective=objective)
ax1, ax1b = plot_secondary(df_las, ax1, palette, legend='full', objective='r2')
if objective == 'rmse':
ylabel = r'RMSE (kg ha$^{-1}$)'
ax1.set_ylim([0, 32])
elif objective == 'mae':
ylabel = r'Error (kg ha$^{-1}$)'
ax1.set_ylim([0, 25])
ax1b.set_ylim([0, 1])
ax1.tick_params(labelsize=int(fontsize), colors=fontcolor, labelleft=True)
# t1 = ax1b.set_title('Lasso', fontsize=fontsize*1.1, fontweight='bold', color='white', bbox=dict(facecolor=(0.35,0.35,0.35), edgecolor=(0.35,0.35,0.35)))
ax1.set_ylabel(ylabel, fontsize=fontsize, color=fontcolor)
ax1b.set_ylabel(r'R$^{2}$', fontsize=fontsize, color=fontcolor, rotation=0, labelpad=15)
ax1b.tick_params(labelsize=int(fontsize), colors=fontcolor, labelright=True)
ax1.set_xlabel('Number of features', fontsize=fontsize, color=fontcolor)
ax1.set_xlim([-0.1, df_las['feat_n'].max() + 1])
h1, l1 = ax1b.get_legend_handles_labels()
h1.insert(0, mpatches.Patch(color=palette[1], label='Test set'))
h1.insert(0, mpatches.Patch(color=palette[0], label='Train set'))
l1 = [r'Train set', r'Test set', 'RMSE', r'R$^{2}$']
h1[2].set_linestyle('-')
h1[3].set_linestyle('--')
h1[2].set_linewidth(2)
h1[3].set_linewidth(2)
h1[2].set_color((0.35,0.35,0.35))
h1[3].set_color((0.35,0.35,0.35))
leg = ax1b.legend(h1, l1, loc='upper center',
handletextpad=0.4, ncol=4, columnspacing=1, fontsize=int(fontsize*0.8),
bbox_to_anchor=(0.5, 1.17), frameon=True, framealpha=1,
edgecolor=fontcolor)
for handle, text in zip(leg.legendHandles, leg.get_texts()):
text.set_color(fontcolor)
ax1b.add_artist(leg)
# %% A2a: Lasso vs PLS PDF/boxplot top
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=False, sharey=False, figsize=(8, 3.8),
gridspec_kw={'height_ratios': [2, 6]})
fig.subplots_adjust(
top=1.0,
bottom=0.268,
left=0.076,
right=0.942,
hspace=0.04,
wspace=0.399)
for i, val in enumerate(['value', 'n_feats_opt']):
# break
palette = sns.color_palette('muted', n_colors=len(df_opt_filter['model_name'].unique()))
ax2 = sns.histplot(ax=axes[1][i], data=df_opt_filter, x=val, alpha=0.3, color='#999999')
ax2.set_ylabel('Count', weight='bold', fontsize='large')
ax2.yaxis.set_major_locator(mtick.FixedLocator(ax2.get_yticks()))
ax2.set_yticklabels(['{:.0f}'.format(t) for t in ax2.get_yticks()], weight='bold', fontsize='large')
ax3 = ax2.twinx()
ax3 = sns.kdeplot(ax=ax3, data=df_opt_filter, x=val, hue='model_name',
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
if val == 'value':
ax2.yaxis.set_ticks(np.arange(0, 160, 50))
ax2.set_ylim(bottom=0, top=160)
ax2.set_xlim(left=14, right=20)
ax2.set_xlabel('{0} ({1})'.format(obj, units_print[response]),
fontweight='bold', fontsize='large')
else:
ax2.yaxis.set_ticks(np.arange(0, 330, 100))
ax2.set_ylim(bottom=0, top=330)
ax2.set_xlim(left=0, right=31)
ax2.set_xlabel('Feature $\it{n}$ at optimum ' + '{0}'.format(obj),
fontweight='bold', fontsize='large')
# else:
# ax1.set_xlim(left=0, right=20)
# set ticks visible, if using sharex = True. Not needed otherwise
ax2.tick_params(labelbottom=True)
ax2.xaxis.set_major_locator(mtick.FixedLocator(ax3.get_xticks()))
ax2.set_xticklabels(['{:.0f}'.format(t) for t in ax2.get_xticks()],
fontweight='bold', fontsize='large')
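    # Align the density (twin) axis with the count axis: take the count-axis tick step, the
    # leftover fraction of a step above its last tick, and space the density ticks so both
    # axes end up with matching gridlines.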
step_size = ax2.yaxis.get_ticklocs()[-1] - ax2.yaxis.get_ticklocs()[-2]
extra = (ax2.get_ylim()[-1] - ax2.yaxis.get_ticklocs()[-1]) / step_size
space = ax3.get_ylim()[-1] / (3.0 + extra)
ax3.set_yticks(np.arange(ax3.get_ylim()[0], ax3.get_ylim()[-1]+(space*extra), space))
ax3.grid(False)
ax3.set_yticklabels(ax3.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax3.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax3.set_ylabel('Density (%)', weight='bold', fontsize='large')
h, _ = ax3.get_legend_handles_labels()
h = list(reversed(h))
l2 = ['Lasso', 'Partial Least Squares']
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax3.get_ygridlines()[0].get_color()
leg = ax3.legend(h, l2,
bbox_to_anchor=(0, -0.24, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
# Finally, add boxplot now knowing the width
box_width = 0.7 if len(h) > 4 else 0.65 if len(h) > 2 else 0.45
ax1 = sns.boxplot(ax=axes[0][i], data=df_opt_filter, x=val, y='model_name',
width=box_width, fliersize=2, linewidth=1, palette=palette)
ax1.set(yticklabels=[], ylabel=None)
ax1.set(xticklabels=[], xlabel=None)
if val == 'value':
ax1.set_xlim(left=14, right=20)
else:
ax1.set_xlim(left=0, right=31)
# %% A2b: Lasso vs PLS ECDF/heatmap bottom
fig, axes = plt.subplots(nrows=3, ncols=2, sharex=False, sharey=False, figsize=(8, 3.8),
gridspec_kw={'height_ratios': [1, 1, 6]})
fig.subplots_adjust(
top=0.994,
bottom=0.274,
left=0.078,
right=0.942,
hspace=0.000,
wspace=0.399)
for i, val in enumerate(['value', 'n_feats_opt']):
# break
ax1 = sns.histplot(ax=axes[2][i], data=df_opt_filter, x=val, common_norm=False, cumulative=True, stat='density', alpha=0.3, color='#999999')
palette=sns.color_palette('muted', n_colors=len(df_opt_filter['model_name'].unique()))
ax2 = sns.ecdfplot(ax=ax1, data=df_opt_filter, x=val, hue='model_name',
label='temp', palette=palette)
ax1.set_ylim(bottom=0, top=1.05)
ax1.yaxis.set_ticks(np.arange(0, 1.05, 0.25))
if val == 'value':
ax1.set_ylabel('Density', weight='bold', fontsize='large')
ax1.set_yticklabels(ax1.get_yticks(), weight='bold', fontsize='large')
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax1.set_xlim(left=14, right=20)
ax1.set_xlabel('{0} ({1})'.format(obj, units_print[response]),
fontweight='bold', fontsize='large')
df_props_model_pivot = df_props_model[df_props_model['metric'] == 'rmse'].pivot('model_name', 'class_min_val', 'proportion')
else:
ax1.set(yticklabels=[], ylabel=None)
ax1.set_ylabel('')
ax1.set_xlim(left=0, right=31)
ax1.set_xlabel('Feature $\it{n}$ at optimum ' + '{0}'.format(obj),
fontweight='bold', fontsize='large')
df_props_model_pivot = df_props_model[df_props_model['metric'] == 'n_feats_opt'].pivot('model_name', 'class_min_val', 'proportion')
ax1.tick_params(labelbottom=True)
ax1.xaxis.set_major_locator(mtick.FixedLocator(ax1.get_xticks()))
ax1.set_xticklabels(['{:.0f}'.format(t) for t in ax1.get_xticks()],
fontweight='bold', fontsize='large')
# legend
h, _ = ax2.get_legend_handles_labels()
h = list(reversed(h))
l2 = ['Lasso', 'Partial Least Squares']
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax2.get_ygridlines()[0].get_color()
leg = ax2.legend(h, l2,
bbox_to_anchor=(0, -0.24, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
for i_model, model in enumerate(['Lasso', 'PLSRegression']):
ax = sns.heatmap(
ax=axes[i_model][i], data=df_props_model_pivot.loc[[model]],
annot=True, fmt='.1f', annot_kws={'weight': 'bold', 'fontsize': 9.5},
linewidths=4, yticklabels=False,
cbar=False, cmap=sns.light_palette(palette[i_model], as_cmap=True))
for t in ax.texts: t.set_text('') if float(t.get_text()) == 0.0 else t.set_text(t.get_text() + '%')
ax.set_xlabel('')
ax.set_ylabel('')
if val == 'n_feats_opt':
ax.set_xlim(left=0, right=6.2)
# %% A2c: Lasso vs PLS boxplot RMSE
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=False, figsize=(4, 3.5),
gridspec_kw={'height_ratios': [6, 2.5]})
fig.subplots_adjust(
top=0.987,
bottom=0.15,
left=0.148,
right=0.884,
hspace=0.49,
wspace=0.185)
# fig.suptitle('Model accuracy from subjective image processing ({0} features)'.format(feature), fontsize=16)
# for scenario, (row, col) in zip(scenario_options, grids):
ax1 = sns.histplot(ax=axes[0], data=df_opt_filter, x='value', alpha=0.3, color='#999999')
ax1.yaxis.set_ticks(np.arange(0, 160, 50))
ax1.set_ylabel('Count', weight='bold', fontsize='large')
ax1.set_yticklabels(ax1.get_yticks(), weight='bold', fontsize='large')
palette=sns.color_palette('muted', n_colors=len(df_opt_filter['model_name'].unique()))
ax2 = ax1.twinx()
ax2 = sns.kdeplot(ax=ax2, data=df_opt_filter, x='value', hue='model_name',
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
step_size = ax1.yaxis.get_ticklocs()[-1] - ax1.yaxis.get_ticklocs()[-2]
extra = (ax1.get_ylim()[-1] - ax1.yaxis.get_ticklocs()[-1]) / step_size
space = ax2.get_ylim()[-1] / (3.0 + extra)
ax2.set_yticks(np.arange(ax2.get_ylim()[0], ax2.get_ylim()[-1]+(space*extra), space))
ax2.grid(False)
ax2.set_yticklabels(ax2.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax2.set_ylabel('Density (%)', weight='bold', fontsize='large')
h, _ = ax2.get_legend_handles_labels()
h = list(reversed(h))
# l1 = list(df_opt_filter['model_name'].unique())
# l2 = [scenario_options['model_name'][uid] for uid in df_opt_filter['model_name'].unique()]
l2 = ['Lasso', 'Partial Least Squares']
width = 0.7 if len(h) > 4 else 0.6 if len(h) > 2 else 0.4
ax3 = sns.boxplot(ax=axes[1], data=df_opt_filter, x='value', y='model_name',
width=width, fliersize=2, linewidth=1, palette=palette)
ax3.set_xlabel('{0} ({1})'.format(obj, units_print[response]),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax3.tick_params(labelbottom=True)
ax3.xaxis.set_major_locator(mtick.FixedLocator(ax3.get_xticks()))
ax3.set_xticklabels(['{:.0f}'.format(t) for t in ax3.get_xticks()],
fontweight='bold', fontsize='large')
ax3.set(yticklabels=[], ylabel=None)
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
# fontsize = 'small'
label_color='#464646'
color_grid = ax1.get_ygridlines()[0].get_color()
leg = ax3.legend(h, l2,
bbox_to_anchor=(0, 1, 1, 0), loc='lower left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
ax2.get_legend().remove()
# %% A2d: Lasso vs PLS boxplot number of features to optimum
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=False, figsize=(4, 3.5),
gridspec_kw={'height_ratios': [6, 2.5]})
fig.subplots_adjust(
top=0.987,
bottom=0.15,
left=0.148,
right=0.884,
hspace=0.49,
wspace=0.185)
# fig.suptitle('Model accuracy from subjective image processing ({0} features)'.format(feature), fontsize=16)
# for scenario, (row, col) in zip(scenario_options, grids):
ax1 = sns.histplot(ax=axes[0], data=df_opt_filter, x='n_feats_opt', alpha=0.3, color='#999999')
ax1.set_ylim(bottom=0, top=330)
ax1.yaxis.set_ticks(np.arange(0, 330, 100))
ax1.set_ylabel('Count', weight='bold', fontsize='large')
ax1.set_yticklabels(ax1.get_yticks(), weight='bold', fontsize='large')
palette=sns.color_palette('muted', n_colors=len(df_opt_filter['model_name'].unique()))
ax2 = ax1.twinx()
ax2 = sns.kdeplot(ax=ax2, data=df_opt_filter, x='n_feats_opt', hue='model_name',
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
ax2.set_ylim(bottom=0)
# break
step_size = ax1.yaxis.get_ticklocs()[-1] - ax1.yaxis.get_ticklocs()[-2]
extra = (ax1.get_ylim()[-1] - ax1.yaxis.get_ticklocs()[-1]) / step_size
space = ax2.get_ylim()[-1] / (3.0 + extra)
ax2.set_yticks(np.arange(ax2.get_ylim()[0], ax2.get_ylim()[-1]+(space*extra), space))
ax2.grid(False)
ax2.set_yticklabels(ax2.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax2.set_ylabel('Density (%)', weight='bold', fontsize='large')
h, _ = ax2.get_legend_handles_labels()
h = list(reversed(h))
# l1 = list(df_opt_filter['model_name'].unique())
# l2 = [scenario_options['model_name'][uid] for uid in df_opt_filter['model_name'].unique()]
l2 = ['Lasso', 'Partial Least Squares']
width = 0.7 if len(h) > 4 else 0.6 if len(h) > 2 else 0.4
ax3 = sns.boxplot(ax=axes[1], data=df_opt_filter, x='n_feats_opt', y='model_name',
width=width, fliersize=2, linewidth=1, palette=palette)
ax3.set_xlabel('Feature $\it{n}$ at optimum ' + '{0}'.format(obj),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax3.tick_params(labelbottom=True)
ax3.xaxis.set_major_locator(mtick.FixedLocator(ax3.get_xticks()))
ax3.set_xticklabels(['{:.0f}'.format(t) for t in ax3.get_xticks()],
fontweight='bold', fontsize='large')
ax3.set(yticklabels=[], ylabel=None)
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
# fontsize = 'small'
label_color='#464646'
color_grid = ax1.get_ygridlines()[0].get_color()
leg = ax3.legend(h, l2,
bbox_to_anchor=(0, 1, 1, 0), loc='lower left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
ax2.get_legend().remove()
# %% A3 top: Plot histogram + boxplots n_feats_opt
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=False, figsize=(12.5, 4),
gridspec_kw={'height_ratios': [2, 6]})
fig.subplots_adjust(
top=0.99,
bottom=0.31,
left=0.05,
right=0.96,
hspace=0.04,
wspace=0.185)
for scenario, (row, col) in zip(scenario_options_top, grids):
palette = sns.color_palette('muted', n_colors=len(df_opt_filter[scenario].unique()))
ax1 = sns.histplot(ax=axes[row+1][col], data=df_opt_filter, x='n_feats_opt', alpha=0.3, color='#999999')
ax1.set_ylim(bottom=0, top=330)
ax1.yaxis.set_ticks(np.arange(0, 330, 100))
if col >= 1:
ax1.set(yticklabels=[], ylabel=None)
else:
ax1.set_ylabel('Count', weight='bold', fontsize='large')
ax1.set_yticklabels(ax1.get_yticks(), weight='bold', fontsize='large')
ax2 = ax1.twinx()
ax2 = sns.kdeplot(ax=ax2, data=df_opt_filter, x='n_feats_opt', hue=scenario,
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
ax2.set_ylim(bottom=0)
ax1.set_xlabel('Feature $\it{n}$ at optimum ' + '{0}'.format(obj),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax1.tick_params(labelbottom=True)
    ax1.xaxis.set_major_locator(mtick.FixedLocator(ax2.get_xticks()))
ax1.set_xticklabels(['{:.0f}'.format(t) for t in ax2.get_xticks()],
fontweight='bold', fontsize='large')
step_size = ax1.yaxis.get_ticklocs()[-1] - ax1.yaxis.get_ticklocs()[-2]
extra = (ax1.get_ylim()[-1] - ax1.yaxis.get_ticklocs()[-1]) / step_size
space = ax2.get_ylim()[-1] / (3.0 + extra)
    ax2.set_yticks(np.arange(ax2.get_ylim()[0], ax2.get_ylim()[-1]+(space*extra), space))
ax2.grid(False)
ax2.set_yticklabels(ax2.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax2.set_ylabel('Density (%)', weight='bold', fontsize='large')
l1 = list(df_opt_filter[scenario].unique())
l2 = [scenario_options[scenario][uid] for uid in df_opt_filter[scenario].unique()]
h, l1 = ax2.get_legend_handles_labels()
h = list(reversed(h))
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax1.get_ygridlines()[0].get_color()
leg = ax2.legend(h, l2,
bbox_to_anchor=(0, -0.24, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
# Finally, add boxplot now knowing the width
box_width = 0.7 if len(h) > 4 else 0.65 if len(h) > 2 else 0.45
ax3 = sns.boxplot(ax=axes[row][col], data=df_opt_filter, x='n_feats_opt', y=scenario,
width=box_width, fliersize=2, linewidth=1, palette=palette)
ax3.set(yticklabels=[], ylabel=None)
# %% A3 bottom: Plot histogram + boxplots n_feats_opt
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=False, figsize=(12.5, 5),
gridspec_kw={'height_ratios': [5, 6]})
fig.subplots_adjust(
top=0.99,
bottom=0.31,
left=0.05,
right=0.96,
hspace=0.04,
wspace=0.185)
for scenario, (row, col) in zip(scenario_options_bottom, grids):
palette = sns.color_palette('muted', n_colors=len(df_opt_filter[scenario].unique()))
ax1 = sns.histplot(ax=axes[row+1][col], data=df_opt_filter, x='n_feats_opt', alpha=0.3, color='#999999')
ax1.set_ylim(bottom=0, top=330)
ax1.yaxis.set_ticks(np.arange(0, 330, 100))
if col >= 1:
ax1.set(yticklabels=[], ylabel=None)
else:
ax1.set_ylabel('Count', weight='bold', fontsize='large')
ax1.set_yticklabels(ax1.get_yticks(), weight='bold', fontsize='large')
ax2 = ax1.twinx()
ax2 = sns.kdeplot(ax=ax2, data=df_opt_filter, x='n_feats_opt', hue=scenario,
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
ax2.set_ylim(bottom=0)
ax1.set_xlabel('Feature $\it{n}$ at optimum ' + '{0}'.format(obj),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax1.tick_params(labelbottom=True)
    ax1.xaxis.set_major_locator(mtick.FixedLocator(ax2.get_xticks()))
ax1.set_xticklabels(['{:.0f}'.format(t) for t in ax2.get_xticks()],
fontweight='bold', fontsize='large')
step_size = ax1.yaxis.get_ticklocs()[-1] - ax1.yaxis.get_ticklocs()[-2]
extra = (ax1.get_ylim()[-1] - ax1.yaxis.get_ticklocs()[-1]) / step_size
space = ax2.get_ylim()[-1] / (3.0 + extra)
    ax2.set_yticks(np.arange(ax2.get_ylim()[0], ax2.get_ylim()[-1]+(space*extra), space))
ax2.grid(False)
ax2.set_yticklabels(ax2.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax2.set_ylabel('Density (%)', weight='bold', fontsize='large')
l1 = list(df_opt_filter[scenario].unique())
l2 = [scenario_options[scenario][uid] for uid in df_opt_filter[scenario].unique()]
h, l1 = ax2.get_legend_handles_labels()
h = list(reversed(h))
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax1.get_ygridlines()[0].get_color()
leg = ax2.legend(h, l2,
bbox_to_anchor=(0, -0.24, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
# Finally, add boxplot now knowing the width
box_width = 0.7 if len(h) > 4 else 0.65 if len(h) > 2 else 0.45
ax3 = sns.boxplot(ax=axes[row][col], data=df_opt_filter, x='n_feats_opt', y=scenario,
width=box_width, fliersize=2, linewidth=1, palette=palette)
ax3.set(yticklabels=[], ylabel=None)
# %% A0[not used]: ECDF for n_feats_opt
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=False, sharey=False, figsize=(12.5, 6),
gridspec_kw={'height_ratios': [6, 6]})
fig.subplots_adjust(
top=0.998,
bottom=0.251,
left=0.051,
right=0.99,
hspace=0.825,
wspace=0.075)
for scenario, (row, col) in zip(scenario_options, grids_all):
ax1 = sns.histplot(ax=axes[row][col], data=df_opt_filter, x='n_feats_opt', common_norm=False, cumulative=True, stat='density', alpha=0.3, color='#999999')
palette=sns.color_palette('muted', n_colors=len(df_opt_filter[scenario].unique()))
ax2 = sns.ecdfplot(ax=ax1, data=df_opt_filter, x='n_feats_opt', hue=scenario,
label='temp', palette=palette)
ax1.set_ylim(bottom=0, top=1.05)
# ax1.set_xlim(left=14, right=20)
ax1.yaxis.set_ticks(np.arange(0, 1.05, 0.25))
if col >= 1:
ax1.set(yticklabels=[], ylabel=None)
ax1.set_ylabel('')
else:
ax1.set_ylabel('Density', weight='bold', fontsize='large')
ax1.set_yticklabels(ax1.get_yticks(), weight='bold', fontsize='large')
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# legend
h, _ = ax2.get_legend_handles_labels()
h = list(reversed(h))
l2 = [scenario_options[scenario][uid] for uid in df_opt_filter[scenario].unique()]
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax2.get_ygridlines()[0].get_color()
leg = ax2.legend(h, l2,
bbox_to_anchor=(0, -0.3, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
width = 0.7 if len(h) > 4 else 0.6 if len(h) > 2 else 0.4
# ax3 = sns.boxplot(ax=axes[row*2][col], data=df_opt_filter, x='value', y=scenario,
# width=width, fliersize=2, linewidth=1, palette=palette)
ax1.set_xlabel('Feature $\it{n}$ at optimum ' + '{0}'.format(obj),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax1.tick_params(labelbottom=True)
ax1.xaxis.set_major_locator(mtick.FixedLocator(ax1.get_xticks()))
ax1.set_xticklabels(['{:.0f}'.format(t) for t in ax1.get_xticks()],
fontweight='bold', fontsize='large')
# %% Fig 1: Get/organize NUP ground truth data
# base_dir_nup_data = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results\msi_2_000\nup_kgha'
# df_nup = pd.read_csv(os.path.join(base_dir_nup_data, 'msi_2_000_nup_kgha_data.csv'))
base_dir_nup_data = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results\msi_2_325\nup_kgha'
df_nup = pd.read_csv(os.path.join(base_dir_nup_data, 'msi_2_325_nup_kgha_data.csv'))
subset = ['dataset_id', 'study', 'date', 'plot_id', 'trt', 'rate_n_pp_kgha',
'rate_n_sd_plan_kgha', 'rate_n_total_kgha', 'growth_stage', 'nup_kgha']
df_nup = df_nup[subset]
df_nup_stats = df_nup[['study', 'date', 'nup_kgha']].groupby(['study', 'date']).describe()
df_nup_stats.to_csv(os.path.join(r'F:\nigo0024\Dropbox\UMN\UMN_Publications\2020_sip\data', 'nup_stats.csv'), index=True)
df_nup_stat_total = df_nup['nup_kgha'].describe()
df_nup_stat_total.to_csv(os.path.join(r'F:\nigo0024\Dropbox\UMN\UMN_Publications\2020_sip\data', 'nup_stats_total.csv'), index=True)
# %% Fig 1: NUP histogram
dataset_dict = {
0: 'Waseca whole field - 2019-06-29',
1: 'Waseca whole field - 2019-07-08',
2: 'Waseca whole field - 2019-07-23',
3: 'Waseca small plot - 2019-06-29',
4: 'Waseca small plot - 2019-07-09',
5: 'Waseca small plot - 2019-07-23',
6: 'Wells small plot - 2018-06-28',
7: 'Wells small plot - 2019-07-08'}
fig, (ax_boxes, ax_box, ax1) = plt.subplots(3, sharex=True, figsize=(7, 5.5), gridspec_kw={"height_ratios": (5, 0.75, 5.25)})
fig.subplots_adjust(
top=0.808,
bottom=0.111,
left=0.120,
right=0.992,
hspace=0.05,
wspace=0.2)
palette = sns.color_palette('viridis', n_colors=len(df_nup['dataset_id'].unique())+5)
if len(df_nup['dataset_id'].unique()) == 7:
hue_order = [6, 7, 3, 4, 0, 1, 2]
ds_list = [0, 1, 2, 3, 4, 6, 7]
else:
hue_order = [6, 7, 3, 4, 5, 0, 1, 2]
n_order = [143, 144, 24, 24, 24, 16, 16, 16]
ds_list = [0, 1, 2, 3, 4, 5, 6, 7]
growthstage_ticks = [str(df_nup[df_nup['dataset_id'] == i]['growth_stage'].unique()[0]) + ' ({0})'.format(j) for i, j in zip(hue_order, n_order)]
growthstage_ticks[1] = 'V8 (144)'
# growthstage_ticks = [str(df_nup[df_nup['dataset_id'] == i]['growth_stage'].unique()[0]) + ' ($\it{n}$=' + '{0})'.format(j) for i, j in zip(hue_order, n_order)]
# growthstage_ticks[1] = 'V8 ($\it{n}$=144)'
ax_boxes = sns.boxplot(ax=ax_boxes, data=df_nup.replace({'dataset_id': dataset_dict}),
x='nup_kgha', y='dataset_id',
order=[dataset_dict[i] for i in hue_order],
# order=hue_order,
width=0.7, fliersize=2, linewidth=1, palette=palette[2:-3])
ax_boxes.set_ylabel('')
ax_boxes.set_yticklabels(growthstage_ticks, fontweight='bold', fontsize='medium')
ax_boxes.set(xticklabels=[], xlabel=None)
for i, p in enumerate(ax_boxes.artists):
p.set_alpha(0.8)
p.set_edgecolor('#555555')
for j in range(i*6, i*6+6):
line = ax_boxes.lines[j]
line.set_color('#555555')
line.set_mfc('#555555')
line.set_mec('#555555')
ax_box = sns.boxplot(x=df_nup['nup_kgha'], ax=ax_box, color='#C0C0C0', width=0.7, linewidth=1, fliersize=3)
for i, p in enumerate(ax_box.artists):
p.set_alpha(0.8)
p.set_edgecolor('#555555')
for j in range(i*6, i*6+6):
line = ax_box.lines[j]
line.set_color('#555555')
line.set_mfc('#555555')
line.set_mec('#555555')
ax_box.set(xlabel='')
ax_box.set_yticklabels(['Total (407)'], fontweight='bold', fontsize=10.5)
ax1 = sns.histplot(ax=ax1, data=df_nup, x='nup_kgha', binwidth=3, multiple='stack', hue='dataset_id', hue_order=hue_order, palette=palette[2:-3], label='temp')
ax1.set_xlim(left=0)
ax1.set_xlabel('Nitrogen uptake ({0})'.format(units_print[response]), weight='bold', fontsize='x-large')
ax1.xaxis.set_major_locator(mtick.FixedLocator(ax1.get_xticks()))
ax1.set_xticklabels(['{:.0f}'.format(t) for t in ax1.get_xticks()],
fontweight='bold', fontsize='x-large')
ax1.set_ylabel('Count', weight='bold', fontsize='x-large')
ax1.yaxis.set_major_locator(mtick.FixedLocator(ax1.get_yticks()))
ax1.set_yticklabels(['{:.0f}'.format(t) for t in ax1.get_yticks()],
fontweight='bold', fontsize='x-large')
h, l1 = ax1.get_legend_handles_labels()
l2 = [dataset_dict[ds_id] for ds_id in hue_order]
h = list(reversed(h))
ncol = 2
fontsize = 12
label_color='#464646'
color_grid = ax1.get_ygridlines()[0].get_color()
leg = ax1.legend(h, l2,
bbox_to_anchor=(0, 2.15, 1, 0), loc='lower left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
# %% Fig 2b: Spectral mimic demo using a single pixel from a Wells 2018 image
import os
from hs_process import hsio
from hs_process import spec_mod
from ast import literal_eval
from matplotlib.patches import Polygon
data_dir = r'F:\\nigo0024\Documents\hs_process_demo'
fname_hdr = os.path.join(data_dir, 'Wells_rep2_20180628_16h56m_pika_gige_7-Radiance Conversion-Georectify Airborne Datacube-Convert Radiance Cube to Reflectance from Measured Reference Spectrum.bip.hdr')
io = hsio()
io.read_cube(fname_hdr)
my_spec_mod = spec_mod(io.spyfile)
# Use spec_mod.spectral_mimic to mimic the Sentinel-2A spectral response function.
array_s2a, metadata_s2a = my_spec_mod.spectral_mimic(sensor='sentinel-2a', center_wl='weighted')
array_bin, metadata_bin = my_spec_mod.spectral_resample(bandwidth=20)
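# array_s2a and array_bin are the spectrally resampled datacubes; metadata_s2a and
# metadata_bin carry the updated band metadata, whose 'wavelength' (and, for the mimic,
# 'fwhm') entries are stored as strings and parsed below.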
# Plot the spectral response of a single hyperspectral pixel (row 200, column 800 below)
# alongside that of the mimicked Sentinel-2A bands and the 20 nm binned bands.
fwhm_s2a = [float(i) for i in metadata_s2a['fwhm'][1:-1].split(', ')]
spy_hs = my_spec_mod.spyfile.open_memmap() # datacube before smoothing
meta_bands = list(io.tools.meta_bands.values())
meta_bands_s2a = sorted([float(i) for i in literal_eval(metadata_s2a['wavelength'])])
meta_bands_bin20 = sorted([float(i) for i in literal_eval(metadata_bin['wavelength'])])
# %% Fig 2b: Plot the spectral mimic demo
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 3))
fig.subplots_adjust(
top=0.97,
bottom=0.14,
left=0.056,
right=0.99,
hspace=0.2,
wspace=0.178)
bands = [meta_bands_s2a, meta_bands_bin20]
arrays = [array_s2a, array_bin]
fwhms = [fwhm_s2a, [20]*(len(meta_bands_bin20)-1) + [(meta_bands_bin20[-1] - meta_bands_bin20[-2]) / 2]]
labels = ['Spectral "mimic" – Sentinel-2A',
'Spectral "bin" - 20 nm']
for i, ax in enumerate(axes):
if i == 0:
zorders = [1, 1, 1, 1, 1, 1, 2, 1, 2]
alpha=0.7
ms=7
else:
zorders = [2]*len(bands[i])
alpha=0.5
ms=6
ax1 = sns.lineplot(ax=ax, x=meta_bands, y=spy_hs[200][800]*100, label='Hyperspectral', linewidth=2, color=palette[2], zorder=0)
ax2 = sns.lineplot(ax=ax1, x=bands[i], y=arrays[i][200][800]*100,
label=labels[i], linestyle='None',
marker='o', ms=ms, color=palette[0+i], zorder=2)
ax1.set_ylim([0, 40])
wedge_height = 1.5
for wl, ref_pct, fwhm, zorder in zip(bands[i], arrays[i][200][800]*100, fwhms[i], zorders):
wl_min = wl - (fwhm/2)
wl_max = wl + (fwhm/2)
verts = [(wl_min, 0), *zip([wl_min, wl, wl_max], [ref_pct-wedge_height, ref_pct, ref_pct-wedge_height]), (wl_max, 0)]
poly = Polygon(verts, facecolor='0.9', edgecolor='0.5', alpha=alpha, zorder=zorder)
ax2.add_patch(poly)
ax1.set_xlabel('Wavelength (nm)', weight='bold', fontsize='large')
ax1.set_ylabel('Reflectance (%)', weight='bold', fontsize='large')
ax1.xaxis.set_major_locator(mtick.FixedLocator(ax1.get_xticks()))
ax1.set_xticklabels(['{:.0f}'.format(t) for t in ax1.get_xticks()],
fontweight='bold', fontsize='large')
ax1.yaxis.set_major_locator(mtick.FixedLocator(ax1.get_yticks()))
ax1.set_yticklabels(['{:.0f}'.format(t) for t in ax1.get_yticks()],
fontweight='bold', fontsize='large')
h, l1 = ax1.get_legend_handles_labels()
leg = ax1.legend(h, l1,
framealpha=0.85,
handletextpad=0.5, # spacing between handle and label
frameon=True,
edgecolor=ax1.get_ygridlines()[0].get_color(),
prop={'weight':'bold',
'size': 'medium'})
# %% Fig 3: Plot histogram + boxplots RMSE top
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=False, figsize=(12.5, 4),
gridspec_kw={'height_ratios': [2, 6]})
fig.subplots_adjust(
top=0.99,
bottom=0.31,
left=0.05,
right=0.96,
hspace=0.04,
wspace=0.185)
for scenario, (row, col) in zip(scenario_options_top, grids):
palette = sns.color_palette('muted', n_colors=len(df_opt_filter[scenario].unique()))
ax2 = sns.histplot(ax=axes[row+1][col], data=df_opt_filter, x='value', alpha=0.3, color='#999999')
ax2.yaxis.set_ticks(np.arange(0, 160, 50))
if col >= 1:
ax2.set(yticklabels=[], ylabel=None)
else:
ax2.set_ylabel('Count', weight='bold', fontsize='large')
ax2.set_yticklabels(ax2.get_yticks(), weight='bold', fontsize='large')
ax3 = ax2.twinx()
ax3 = sns.kdeplot(ax=ax3, data=df_opt_filter, x='value', hue=scenario,
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
ax2.set_xlabel('{0} ({1})'.format(obj, units_print[response]),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax2.tick_params(labelbottom=True)
ax2.xaxis.set_major_locator(mtick.FixedLocator(ax3.get_xticks()))
ax2.set_xticklabels(['{:.0f}'.format(t) for t in ax2.get_xticks()],
fontweight='bold', fontsize='large')
step_size = ax2.yaxis.get_ticklocs()[-1] - ax2.yaxis.get_ticklocs()[-2]
extra = (ax2.get_ylim()[-1] - ax2.yaxis.get_ticklocs()[-1]) / step_size
space = ax3.get_ylim()[-1] / (3.0 + extra)
ax3.set_yticks(np.arange(ax3.get_ylim()[0], ax3.get_ylim()[-1]+(space*extra), space))
ax3.grid(False)
ax3.set_yticklabels(ax3.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax3.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax3.set_ylabel('Density (%)', weight='bold', fontsize='large')
l1 = list(df_opt_filter[scenario].unique())
l2 = [scenario_options[scenario][uid] for uid in df_opt_filter[scenario].unique()]
h, l1 = ax3.get_legend_handles_labels()
h = list(reversed(h))
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax3.get_ygridlines()[0].get_color()
leg = ax3.legend(h, l2,
bbox_to_anchor=(0, -0.24, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
# Finally, add boxplot now knowing the width
box_width = 0.7 if len(h) > 4 else 0.65 if len(h) > 2 else 0.45
ax1 = sns.boxplot(ax=axes[row][col], data=df_opt_filter, x='value', y=scenario,
width=box_width, fliersize=2, linewidth=1, palette=palette)
ax1.set(yticklabels=[], ylabel=None)
ax1.set_xlim(left=14, right=20)
# %% Fig 3: Plot histogram + boxplots RMSE bottom
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=False, figsize=(12.5, 5),
gridspec_kw={'height_ratios': [5, 6]})
fig.subplots_adjust(
top=0.99,
bottom=0.31,
left=0.05,
right=0.96,
hspace=0.04,
wspace=0.185)
for scenario, (row, col) in zip(scenario_options_bottom, grids):
palette = sns.color_palette('muted', n_colors=len(df_opt_filter[scenario].unique()))
ax2 = sns.histplot(ax=axes[row+1][col], data=df_opt_filter, x='value', alpha=0.3, color='#999999')
ax2.yaxis.set_ticks(np.arange(0, 160, 50))
if col >= 1:
ax2.set(yticklabels=[], ylabel=None)
else:
ax2.set_ylabel('Count', weight='bold', fontsize='large')
ax2.set_yticklabels(ax2.get_yticks(), weight='bold', fontsize='large')
ax3 = ax2.twinx()
ax3 = sns.kdeplot(ax=ax3, data=df_opt_filter, x='value', hue=scenario,
cut=0, bw_adjust=0.7, common_norm=True, common_grid=False,
label='temp', palette=palette)
ax2.set_xlabel('{0} ({1})'.format(obj, units_print[response]),
fontweight='bold', fontsize='large')
# set ticks visible, if using sharex = True. Not needed otherwise
ax2.tick_params(labelbottom=True)
ax2.xaxis.set_major_locator(mtick.FixedLocator(ax3.get_xticks()))
ax2.set_xticklabels(['{:.0f}'.format(t) for t in ax2.get_xticks()],
fontweight='bold', fontsize='large')
step_size = ax2.yaxis.get_ticklocs()[-1] - ax2.yaxis.get_ticklocs()[-2]
extra = (ax2.get_ylim()[-1] - ax2.yaxis.get_ticklocs()[-1]) / step_size
space = ax3.get_ylim()[-1] / (3.0 + extra)
ax3.set_yticks(np.arange(ax3.get_ylim()[0], ax3.get_ylim()[-1]+(space*extra), space))
ax3.grid(False)
ax3.set_yticklabels(ax3.get_yticks(), weight='bold', rotation=90,
horizontalalignment='left', verticalalignment='center',
fontsize='large')
ax3.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax3.set_ylabel('Density (%)', weight='bold', fontsize='large')
l1 = list(df_opt_filter[scenario].unique())
l2 = [scenario_options[scenario][uid] for uid in df_opt_filter[scenario].unique()]
h, l1 = ax3.get_legend_handles_labels()
h = list(reversed(h))
ncol = 2 if len(h) > 4 else 1
fontsize = 8.75 if len(h) > 3 else 'medium'
label_color='#464646'
color_grid = ax3.get_ygridlines()[0].get_color()
leg = ax3.legend(h, l2,
bbox_to_anchor=(0, -0.28, 1, 0), loc='upper left',
mode='expand', ncol=ncol,
# fontsize=fontsize,
framealpha=0.85,
handletextpad=0.1, # spacing between handle and label
columnspacing=0.5,
frameon=True,
edgecolor=color_grid,
prop={'weight':'bold',
'size': fontsize})
# Finally, add boxplot now knowing the width
box_width = 0.7 if len(h) > 4 else 0.65 if len(h) > 2 else 0.45
ax1 = sns.boxplot(ax=axes[row][col], data=df_opt_filter, x='value', y=scenario,
width=box_width, fliersize=2, linewidth=1, palette=palette)
ax1.set(yticklabels=[], ylabel=None)
ax1.set_xlim(left=14, right=20)
# %% Fig 4: SALib functions
import numpy as np
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import rcParams
# seed = np.random.randint(0, 9999)
seed = 7818
def SALib_load_results():
base_dir_salib = r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\msi_2_results_meta\salib'
df = pd.read_csv(os.path.join(os.path.dirname(base_dir_salib), 'spotpy', 'results_nup_kgha_reflectance.csv'))
Y = df['simulation_rmse'].to_numpy()
df.rename(columns={'pardir_panels': 0,
'parcrop': 1,
'parclip': 2,
'parsmooth': 3,
'parbin': 4,
'parsegment': 5},
inplace=True)
return df, Y
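# Note: the integer column names 0..5 follow the parameter order used in SALib_get_problem
# below (dir_panels, crop, clip, smooth, bin, segment), so sampled rows can be matched
# positionally in SALib_set_Y.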
def SALib_set_Y(param_values, df):
Y = np.zeros(len(param_values))
df['used'] = False
for i, row in enumerate(param_values):
df_filter = df[(df[0] == round(row[0])) &
(df[1] == round(row[1])) &
(df[2] == round(row[2])) &
(df[3] == round(row[3])) &
(df[4] == round(row[4])) &
(df[5] == round(row[5]))]
        if df.loc[df_filter.index[0], 'used'] == True:
            if df.loc[df_filter.index[1], 'used'] == True:  # both rows already used; fall back to the first one
                Y[i] = df.loc[df_filter.index[0], 'simulation_rmse']
            else:  # get the second one
                Y[i] = df.loc[df_filter.index[1], 'simulation_rmse']
                df.loc[df_filter.index[1], 'used'] = True
else:
Y[i] = df.loc[df_filter.index[0], 'simulation_rmse']
df.loc[df_filter.index[0], 'used'] = True
print('Number of observations used: {0}'.format(len(df[df['used'] == True])))
print('Number of observations NOT used: {0}'.format(len(df[df['used'] == False])))
return df, Y
def SALib_get_problem(as_int=False):
if as_int == True:
problem = {
'num_vars': 6,
'names': ['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'],
'bounds': [[0, 2],
[0, 2],
[0, 3],
[0, 2],
[0, 3],
[0, 9]]}
else:
problem = {
'num_vars': 6,
'names': ['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'],
'bounds': [[0, 1],
[0, 1],
[0, 2],
[0, 1],
[0, 2],
[0, 8]]}
return problem
scenario_options = { # These indicate the legend labels
'dir_panels': {
'name': 'Reflectance panels',
'closest': 'Closest panel',
'all': 'All panels (mean)'},
'crop': {
'name': 'Crop',
'plot_bounds': 'By plot boundary',
'crop_buf': 'Edges cropped'},
'clip': {
        'name': 'Clip',
'none': 'No spectral clipping',
'ends': 'Ends clipped',
'all': 'Ends + H2O and O2 absorption'},
'smooth': {
'name': 'Smooth',
'none': 'No spectral smoothing',
'sg-11': 'Savitzky-Golay smoothing'},
'bin': {
'name': 'Bin',
'none': 'No spectral binning',
'sentinel-2a_mimic': 'Spectral "mimic" - Sentinel-2A',
'bin_20nm': 'Spectral "bin" - 20 nm'},
'segment': {
'name': 'Segment',
'none': 'No segmenting',
'ndi_upper_50': 'NDVI > 50th',
'ndi_lower_50': 'NDVI < 50th',
'mcari2_upper_50': 'MCARI2 > 50th',
'mcari2_lower_50': 'MCARI2 < 50th',
'mcari2_upper_90': 'MCARI2 > 90th',
        'mcari2_in_50-75': '50th < MCARI2 < 75th',
        'mcari2_in_75-95': '75th < MCARI2 < 95th',
'mcari2_upper_90_green_upper_75': 'MCARI2 > 90th; green > 75th'},
}
def get_df_results(Si, scenario_options, obj='rmse'):
df_results = None
for k, v in Si.items():
sa_dict = {}
sa_dict['step'] = list(scenario_options.keys())
sa_dict['obj'] = [obj] * len(v)
sa_dict['order'] = [k] * len(v)
sa_dict['sensitivity_idx'] = v
df_temp = pd.DataFrame.from_dict(sa_dict)
if df_results is None:
df_results = df_temp.copy()
else:
df_results = df_results.append(df_temp).reset_index(drop=True)
return df_results
def plot_SA_bar(df_sa, ax1_str='S1', ax2_str='ST',
ax1_title='First order', ax2_title='Total order',
ylabel_str='Sensitivity Index',
ax1_ylim=[0, 0.4], ax2_ylim=[0, 0.8]):
# rcParams.update({'errorbar.capsize': 4})
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=False, figsize=(8, 4))
fig.subplots_adjust(
top=0.904,
bottom=0.286,
left=0.128,
right=0.981,
hspace=0.2,
wspace=0.278)
groups1 = df_sa[df_sa['order'] == ax1_str].groupby('step').sum()
groups2 = df_sa[df_sa['order'] == ax2_str].groupby('step').sum()
pal = sns.color_palette('Blues', len(groups1)+4)
rank1 = groups1['sensitivity_idx'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
rank2 = groups2['sensitivity_idx'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
ax1 = sns.barplot(x='step', y='sensitivity_idx', data=df_sa[df_sa['order'] == ax1_str],
# yerr=Si['S1_conf'], ecolor='#333333',
ax=axes[0], palette=np.array(pal[4:])[rank1])
ax2 = sns.barplot(x='step', y='sensitivity_idx', data=df_sa[df_sa['order'] == ax2_str],
# yerr=Si['ST_conf'], ecolor='#333333',
ax=axes[1], palette=np.array(pal[4:])[rank2])
ax1.set_ylim(ax1_ylim[0], ax1_ylim[1])
ax2.set_ylim(ax2_ylim[0], ax2_ylim[1])
ax1.yaxis.set_major_locator(plt.MaxNLocator(4))
ax2.yaxis.set_major_locator(plt.MaxNLocator(5))
labels_tick = ['Reference panels', 'Crop', 'Clip', 'Smooth', 'Bin', 'Segment']
for ax in [ax1, ax2]:
ax.set_xlabel('', weight='bold', fontsize='large')
ax.set_ylabel(ylabel_str, weight='bold', fontsize='large')
ax.set_xticklabels(labels_tick, weight='bold', fontsize='medium')
ax.set_yticklabels(['{0:.2f}'.format(x) for x in ax.get_yticks().tolist()], weight='bold', fontsize='medium')
plt.setp(ax.get_xticklabels(), rotation=35, ha='right',
rotation_mode='anchor')
ax1.set_title(ax1_title, weight='bold', fontsize='x-large')
ax2.set_title(ax2_title, weight='bold', fontsize='x-large')
return fig, axes
# %% Fig 4: SALib SA FAST
from SALib.sample import fast_sampler
from SALib.analyze import fast
df, Y_file = SALib_load_results()
problem = SALib_get_problem()
param_values = fast_sampler.sample(problem, 1500, M=8, seed=seed)
df_FAST, Y = SALib_set_Y(param_values, df)
Si_FAST = fast.analyze(problem, Y, M=8, print_to_console=False, seed=seed)
df_sa_fast = get_df_results(Si_FAST, scenario_options, obj='rmse')
# %% Fig 4: SALib SA Sobol
from SALib.sample import saltelli
from SALib.analyze import sobol
df, Y = SALib_load_results()
param_values = saltelli.sample(problem, 400, calc_second_order=False, seed=seed)
param_values_round = param_values.round()
# param_values = saltelli.sample(problem, 162, calc_second_order=False, seed=seed).astype(int)
_, Y = SALib_set_Y(param_values_round, df)
problem = SALib_get_problem(as_int=False)
Si_sobol = sobol.analyze(problem, Y, calc_second_order=False, print_to_console=False, seed=seed)
# param_values_file = df[[0, 1, 2, 3, 4, 5]].to_numpy()
df_sa_sobol = get_df_results(Si_sobol, scenario_options, obj='rmse')
# df_sa_rmse = df_sa[df_sa['obj'] == 'rmse']
# %% Fig 4: Plot FAST, Sobol, and ranking for RMSE
def plot_FAST_Sobol_SA(df_sa_fast, df_sa_sobol,
ylabel_str='Sensitivity Index',
ax1_ylim=[0, 0.32], ax2_ylim=[0, 0.6]):
# rcParams.update({'errorbar.capsize': 4})
colors = sns.color_palette(['#a8a8a8', '#dcc457', '#57d4dc']) # grey, gold, and cyan
df_sa_fast['sa_type'] = 'fast'
df_sa_sobol['sa_type'] = 'sobol'
grp_fast_s1 = df_sa_fast[df_sa_fast['order'] == 'S1'].groupby('step').sum()
grp_fast_st = df_sa_fast[df_sa_fast['order'] == 'ST'].groupby('step').sum()
grp_sobol_s1 = df_sa_sobol[df_sa_sobol['order'] == 'S1'].groupby('step').sum()
grp_sobol_st = df_sa_sobol[df_sa_sobol['order'] == 'ST'].groupby('step').sum()
df_sa_fast_filter = df_sa_fast[df_sa_fast.order.isin(['S1','ST'])]
df_sa = df_sa_fast_filter.append(df_sa_sobol[df_sa_sobol.order.isin(['S1','ST'])]).reset_index(drop=True)
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=False, figsize=(7.97, 4))
fig.subplots_adjust(
top=0.904,
bottom=0.286,
left=0.128,
right=0.981,
hspace=0.2,
wspace=0.278)
ax1 = sns.barplot(x='step', y='sensitivity_idx', data=df_sa[df_sa['order'] == 'S1'],
hue='sa_type', ax=axes[0])
ax2 = sns.barplot(x='step', y='sensitivity_idx', data=df_sa[df_sa['order'] == 'ST'],
hue='sa_type', ax=axes[1])
# pal_fast = sns.color_palette('Blues', len(grp_fast_s1)+8)
# pal_sobol = sns.color_palette('YlOrBr', len(grp_sobol_s1)+16)
pal_fast = sns.light_palette('#a8a8a8', len(grp_fast_s1)+3)
pal_sobol = sns.light_palette('#597DBF', len(grp_sobol_s1)+3) # blue
# pal_fast = sns.light_palette('#dcc457', as_cmap=True)
# pal_sobol = sns.light_palette('#57d4dc', as_cmap=True)
rank_fast_s1 = grp_fast_s1['sensitivity_idx'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
rank_fast_st = grp_fast_st['sensitivity_idx'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
rank_sobol_s1 = grp_sobol_s1['sensitivity_idx'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
rank_sobol_st = grp_sobol_st['sensitivity_idx'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
# pal_fast_s1 = np.array(pal_fast[6:])[rank_fast_s1]
# pal_fast_st = np.array(pal_fast[6:])[rank_fast_st]
# pal_sobol_s1 = np.array(pal_sobol[7:])[rank_sobol_s1]
# pal_sobol_st = np.array(pal_sobol[7:])[rank_sobol_st]
pal_fast_s1 = np.array(pal_fast[3:])[rank_fast_s1]
pal_fast_st = np.array(pal_fast[3:])[rank_fast_st]
pal_sobol_s1 = np.array(pal_sobol[3:])[rank_sobol_s1]
pal_sobol_st = np.array(pal_sobol[3:])[rank_sobol_st]
for i, (p1, p2) in enumerate(zip(ax1.patches, ax2.patches)):
if i <= 5: # fast
p1.set_color(pal_fast_s1[i])
p2.set_color(pal_fast_st[i])
else: # sobol
p1.set_color(pal_sobol_s1[i-6])
p2.set_color(pal_sobol_st[i-6])
ax1.set_ylim(ax1_ylim[0], ax1_ylim[1])
ax2.set_ylim(ax2_ylim[0], ax2_ylim[1])
ax1.yaxis.set_major_locator(plt.MaxNLocator(4))
ax2.yaxis.set_major_locator(plt.MaxNLocator(5))
labels_tick = ['Reference panels', 'Crop', 'Clip', 'Smooth', 'Bin', 'Segment']
h, l = ax1.get_legend_handles_labels()
for ax in [ax1, ax2]:
ax.legend(handles=h, labels=['FAST', 'Sobol'], prop={'weight': 'bold'})
# ax.legend(handles=label_list[0], labels=label_list[1])
# labels_leg = ax.get_legend().get_texts()
# labels_leg[0].set_text('FAST')
# labels_leg[1].set_text('Sobol')
ax.set_xlabel('', weight='bold', fontsize='large')
ax.set_ylabel(ylabel_str, weight='bold', fontsize='large')
ax.set_xticklabels(labels_tick, weight='bold', fontsize='medium')
ax.set_yticklabels(['{0:.2f}'.format(x) for x in ax.get_yticks().tolist()], weight='bold', fontsize='medium')
plt.setp(ax.get_xticklabels(), rotation=35, ha='right',
rotation_mode='anchor')
ax2.get_legend().remove()
ax1.set_title('First order', weight='bold', fontsize='x-large')
ax2.set_title('Total order', weight='bold', fontsize='x-large')
return fig, axes
# %% Fig 4: Plot FAST, Sobol, and ranking for RMSE and modify
fig, axes = plot_FAST_Sobol_SA(df_sa_fast, df_sa_sobol,
ylabel_str='Sensitivity Index',
ax1_ylim=[0, 0.32], ax2_ylim=[0, 0.6])
# %% Fig 4b: Step importance via stepwise regression
df_stepwise = pd.read_csv(r'G:\BBE\AGROBOT\Shared Work\hs_process_results\results\var_importance_stepwise_all.csv')
df_stepwise['Variable Importance'] = df_stepwise['Variable Importance'] * 100
grp_res_var = df_stepwise[df_stepwise['method'] == 'residual_var'].groupby('step').sum()
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=False, sharey=False, figsize=(3.5, 3.5))
fig.subplots_adjust(
top=0.992,
bottom=0.283,
left=0.259,
right=0.99,
hspace=0.2,
wspace=0.2)
ax1 = sns.barplot(x='step', y='Variable Importance', data=df_stepwise[df_stepwise['method'] == 'residual_var'],
order=['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'], ax=ax)
pal_res_var = sns.light_palette('#597DBF', len(grp_res_var)+3) # blue
rank_res_var = grp_res_var['Variable Importance'].sort_values().argsort().reindex(['dir_panels', 'crop', 'clip', 'smooth', 'bin', 'segment'])
pal_res_var = np.array(pal_res_var[3:])[rank_res_var]
for i, p1 in enumerate(ax.patches):
p1.set_color(pal_res_var[i])
# ax.set_ylim(ax_ylim[0], ax_ylim[1])
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
labels_tick = ['Reference panels', 'Crop', 'Clip', 'Smooth', 'Bin', 'Segment']
h, l = ax.get_legend_handles_labels()
ax.set_xlabel('', weight='bold', fontsize='large')
ax.set_ylabel('Variable Importance (%)', weight='bold', fontsize='large')
ax.set_xticklabels(labels_tick, weight='bold', fontsize='medium')
ax.set_yticklabels(['{0:.0f}'.format(x) for x in ax.get_yticks().tolist()], weight='bold', fontsize='medium')
# ax.set_yticklabels(['{0:.2f}'.format(x) for x in mtick.FixedLocator(ax.get_yticks()).tolist()], weight='bold', fontsize='medium')
# ax.yaxis.set_major_locator(mtick.FixedLocator(ax.get_yticks()))
plt.setp(ax.get_xticklabels(), rotation=35, ha='right',
rotation_mode='anchor')
# ax.get_legend().remove()
# %% Fig 5: Curate stats for cumulative density plots
classes = [14, 15, 16, 17, 18, 19]
array_props = {step: np.empty([len(scenario_options[step]), len(classes)]) for step in scenario_options}
array_props['segment'] = np.delete(array_props['segment'], (-1), axis=0)
cols = ['step', 'scenario', 'class_min_val', 'proportion']
df_props = None
for i_steps, step in enumerate(scenario_options):
for i_scenarios, scenario in enumerate(scenario_options[step]):
if scenario == 'name':
continue
df_scenario = df_opt_filter[df_opt_filter[step] == scenario]
for i_classes, min_val in enumerate(classes):
prop = (len(df_scenario[df_scenario['value'].between(min_val, min_val+1)]) / len(df_scenario)) * 100
array_props[step][i_scenarios-1, i_classes] = prop
data = [step, scenario, min_val, prop]
if df_props is None:
df_props = pd.DataFrame([data], columns=cols)
else:
                df_props = df_props.append(
                    pd.DataFrame([data], columns=cols))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 11:13:50 2020
@author: github.com/sahandv
"""
import gc
import pandas as pd
import numpy as np
# from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
from tqdm import tqdm
from itertools import chain
from scipy import spatial,sparse,sign
step = 6
from_data = step*30000
to_data = from_data+30000
dir_root = '/home/sahand/GoogleDrive/Data/'
# dir_root = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/'
texts = pd.read_csv(dir_root+'Corpus/Scopus new/clean/abstract_title method_b_3')['abstract'].values.tolist()[from_data:to_data]
# texts_b = pd.read_csv(dir_root+'Corpus/Dimensions All/clean/abstract_title method_b_3')['abstract'].values.tolist()[from_data:to_data]
# texts_b = texts+texts_b
# # shared_tags = pd.read_csv(dir_root+'Corpus/cora-classify/cora/embeddings/single_component_small_18k/n2v 300-70-20 p1q05 DEC 500, 1000, 1000, 500, 10 k10 labels - 0')['label'].values.tolist()
# # =============================================================================
# # Train Model
# # =============================================================================
# documents = [TaggedDocument(word_tokenize(doc.lower()), [i]) for i, doc in enumerate(texts_b)]
# model = Doc2Vec(documents, vector_size=300, window=10, min_count=1, dm=1, workers=15, epochs=40)
# fname = dir_root+'Corpus/Dimensions All/models/dim-scop doc2vec 300D dm=1 window=10 b3'
# model.save(fname)
# # shared_tags = pd.read_csv(dir_root+'Corpus/cora-classify/cora/embeddings/single_component_small_18k/n2v 300-70-20 p1q05 DEC 500, 1000, 1000, 500, 10 k10 labels - 0')['label'].values.tolist()
# # =============================================================================
# # Train Model
# # =============================================================================
# documents = [TaggedDocument(word_tokenize(doc.lower()), [i]) for i, doc in enumerate(texts)]
# model = Doc2Vec(documents, vector_size=300, window=10, min_count=1, dm=1, workers=15, epochs=40)
# fname = dir_root+'Corpus/Dimensions All/models/scop doc2vec 300D dm=1 window=10 b3'
# model.save(fname)
# =============================================================================
# Train Model with Tags
# =============================================================================
# tagged_documents = [TaggedDocument(words=word_tokenize(_d.lower()), tags=['cluster_'+str(shared_tags[i]),str(i)]) for i, _d in enumerate(texts)]
# model = Doc2Vec(tagged_documents, size=300, window=10, min_count=1, dm=1, workers=16, epochs=40)
# fname = dir_root+'Corpus/Dimensions All/models/doc2vec 300D dm=1 window=10 tagged'
# model.save(fname)
# =============================================================================
# Test Model
# =============================================================================
fname = dir_root+'Corpus/Dimensions All/models/scop doc2vec 300D dm=1 window=10 b3'
model = Doc2Vec.load(fname)
documents = [word_tokenize(doc.lower()) for doc in tqdm(texts)]
# test_docs2 = [doc.lower().split() for doc in texts] # This is way faster than word_tokenize
# test_docs = test_docs[480000:]
start_alpha=0.01
infer_epoch=1000
X=[]
for d in tqdm(documents):
X.append( model.infer_vector(d, alpha=start_alpha, epochs=infer_epoch))
X_df = pd.DataFrame(X)
X_df.to_csv(dir_root+'Corpus/Dimensions All/embeddings/doc2vec 300D dm=1 window=10 b3 '+str(from_data),index=False)
#%%### =============================================================================
# concat vecs
# =============================================================================
sections = ['30000','60000','90000','120000','150000','180000']
data = pd.read_csv(dir_root+'Corpus/Dimensions All/embeddings/doc2vec 300D dm=1 window=10 b3 0')
for section in tqdm(sections):
data = data.append(pd.read_csv(dir_root+'Corpus/Dimensions All/embeddings/doc2vec 300D dm=1 window=10 b3 '+section),ignore_index=True)
data = data.reset_index(drop=True)
data.to_csv(dir_root+'Corpus/Scopus new/embeddings/doc2vec 300D dm=1 window=10 b3',index=False)
# =============================================================================
# Get keyword embedding
# =============================================================================
directory = dir_root+'Corpus/Taxonomy/'
file_name = 'CSO.3.3-with-labels-US-lem.csv'
corpus = pd.read_csv(directory+file_name)
import argparse
import logging
import multiprocessing as mp
import os
import pickle
import re
import sys
import warnings
from datetime import datetime
from itertools import product
import pandas as pd
import tabulate
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from greenguard import get_pipelines
from greenguard.demo import load_demo
from greenguard.loaders import CSVLoader
from greenguard.metrics import (METRICS, accuracy_score, f1_score,
fpr_score, tpr_score, threshold_score)
from greenguard.pipeline import GreenGuardPipeline, generate_init_params, generate_preprocessing
from greenguard.results import load_results, write_results
LOGGER = logging.getLogger(__name__)
DEFAULT_TUNING_METRIC_KWARGS = {'threshold': 0.5}
LEADERBOARD_COLUMNS = [
'problem_name',
'window_size',
'resample_rule',
'template',
'default_test',
'default_cv',
'tuned_cv',
'tuned_test',
'tuning_metric',
'tuning_metric_kwargs',
'fit_predict_time',
'default_cv_time',
'average_cv_time',
'total_time',
'status',
]
def _scorer(metric, metric_args):
if isinstance(metric, str):
metric, cost = METRICS[metric]
def f(expected, observed):
try:
return metric(expected, observed, **metric_args)
except TypeError:
if 'threshold' not in metric_args:
raise
kwargs = metric_args.copy()
threshold = kwargs.pop('threshold')
observed = observed >= threshold
return metric(expected, observed, **kwargs)
return f
def _build_init_params(template, window_size, rule, template_params):
if 'dfs' in template:
window_size_rule_params = {
'pandas.DataFrame.resample#1': {
'rule': rule,
},
'featuretools.dfs.json#1': {
'training_window': window_size,
}
}
elif 'lstm' in template:
window_size_rule_params = {
'pandas.DataFrame.resample#1': {
'rule': rule,
},
'mlprimitives.custom.timeseries_preprocessing.cutoff_window_sequences#1': {
'window_size': window_size,
}
}
for primitive, params in window_size_rule_params.items():
primitive_params = template_params.setdefault(primitive, {})
primitive_params.update(params)
return template_params
def evaluate_template(
template,
target_times,
readings,
tuning_iterations=50,
init_params=None,
preprocessing=0,
metrics=None,
threshold=None,
tpr=None,
tuning_metric='roc_auc_score',
tuning_metric_kwargs=DEFAULT_TUNING_METRIC_KWARGS,
cost=False,
cv_splits=3,
test_size=0.25,
random_state=0,
cache_path=None,
scores={}
):
"""Returns the scores for a given template.
Args:
template (str):
Given template to evaluate.
target_times (DataFrame):
            Contains the specification of the problem that we are solving, which has three columns:
* turbine_id: Unique identifier of the turbine which this label corresponds to.
* cutoff_time: Time associated with this target.
* target: The value that we want to predict. This can either be a numerical value
or a categorical label. This column can also be skipped when preparing
data that will be used only to make predictions and not to fit any
pipeline.
readings (DataFrame):
Contains the signal data from different sensors, with the following columns:
* turbine_id: Unique identifier of the turbine which this reading comes from.
* signal_id: Unique identifier of the signal which this reading comes from.
* timestamp (datetime): Time where the reading took place, as a datetime.
* value (float): Numeric value of this reading.
metric (function or str):
            Metric to use. If a ``str`` is given, it must be one of the metrics
defined in the ``greenguard.metrics.METRICS`` dictionary.
tuning_iterations (int):
Number of iterations to be used.
preprocessing (int, list or dict):
Number of preprocessing steps to be used.
init_params (list):
Initialization parameters for the pipeline.
cost (bool):
            Whether the metric is a cost function (the lower the better) or not.
test_size (float):
Percentage of the data set to be used for the test.
cv_splits (int):
Amount of splits to create.
random_state (int):
Random number of train_test split.
cache_path (str):
If given, cache the generated cross validation splits in this folder.
            Defaults to ``None``.
Returns:
scores (dict):
            Stores the four types of scores that are being evaluated.
"""
start_time = datetime.utcnow()
scores['tuning_metric'] = str(tuning_metric)
scores['tuning_metric_kwargs'] = tuning_metric_kwargs
tuning_metric = _scorer(tuning_metric, tuning_metric_kwargs)
train, test = train_test_split(target_times, test_size=test_size, random_state=random_state)
pipeline = GreenGuardPipeline(
template,
metric=tuning_metric,
cost=cost,
cv_splits=cv_splits,
init_params=init_params,
preprocessing=preprocessing,
cache_path=cache_path
)
# Computing the default test score
fit_predict_time = datetime.utcnow()
pipeline.fit(train, readings)
predictions = pipeline.predict(test, readings)
fit_predict_time = datetime.utcnow() - fit_predict_time
scores['default_test'] = tuning_metric(test['target'], predictions)
# Computing the default cross validation score
default_cv_time = datetime.utcnow()
session = pipeline.tune(train, readings)
session.run(1)
default_cv_time = datetime.utcnow() - default_cv_time
scores['default_cv'] = pipeline.cv_score
# Computing the cross validation score with tuned hyperparameters
average_cv_time = datetime.utcnow()
session.run(tuning_iterations)
average_cv_time = (datetime.utcnow() - average_cv_time) / tuning_iterations
scores['tuned_cv'] = pipeline.cv_score
# Computing the test score with tuned hyperparameters
pipeline.fit(train, readings)
predictions = pipeline.predict(test, readings)
ground_truth = test['target']
# compute different metrics
if tpr:
tpr = tpr if isinstance(tpr, list) else [tpr]
for value in tpr:
            threshold = threshold_score(ground_truth, predictions, value)
            scores[f'fpr_tpr/{value}'] = fpr_score(ground_truth, predictions, tpr=value)
predictions_classes = predictions >= threshold
scores[f'accuracy_tpr/{value}'] = accuracy_score(ground_truth, predictions_classes)
scores[f'f1_tpr/{value}'] = f1_score(ground_truth, predictions_classes)
scores[f'threshold_tpr/{value}'] = threshold_score(ground_truth, predictions, value)
if f'accuracy_tpr/{value}' not in LEADERBOARD_COLUMNS:
LEADERBOARD_COLUMNS.extend([
f'accuracy_tpr/{value}',
f'f1_tpr/{value}',
f'fpr_tpr/{value}',
f'threshold_tpr/{value}',
])
else:
threshold = 0.5 if threshold is None else threshold
threshold = threshold if isinstance(threshold, list) else [threshold]
for value in threshold:
scores[f'fpr_threshold/{value}'] = fpr_score(
ground_truth, predictions, threshold=value)
            predictions_classes = predictions >= value
scores[f'accuracy_threshold/{value}'] = accuracy_score(
ground_truth, predictions_classes)
scores[f'f1_threshold/{value}'] = f1_score(ground_truth, predictions_classes)
scores[f'tpr_threshold/{value}'] = tpr_score(ground_truth, predictions, value)
if f'accuracy_threshold/{value}' not in LEADERBOARD_COLUMNS:
LEADERBOARD_COLUMNS.extend([
f'accuracy_threshold/{value}',
f'f1_threshold/{value}',
f'fpr_threshold/{value}',
f'tpr_threshold/{value}',
])
scores['tuned_test'] = tuning_metric(test['target'], predictions)
scores['fit_predict_time'] = fit_predict_time
scores['default_cv_time'] = default_cv_time
scores['average_cv_time'] = average_cv_time
scores['total_time'] = datetime.utcnow() - start_time
return scores
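# A minimal, illustrative sketch (not part of the original module) of the two input
# tables that ``evaluate_template`` expects; the turbine/signal ids and values below
# are hypothetical and only show the column layout described in the docstring above:
#
#   import pandas as pd
#   target_times = pd.DataFrame({
#       'turbine_id': ['T001', 'T001'],
#       'cutoff_time': pd.to_datetime(['2013-01-12', '2013-01-13']),
#       'target': [0, 1],
#   })
#   readings = pd.DataFrame({
#       'turbine_id': ['T001', 'T001'],
#       'signal_id': ['S01', 'S01'],
#       'timestamp': pd.to_datetime(['2013-01-11 12:00', '2013-01-11 13:00']),
#       'value': [817.0, 805.0],
#   })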
def evaluate_templates(
templates,
window_size_rule,
tuning_iterations=50,
init_params=None,
preprocessing=0,
metrics=None,
threshold=None,
tpr=None,
tuning_metric='roc_auc_score',
tuning_metric_kwargs=DEFAULT_TUNING_METRIC_KWARGS,
target_times=None,
readings=None,
cost=False,
test_size=0.25,
cv_splits=3,
random_state=0,
cache_path=None,
cache_results=None,
problem_name=None,
output_path=None,
progress_bar=None,
multiprocess=False
):
"""Execute the benchmark process and optionally store the result as a ``CSV``.
Args:
templates (list):
List of templates to try.
window_size_rule (list):
List of tuples (int, str or Timedelta object).
metric (function or str):
            Metric to use. If a ``str`` is given, it must be one of the metrics
defined in the ``greenguard.metrics.METRICS`` dictionary.
tuning_iterations (int):
Number of iterations to be used.
init_params (dict):
Initialization parameters for the pipelines.
target_times (DataFrame):
            Contains the specification of the problem that we are solving, which has three columns:
* turbine_id: Unique identifier of the turbine which this label corresponds to.
* cutoff_time: Time associated with this target.
* target: The value that we want to predict. This can either be a numerical value
or a categorical label. This column can also be skipped when preparing
data that will be used only to make predictions and not to fit any
pipeline.
readings (DataFrame):
Contains the signal data from different sensors, with the following columns:
* turbine_id: Unique identifier of the turbine which this reading comes from.
* signal_id: Unique identifier of the signal which this reading comes from.
* timestamp (datetime): Time where the reading took place, as a datetime.
* value (float): Numeric value of this reading.
preprocessing (int, list or dict):
Number of preprocessing steps to be used.
cost (bool):
            Whether the metric is a cost function (the lower the better) or not.
test_size (float):
Percentage of the data set to be used for the test.
cv_splits (int):
Amount of splits to create.
random_state (int):
Random number of train_test split.
output_path (str):
Path where to save the benchmark report.
cache_path (str):
If given, cache the generated cross validation splits in this folder.
            Defaults to ``None``.
Returns:
pandas.DataFrame or None:
If ``output_path`` is ``None`` it will return a ``pandas.DataFrame`` object,
else it will dump the results in the specified ``output_path``.
Example:
>>> from sklearn.metrics import f1_score
>>> templates = [
... 'normalize_dfs_xgb_classifier',
... 'unstack_lstm_timeseries_classifier'
... ]
>>> window_size_rule = [
... ('30d','12h'),
... ('7d','4h')
... ]
>>> preprocessing = [0, 1]
>>> scores_df = evaluate_templates(
... templates=templates,
... window_size_rule=window_size_rule,
... metric=f1_score,
... tuning_iterations=5,
... preprocessing=preprocessing,
... cost=False,
... test_size=0.25,
... cv_splits=3,
... random_state=0
... )
>>> scores_df
template window_size resample_rule default_test default_cv tuned_cv tuned_test status
0 unstack_lstm_timeseries_classifier 30d 12h 0.720000 0.593634 0.627883 0.775510 OK
1 unstack_lstm_timeseries_classifier 7d 4h 0.723404 0.597440 0.610766 0.745098 OK
2 normalize_dfs_xgb_classifier 30d 12h 0.581818 0.619698 0.637123 0.596491 OK
3 normalize_dfs_xgb_classifier 7d 4h 0.581818 0.619698 0.650367 0.603774 OK
""" # noqa
if readings is None and target_times is None:
target_times, readings = load_demo()
init_params = generate_init_params(templates, init_params)
preprocessing = generate_preprocessing(templates, preprocessing)
scores_list = []
for template, window_rule in product(templates, window_size_rule):
window_size, rule = window_rule
try:
LOGGER.info('Evaluating template %s on problem %s (%s, %s)',
template, problem_name, window_size, rule)
template_params = init_params[template]
template_params = _build_init_params(template, window_size, rule, template_params)
template_preprocessing = preprocessing[template]
if multiprocess:
manager = mp.Manager()
scores = manager.dict()
process = mp.Process(
target=evaluate_template,
args=(
template,
target_times,
readings,
tuning_iterations,
init_params,
preprocessing,
metrics,
threshold,
tpr,
tuning_metric,
tuning_metric_kwargs,
cost,
cv_splits,
test_size,
random_state,
cache_path,
scores
)
)
process.start()
process.join()
if 'tuned_test' not in scores:
scores['status'] = 'ERRORED'
scores = dict(scores) # parse the managed dict to dict for pandas.
else:
scores = dict()
scores['problem_name'] = problem_name
scores['template'] = template
scores['window_size'] = window_size
scores['resample_rule'] = rule
result = evaluate_template(
template=template,
target_times=target_times,
readings=readings,
metrics=metrics,
tuning_metric=tuning_metric,
tuning_metric_kwargs=tuning_metric_kwargs,
threshold=threshold,
tpr=tpr,
tuning_iterations=tuning_iterations,
preprocessing=template_preprocessing,
init_params=template_params,
cost=cost,
test_size=test_size,
cv_splits=cv_splits,
random_state=random_state,
cache_path=cache_path
)
scores.update(result)
scores['status'] = 'OK'
except Exception:
scores['status'] = 'ERRORED'
LOGGER.exception('Could not score template %s ', template)
if cache_results:
os.makedirs(cache_results, exist_ok=True)
template_name = template
if os.path.isfile(template_name):
template_name = os.path.basename(template_name).replace('.json', '')
file_name = '{}_{}_{}_{}.csv'.format(problem_name, template_name, window_size, rule)
df = pd.DataFrame([scores]).reindex(LEADERBOARD_COLUMNS, axis=1)
df.to_csv(os.path.join(cache_results, file_name), index=False)
scores_list.append(scores)
if progress_bar:
progress_bar.update(1)
results = pd.DataFrame.from_records(scores_list)
results = results.reindex(LEADERBOARD_COLUMNS, axis=1)
if output_path:
LOGGER.info('Saving benchmark report to %s', output_path)
results.to_csv(output_path)
else:
return results
def _generate_target_times_readings(target_times, readings_path, window_size, rule, signals):
"""
Returns:
pandas.DataFrame:
Table of readings for the target times, including the columns ``turbine_id``,
``signal_id``, ``timestamp`` and ``value``.
"""
csv_loader = CSVLoader(
readings_path,
rule=rule,
)
return csv_loader.load(target_times, window_size=window_size, signals=signals)
def make_problems(target_times_paths, readings_path, window_size_resample_rule,
output_path=None, signals=None):
"""Make problems with the target times and readings for each window size and resample rule.
Create problems in the accepted format by ``run_benchmark`` as pickle files containing:
* ``target_times``: ``pandas.DataFrame`` containing the target times.
* ``readings``: ``pandas.DataFrame`` containing the readings for the target times.
* ``window_size``: window size value used.
* ``resample_rule``: resample rule value used.
Or return a ``dict`` containing as keys the names of the problems generated and tuples with
the previously specified fields of target times, readings, window size and resample rule.
Args:
target_times_paths (list):
List of paths to CSVs that contain target times.
readings_path (str):
Path to the folder where readings in raw CSV format can be found.
window_size_resample_rule (list):
List of tuples (int, str or Timedelta object).
output_path (str):
Path to save the generated problems.
signals (str):
List of signal names or csv file that has a `signal_id` column to use as the signal
names list.
"""
if isinstance(target_times_paths, str):
target_times_paths = [target_times_paths]
if isinstance(target_times_paths, list):
target_times_paths = {
os.path.basename(path).replace('.csv', ''): path
for path in target_times_paths
}
if output_path:
generated_problems = list()
else:
generated_problems = {}
if isinstance(signals, str) and os.path.exists(signals):
signals = pd.read_csv(signals).signal_id
for problem_name, target_time_path in tqdm(target_times_paths.items()):
for window_size, rule in window_size_resample_rule:
target_times = pd.read_csv(target_time_path, parse_dates=['cutoff_time'])
new_target_times, readings = _generate_target_times_readings(
target_times,
readings_path,
window_size,
rule,
signals=signals,
)
pickle_name = '{}_{}_{}'.format(problem_name, window_size, rule)
if output_path:
os.makedirs(output_path, exist_ok=True)
output_pickle_path = os.path.join(output_path, pickle_name + '.pkl')
with open(output_pickle_path, 'wb') as pickle_file:
pickle.dump((new_target_times, readings, window_size, rule), pickle_file)
generated_problems.append(output_pickle_path)
else:
generated_problems[pickle_name] = (new_target_times, readings, window_size, rule)
return generated_problems
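# Illustrative usage sketch (hypothetical paths, not from the original module): building
# problems for two window-size/resample-rule combinations and caching them to disk:
#
#   problems = make_problems(
#       target_times_paths=['data/T001_target_times.csv'],
#       readings_path='data/readings',
#       window_size_resample_rule=[('30d', '12h'), ('7d', '4h')],
#       output_path='problems',
#   )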
def run_benchmark(templates, problems, window_size_resample_rule=None,
tuning_iterations=50, signals=None, preprocessing=0, init_params=None,
metrics=None, threshold=None, tpr=None, tuning_metric='roc_auc_score',
tuning_metric_kwargs=DEFAULT_TUNING_METRIC_KWARGS, cost=False, cv_splits=5,
test_size=0.33, random_state=0, cache_path=None, cache_results=None,
output_path=None, multiprocess=False):
"""Execute the benchmark function and optionally store the result as a ``CSV``.
This function provides a user-friendly interface to interact with the ``evaluate_templates``
function. It allows the user to specify an ``output_path`` where the results can be
stored. If this path is not provided, a ``pandas.DataFrame`` will be returned.
This function evaluates each template against each problem for each window size and resample
    rule possible, and will tune each template for the given amount of tuning iterations.
The problems can be a pickle file that contains the following values:
* ``target_times``: ``pandas.DataFrame`` containing the target times.
* ``readings``: ``pandas.DataFrame`` containing the readings for the target times.
* ``window_size``: window size value used.
* ``resample_rule``: resample rule value used.
    Or it can be a dictionary containing the problem's name and as values either a path to a pickle
file or a tuple containing the previously specified fields.
Args:
templates (str or list):
Name of the json pipelines that will be evaluated against the problems.
problems (str, list or dict):
There are three possible values for problems:
* ``str``: Path to a given problem stored as a pickle file (pkl).
* ``list``: List of paths to given problems stored as a pickle files (pkl).
* ``dict``: A dict containing as keys the name of the problem and as value the
path to a pickle file or a tuple with target times and readings data
frames and the window size and resample rule used to generate this
problem.
            The pickle files have to contain a tuple with target times and readings data frames and
the window size and resample rule used to generate that problem. We recommend using
the function ``make_problems`` to generate those files.
window_size_resample_rule (list):
List of tuples (int, str or Timedelta object).
tuning_iterations (int):
            Amount of tuning iterations to perform over each template.
signals (str or list):
Path to a csv file containing ``signal_id`` column that we would like to use or a
``list`` of signals that we would like to use. If ``None`` use all the signals from
the readings.
preprocessing (int, dict or list):
There are three possible values for preprocessing:
* ``int``: the value will be used for all templates.
* ``dict`` with the template name as a key and a number as a value, will
be used for that template.
* ``list``: each value will be assigned to the corresponding position of
self.templates.
Defaults to ``0``.
init_params (dict or list):
There are three possible values for init_params:
* Init params ``dict``: It will be used for all templates.
* ``dict`` with the name of the template as a key and dictionary with its
init params.
* ``list``: each value will be assigned to the corresponding position of
self.templates.
Defaults to ``None``.
metric (function or str):
            Metric to use. If a ``str`` is given, it must be one of the metrics
defined in the ``greenguard.metrics.METRICS`` dictionary.
cost (bool):
Whether the metric is a cost function (the lower the better) or not.
Defaults to ``False``.
cv_splits (int):
Number of cross validation folds to use. Defaults to ``5``.
test_size (float):
Amount of data that will be saved for test, represented in percentage between 0 and 1.
random_state (int or RandomState):
random state to use for the cross validation partitioning. Defaults to ``0``.
cache_path (str):
If given, cache the generated cross validation splits in this folder.
            Defaults to ``None``.
cache_results (str):
            If provided, store the progress of each pipeline and each problem while running.
output_path (str):
If provided, store the results to the given filename. Defaults to ``None``.
"""
templates = templates if isinstance(templates, (list, tuple)) else [templates]
results = list()
if isinstance(problems, str):
problems = [problems]
if isinstance(problems, list):
problems = {
os.path.basename(problem).replace('.pkl', ''): problem
for problem in problems
}
if signals is not None:
if isinstance(signals, str) and os.path.exists(signals):
signals = pd.read_csv(signals).signal_id
total_runs = len(templates) * len(problems) * len(window_size_resample_rule or [1])
pbar = tqdm(total=total_runs)
for problem_name, problem in problems.items():
# remove window_size resample_rule nomenclature from the problem's name
problem_name = re.sub(r'\_\d+[DdHhMmSs]', r'', problem_name)
if isinstance(problem, str):
with open(problem, 'rb') as pickle_file:
target_times, readings, orig_window_size, orig_rule = pickle.load(pickle_file)
else:
target_times, readings, orig_window_size, orig_rule = problem
if signals is not None:
readings = readings[readings.signal_id.isin(signals)]
wsrr = window_size_resample_rule or [(orig_window_size, orig_rule)]
orig_window_size = pd.to_timedelta(orig_window_size)
orig_rule = pd.to_timedelta(orig_rule)
for window_size, resample_rule in wsrr:
            # window_size can only be smaller than the pickled window size;
            # the resample rule can only be bigger than the pickled rule
            if (orig_window_size >= pd.to_timedelta(window_size)
"""
Copyright (C) 2020-2021 <NAME> <...>
"""
from typing import List, Optional
import pandas as pd
from tqdm import tqdm
# see: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
#: define conversion for frequency grouping
from repostats.data_io import is_in_time_period
DATETIME_FREQ = {
"D": "%Y-%m-%d",
"W": "%Y-w%W",
"M": "%Y-%m",
"Y": "%Y",
}
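# Illustrative example (not part of the original module): with freq="M" a timestamp is
# grouped by formatting it with the "%Y-%m" pattern, e.g.
#   pd.to_datetime("2020-10-17").strftime(DATETIME_FREQ["M"])  # -> '2020-10'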
def compute_users_summary(items: List[dict], datetime_from: str = None, datetime_to: str = None) -> pd.DataFrame:
"""Aggregate issue/PR affiliations and summary counts.
>>> items = [dict(type='PR', state='closed', author='me', commenters=['me', 'you']),
... dict(type='PR', state='open', author='me', commenters=['me', 'you']),
... dict(type='issue', state='closed', author='me', commenters=['me', 'you']),
... dict(type='PR', state='merged', author='you', commenters=['me', 'you']),
... dict(type='issue', state='open', author='you', commenters=['me', 'you'])]
>>> df = compute_users_summary(items)
>>> df.columns = [c.replace('opened', 'o/').replace('merged', 'm/').replace('commented', 'c/') for c in df.columns]
>>> df # doctest: +NORMALIZE_WHITESPACE
o/ PRs m/ PRs c/ PRs o/ issues m/ issues c/ issues all o/
user
me 2 0 1 1 0 1 3
you 1 1 2 1 0 1 2
"""
assert items, "nothing to do..."
df_items = pd.DataFrame(items)
users_stat = []
for user in tqdm(df_items["author"].unique(), desc="Processing users"):
user_stat = {"user": user}
# parse particular user stats
for tp, df in df_items.groupby("type"):
df_self_author = df[df["author"] == user]
# add selection if it is in range
for c_out, c_in in [("created", "created_at"), ("closed", "closed_at")]:
if c_in not in df_self_author.columns:
df_self_author[c_in] = None
df_self_author[c_out] = [
is_in_time_period(dt, datetime_from=datetime_from, datetime_to=datetime_to)
for dt in df_self_author[c_in]
]
df_merged = df_self_author[df_self_author["state"] == "merged"]
df_not_author = df[df["author"] != user]
user_stat.update(
{
# count only opened cases in such time
f"opened {tp}s": sum(df_self_author["created"]),
# count only closed/merged cases in such time
f"merged {tp}s": sum(df_merged["closed"]),
                    # at this point all comments are already filtered by time, and we need all issues/PRs,
                    # as they can be created before the time window and commented on in the given period...
f"commented {tp}s": sum(df_not_author["commenters"].apply(lambda l: user in l)),
}
)
users_stat.append(user_stat)
# transform to pandas table
df_users = pd.DataFrame(users_stat).set_index(["user"])
df_users["all opened"] = df_users["opened PRs"] + df_users["opened issues"]
df_users.sort_values(["all opened"], ascending=False, inplace=True)
return df_users
def compute_user_comment_timeline(
items: List[dict],
freq: str = "W",
parent_type: Optional[str] = None,
) -> pd.DataFrame:
"""Aggregate comments from all issues/PRs.
>>> items = [dict(created_at='2020-10-05', parent_idx=1, parent_type='issue', author='me'),
... dict(created_at='2020-10-17', parent_idx=2, parent_type='PR', author='me'),
... dict(created_at='2020-10-17', parent_idx=1, parent_type='issue', author='me'),
... dict(created_at='2020-10-29', parent_idx=3, parent_type='issue', author='me'),
... dict(created_at='2020-11-15', parent_idx=3, parent_type='issue', author='you')]
>>> compute_user_comment_timeline(items, freq='M', parent_type='issue') # doctest: +NORMALIZE_WHITESPACE
author me you
created_at
2020-10 2 0
2020-11 0 1
"""
assert freq in DATETIME_FREQ, "unsupported freq format, allowed: %r" % DATETIME_FREQ.keys()
def _reformat(dt):
        return pd.to_datetime(dt).strftime(DATETIME_FREQ[freq])
### This is type of content-based recommender system
### Steps:
# - represent the documents in form of vectors
# - find the cosine similarity between the documents and form a similarity matrix
# - prepare the document-term matrix (indexing) for fast access
# - get the most similar documents
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pickle
## Load the data
dataframe = pd.read_csv('./models/output/results_dataframe.csv')
# Cast as a list of values for calculating weights
text_data = dataframe['processed_text'].values.tolist()
# Calculate TF-IDF matrix
def tf_idf(search_keys, data):
## Load from the saved vectorizer later
## TFIDF vectorize the data
tf_idf_vectorizer = pickle.load(open('./models/output/tf_idf_vectorizer.pkl', 'rb'))
tfidf_weights_matrix = tf_idf_vectorizer.transform(data).toarray()
search_query_weights = tf_idf_vectorizer.transform([search_keys]).toarray()
## Dimension reduction of data
pca_reduction_model = pickle.load(open('./models/output/pca_method.pkl', 'rb'))
dimension_reduced_query = pca_reduction_model.transform(search_query_weights)
dimension_reduced_data = pca_reduction_model.transform(tfidf_weights_matrix)
return dimension_reduced_query, dimension_reduced_data
# Calculate the cosine similarity between search query and TF-IDF vectors
def cos_similarity(search_query_weights, tfidf_weights_matrix):
cosine_sim = cosine_similarity(search_query_weights, tfidf_weights_matrix)
similarity_list = cosine_sim[0]
return similarity_list
# Calculate number of relevant vectors
def calculate_num_vectors(cosine_similarity):
num = 0
for i in cosine_similarity:
if i != 0.0:
num += 1
return num
# Calculate the most relevant vectors
def most_similar(similarity_list, N):
most_similar = []
while N > 0:
tmp_index = np.argmax(similarity_list)
most_similar.append(tmp_index)
similarity_list[tmp_index] = 0
N -= 1
return most_similar
# Create weights at specific index for quick retrieval
def create_matrix_dict(cosine_similarity):
matrix_dict = {}
iter_counter = 0
for i in cosine_similarity:
matrix_dict[iter_counter] = i
iter_counter += 1
return matrix_dict
# -----------
# Return the articles with relevant search term
def return_relevant_articles(search_term, cluster_dataframe = None):
# Create local variables
# convert_documents to vector representations
    if cluster_dataframe is not None and cluster_dataframe.shape[0] != 0:
cluster_text_data = cluster_dataframe['processed_text'].values.tolist()
search, matrix = tf_idf(search_term, cluster_text_data)
dataframe_copy = cluster_dataframe
else:
search, matrix = tf_idf(search_term, text_data)
dataframe_copy = dataframe
# Find the cosine similarity
cosine_sim_list = cos_similarity(search, matrix)
# Get the number of relevant documents
num_relevant_vectors = calculate_num_vectors(cosine_sim_list)
# Prepare the " indexing " (one of stages in web information retrieval) for faster retrieval
# (Similar concept is also used by the Google, namely stored as document-term matrix)
dictionary = create_matrix_dict(cosine_sim_list)
# Get the most similar items
list_of_most_similar = most_similar(cosine_sim_list, num_relevant_vectors)
df = pd.DataFrame()
for index in list_of_most_similar:
article = dataframe_copy.iloc[index]
if df.empty:
to_dataframe = article.to_frame()
df = to_dataframe.T
else:
to_dataframe = article.to_frame()
            df = pd.concat([df, to_dataframe.T], join='outer')
    return df
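# Illustrative usage sketch (hypothetical query, not part of the original script),
# assuming the function above returns the assembled DataFrame of relevant articles:
#
#   relevant = return_relevant_articles('air quality sensing', cluster_dataframe=None)
#   print(relevant['processed_text'].head())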
"""Inference benchmark tool for TensorFlow Serving and Triton."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import csv
from requests.sessions import default_headers
import distribution
import functools
import io
import numbers
import os
import pandas as pd
import queue as Queue
import requests as r
import threading
import multiprocessing
import grpc  # required for grpc.Compression in get_grpc_compression()
import time
import numpy as np
# Disable GPU, so tensorflow initializes faster
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow.compat.v1 as tf
from google.protobuf.json_format import Parse as ProtoParseJson
from tensorflow.core.framework import types_pb2
from tensorflow.python.platform import gfile
from itertools import cycle, islice
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from clients import base_client
tf.app.flags.DEFINE_integer("num_requests", 20, "Total # of requests sent.")
tf.app.flags.DEFINE_integer(
"num_warmup_requests", 0,
"Number requests to send before starting benchmark.")
tf.app.flags.DEFINE_string(
"qps_range",
"",
"Desired client side request QPS in"
"one of the following formats:"
" - qps - benchmark at one QPS"
" - start, stop - benchmark at QPS range [start, stop)"
" - start, stop, step - benchmark at QPS range [start, stop) with step"
" - [qps1, qps2] - benchmark at give QPS range values",
)
tf.app.flags.DEFINE_float("request_timeout", 300.0,
"Timeout for inference request.")
tf.app.flags.DEFINE_string("model_name", "",
"Name of the model being served on the ModelServer")
tf.app.flags.DEFINE_string("model_version", "",
"Version the model being served on the ModelServer")
tf.app.flags.DEFINE_string(
"signature_name",
"serving_default",
"Name of the model signature on the ModelServer",
)
tf.app.flags.DEFINE_string(
"host", "localhost",
"Host name to connect to, localhost by default. Alternatively you can specify"
"full URL to send requests to.")
tf.app.flags.DEFINE_integer("port", None, "Port to connect to.")
tf.app.flags.DEFINE_enum(
"mode",
"grpc",
["grpc", "sync_grpc", "rest", "triton_grpc", "triton_rest", "vertex_gapic",
"vertex_rest"],
"Benchmark mode: gRPC, synchronous gRPC, or REST, or Triton format.",
)
tf.app.flags.DEFINE_enum("distribution", "uniform",
["uniform", "poisson", "pareto"], "Distribution")
tf.app.flags.DEFINE_string(
"tfrecord_dataset_path", "",
"The path to data in tfrecord or tfrecord.gz format.")
tf.app.flags.DEFINE_string(
"requests_file_path",
"",
"The path the predict_pb2.PredictRequest requests file serialized in json format.",
)
tf.app.flags.DEFINE_string("jsonl_file_path", "",
"The path the dataset file in jsonl format.")
tf.app.flags.DEFINE_string("input_name", "input",
"The name of the model input tensor.")
tf.app.flags.DEFINE_integer("batch_size", None, "Per request batch size.")
tf.app.flags.DEFINE_integer("workers", 1, "Number of workers.")
tf.app.flags.DEFINE_string(
"api_key", "",
"API Key for ESP service if authenticating external requests.")
tf.app.flags.DEFINE_string("csv_report_filename", "",
"Optional filename to generate report")
tf.app.flags.DEFINE_string("title", "", "Optional title for the report")
tf.app.flags.DEFINE_enum("grpc_compression", "none",
["none", "deflate", "gzip"], "grpc compression")
tf.app.flags.DEFINE_string("authorization_header", "",
"Authorization header for REST requests.")
tf.app.flags.DEFINE_string("grpc_destination", "",
"gRPC destination metadata header.")
tf.app.flags.DEFINE_string("default_int_type", "",
"Default type to use for integer values.")
tf.app.flags.DEFINE_string("default_float_type", "",
"Default type to use for fractional values.")
FLAGS = tf.app.flags.FLAGS
def get_client_class():
if FLAGS.mode == "grpc":
from clients import tensorflow_serving_grpc
return tensorflow_serving_grpc.TensorflowServingGrpc
elif FLAGS.mode == "sync_grpc":
from clients import tensorflow_serving_sync_grpc
return tensorflow_serving_sync_grpc.TensorflowServingSyncGrpc
elif FLAGS.mode == "rest":
from clients import tensorflow_serving_rest
return tensorflow_serving_rest.TensorflowServingRest
elif FLAGS.mode == "triton_rest":
from clients import triton_rest
return triton_rest.TritonRest
elif FLAGS.mode == "triton_grpc":
from clients import triton_grpc
return triton_grpc.TritonGrpc
elif FLAGS.mode == "vertex_gapic":
from clients import vertex_gapic
return vertex_gapic.VertexGapic
elif FLAGS.mode == "vertex_rest":
from clients import vertex_rest
return vertex_rest.VertexRest
else:
raise ValueError("Invalid mode")
def get_grpc_compression():
if FLAGS.grpc_compression == "gzip":
return grpc.Compression.Gzip
elif FLAGS.grpc_compression == "deflate":
return grpc.Compression.Deflate
else:
return None
def get_qps_range(qps_range_string):
  qps_range_string = qps_range_string.strip()
  if qps_range_string.startswith("[") and qps_range_string.endswith("]"):
    qps_range_string = qps_range_string.lstrip("[").rstrip("]")
    qps_range_list = list(map(lambda v: float(v), qps_range_string.split(",")))
    return qps_range_list
  qps_range_list = list(map(lambda v: float(v), qps_range_string.split(",")))
  if len(qps_range_list) == 1:
    return [qps_range_list[0]]
  elif len(qps_range_list) == 2:
    # np.arange accepts float start/stop values, unlike the built-in range
    return np.arange(qps_range_list[0], qps_range_list[1])
  elif len(qps_range_list) == 3:
    return np.arange(qps_range_list[0], qps_range_list[1], qps_range_list[2])
  else:
    raise ValueError("Invalid argument qps_range:" + qps_range_string)
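# Illustrative examples (assuming the np.arange-based implementation above):
#   get_qps_range("100")             -> [100.0]
#   get_qps_range("100, 500")        -> array([100., 101., ..., 499.])
#   get_qps_range("100, 500, 100")   -> array([100., 200., 300., 400.])
#   get_qps_range("[100, 250, 500]") -> [100.0, 250.0, 500.0]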
def merge_results(results, result):
for key, value in result.items():
if not key.startswith("_"):
if key not in results:
results[key] = [value]
else:
results[key].append(value)
def merge_worker_results(worker_results):
success = 0
error = 0
reqested_qps = 0
start_time = []
end_time = []
latency = []
avg_miss_rate_percent = []
for worker_result in worker_results:
success += worker_result["success"]
error += worker_result["error"]
reqested_qps += worker_result["reqested_qps"]
avg_miss_rate_percent.append(worker_result["avg_miss_rate_percent"])
latency.extend(worker_result["_latency"])
start_time.append(worker_result["_start_time"])
end_time.append(worker_result["_end_time"])
time = np.max(end_time) - np.min(start_time)
return {
"reqested_qps": reqested_qps,
"actual_qps": (success + error) / time,
"success": success,
"error": error,
"time": time,
"avg_latency": np.average(latency) * 1000,
"p50": np.percentile(latency, 50) * 1000,
"p90": np.percentile(latency, 90) * 1000,
"p99": np.percentile(latency, 99) * 1000,
"avg_miss_rate_percent": np.average(avg_miss_rate_percent),
}
def print_result(result):
v = []
for key, value in result.items():
if not key.startswith("_"):
if "float" in str(type(value)):
v.append("{}: {:.2f}".format(key, value))
else:
v.append("{}: {}".format(key, value))
tf.logging.info("\t".join(v))
def main(argv):
del argv
tf.disable_v2_behavior()
if FLAGS.qps_range is None or FLAGS.qps_range == "":
tf.logging.error("Please specify qps_range")
exit(1)
request_path = None
request_format = None
if FLAGS.tfrecord_dataset_path != "":
request_format = base_client.RequestFormat.TFRECORD
request_path = FLAGS.tfrecord_dataset_path
elif FLAGS.requests_file_path != "":
request_format = base_client.RequestFormat.FILE
request_path = FLAGS.requests_file_path
elif FLAGS.jsonl_file_path != "":
request_format = base_client.RequestFormat.DICTIONARY
request_path = FLAGS.jsonl_file_path
else:
raise ValueError(
"Either tfrecord_dataset_path or requests_file_path flag has to be specified"
)
http_headers = {}
if FLAGS.authorization_header:
http_headers["authorization"] = FLAGS.authorization_header
grpc_metadata = []
if FLAGS.api_key:
grpc_metadata.append(("x-api-key", FLAGS.api_key))
if FLAGS.grpc_destination:
grpc_metadata.append(("grpc-destination", FLAGS.grpc_destination))
client_class = get_client_class()
client = client_class(FLAGS.host, FLAGS.port, FLAGS.model_name,
FLAGS.model_version, FLAGS.signature_name,
FLAGS.distribution, FLAGS.input_name,
FLAGS.default_int_type,
FLAGS.default_float_type, http_headers, grpc_metadata,
get_grpc_compression(), FLAGS.request_timeout)
tf.logging.info("Loading data")
requests_list = client.get_requests(request_format, request_path,
FLAGS.num_warmup_requests,
FLAGS.batch_size)
if len(requests_list) < FLAGS.workers * FLAGS.num_requests:
tf.logging.warn("Dataset you specified contains data for {} requests, "
"while you need {} requests for each of {} workers. "
"Some requests are going to be reused.".format(
len(requests_list), FLAGS.num_requests, FLAGS.workers))
results = {}
if FLAGS.num_warmup_requests > 0:
tf.logging.info("Sending {} warmup requests".format(
FLAGS.num_warmup_requests))
warmup_requests = islice(cycle(requests_list), FLAGS.num_warmup_requests)
_ = client.run(warmup_requests, FLAGS.num_warmup_requests,
get_qps_range(FLAGS.qps_range)[0])
tf.logging.info("Warmup complete")
if FLAGS.workers == 1:
for qps in get_qps_range(FLAGS.qps_range):
worker_requests = islice(cycle(requests_list), FLAGS.num_requests)
result = client.run(worker_requests, FLAGS.num_requests, qps)
print_result(result)
merge_results(results, result)
else:
def _worker_load_test_func(qps, worker_results, worker_index):
worker_requests = islice(
cycle(requests_list),
worker_index * FLAGS.num_requests,
(worker_index + 1) * FLAGS.num_requests,
)
worker_results[worker_index] = client.run(worker_requests,
FLAGS.num_requests, qps)
for qps in get_qps_range(FLAGS.qps_range):
worker_processes = []
with multiprocessing.Manager() as manager:
worker_results = manager.list()
for worker_index in range(FLAGS.workers):
worker_process = multiprocessing.Process(
target=_worker_load_test_func,
args=(qps, worker_results, worker_index))
worker_processes.append(worker_process)
worker_results.append({})
worker_process.start()
for worker_process in worker_processes:
worker_process.join()
result = merge_worker_results(worker_results)
print_result(result)
merge_results(results, result)
if FLAGS.title and "reqested_qps" in results and len(
results["reqested_qps"]) > 0:
results["title"] = [""] * len(results["reqested_qps"])
results["title"][0] = FLAGS.title
  df = pd.DataFrame.from_dict(results)
"""
this file is for loose exploration, the datasets are not included in the files stream.
Author: <NAME>
Created: 6/01/2021 10:13 AM
"""
import netCDF4 as nc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import glob
import os
lat, lon = -43.372, 172.333
def get_sim(sim_path, lat=lat, lon=lon, rain_mm=True):
variables = [
'pstar',
'sw_flux',
'wind_speed',
'precipitation',
'mslp',
'rh_max',
'tmax',
'dewpoint',
'rh_min',
'spec_h',
'wind_u',
'tmin',
'wind_v',
'soil_moisture',
]
data = nc.Dataset(sim_path)
dates = np.array(nc.num2date(data.variables['time0'], data.variables['time0'].units))
dates = pd.to_datetime([e._to_real_datetime() for e in dates])
    outdata = pd.DataFrame(index=dates, columns=variables)
# env: py3
# Author: <NAME>
import pandas as pd
import datetime
import urllib
from urllib.request import urlopen
def AirNow():
baseURL = "http://www.airnowapi.org/aq/forecast/"
api_key = '###YOUR_API_KEY###'
#date = '2018-08-04'
# get the current date as input
now = datetime.datetime.now()
date = str(now)
miles = 25
dfs = list()
text_file = open("INPUT.txt", "r")
latlong = text_file.read().split(' ')
text_file.close()
lat = latlong[0::2]
lon = latlong[1::2]
for lats, lons in zip(lat, lon):
latlonURL = baseURL + "latLong/?" + urllib.parse.urlencode({
'format': 'application/json',
'latitude': lats,
'longitude': lons,
'date': date[:10],
'distance': miles,
'API_KEY': api_key
})
response = urlopen(latlonURL).read().decode('utf-8')
        df = pd.read_json(response)
#!/usr/bin/env python
"""
Executable that reads in a list of patient IDs, finds specified imagery,
converts them to NiFTI format, skullstrips and coregisters them (if desired).
Also does FAST segmentation
Takes the following arguments in the shell
-i : a comma-delimited csv containing columns of pt IDs
-f : the folder that contains subfolder of pt data. can be nested
-t : the folder to write each pts data to
-o : an integer bool (0 or 1) indicating whether to overwrite a pts data
if they already have a subfolder in the target folder
The script will output a txt file to the target folder indicating what pts, if
any, could not be located, and if any FLAIR or T1 data could not be found.
Example use:
/Users/manusdonahue/Documents/Sky/repositories/neurosegment/bin/move_and_prepare -i /Users/manusdonahue/Documents/Sky/stroke_segmentations_playground/pts_of_interest.csv -t /Users/manusdonahue/Documents/Sky/stroke_segmentations_playground/pt_data -f /Volumes/DonahueDataDrive/Data_sort/SCD_Grouped -o 0
"""
import os
import sys
import getopt
import glob
import shutil
from datetime import datetime
from contextlib import contextmanager
from time import time, sleep
import pandas as pd
import numpy as np
np.random.seed(0)
# I am a liar: this script is now accessed directly rather than as a bash command
overwrite = 0
infile = '/Users/manusdonahue/Documents/Sky/nigeria_mra/orig_report_labels.csv'
targetfolder = '/Users/manusdonahue/Documents/Sky/nigeria_mra/data/'
filefolder = '/Volumes/DonahueDataDrive/Data_sort/SCD_Grouped/'
skullstrip_f_val = 0.15
n_healthy = 100
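# --- Illustrative sketch only: the module docstring documents -i/-f/-t/-o shell
# flags, but this script is now run directly with the hard-coded values above.
# If it were still invoked from the shell, the flags could be parsed roughly like
# this (hypothetical helper, not called anywhere below):
def _parse_cli_flags(argv):
    opts, _ = getopt.getopt(argv, 'i:f:t:o:')
    parsed = dict(opts)
    # maps onto infile, filefolder, targetfolder and overwrite above
    return parsed['-i'], parsed['-f'], parsed['-t'], bool(int(parsed.get('-o', '0')))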
##### the following variables generally just need to be set once
# column names in the csv that contain pt IDs of interest
#pt_id_cols = ['MRI 1 - MR ID', 'MRI 2 - MR ID', 'MRI 3 - MR ID']
pt_id_cols = ['MRI 1 - MR ID']
pt_id_cols_alt = ['Alternate MR ID 1']
rect_name = ['MR 1 ID Rectified']
# dcm2nii is an executable packaged with MRIcron that can be used to turn par-recs into NiFTIs
path_to_dcm2nii = '/Users/manusdonahue/Documents/Sky/mricron/dcm2nii64'
mni_standard = '/usr/local/fsl/data/standard/MNI152_T1_1mm_brain.nii.gz'
# relates a unique sequence in a filename that can be used to identify a file
# as a certain type of image to the basename for the output, whether and what to
# register them to, and whether to skullstrip them
# signatures work such that if the key is found in the filename and the excl
# strings are NOT found in the filename, then that file is IDd as that signature
# note that duplicate patients who were given an alternate study ID need to be manually removed
signature_relationships = {('MRA_COW','TOF_HEAD'):
{'basename': 'headMRA', 'register': 'master', 'skullstrip': 'no', 'excl':['MIP'], 'optional':False},
('MIP*MRA_COW','MIP*TOF_HEAD'):
{'basename': 'headMRA_mip', 'register': 'no', 'skullstrip': 'no', 'excl':[], 'optional':True}
}
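# Illustrative sketch of the matching rule described in the comments above (an
# assumption spelled out for clarity, not called by the pipeline, which matches
# via glob further down): a file is ID'd as a signature when any of its
# sub-signature patterns occurs in the filename and none of the 'excl' strings do.
# Sub-signatures may contain '*' wildcards, hence fnmatch.
import fnmatch
def _matches_signature(filename, subsignatures, excl):
    hit = any(fnmatch.fnmatch(filename, '*{}*'.format(s)) for s in subsignatures)
    return hit and not any(e in filename for e in excl)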
"""
signature_relationships = {('MRA_COW','TOF_HEAD'):
{'basename': 'headMRA', 'register': 'master', 'skullstrip': 'no', 'excl':['WIP_MIP', 'MIP_WIP'], 'optional':True},
('TOF_NECK',):
{'basename': 'neckMRA', 'register': 'no', 'skullstrip': 'no', 'excl':['WIP_MIP', 'MIP_WIP', 'VWIP'], 'optional':True}
}
signature_relationships = {('MRA_COW',):
{'basename': 'headMRA', 'register': 'master', 'skullstrip': 'no', 'excl':['WIP_MIP', 'MIP_WIP']},
('lica',):
{'basename': 'pc_lica', 'register': 'no', 'skullstrip': 'no', 'excl':[]},
('rica',):
{'basename': 'pc_rica', 'register': 'no', 'skullstrip': 'no', 'excl':[]},
('lvert',):
{'basename': 'pc_lvert', 'register': 'no', 'skullstrip': 'no', 'excl':[]},
('rvert',):
{'basename': 'pc_rvert', 'register': 'no', 'skullstrip': 'no', 'excl':[]},
}
"""
def any_in_str(s, l):
"""
Returns whether any of a list of substrings is in a string
Parameters
----------
s : str
string to look for substrings in.
l : list of str
substrings to check for in s.
Returns
-------
bool
"""
return any([substr in s for substr in l])
start = time()
@contextmanager
def suppress_stdout():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def get_terminal(path):
"""
Takes a filepath or directory tree and returns the last file or directory
Parameters
----------
path : path
path in question.
Returns
-------
str of only the final file or directory.
"""
return os.path.basename(os.path.normpath(path))
successful = 0
# datetime object containing current date and time
now = datetime.now()
dt_string = now.strftime("%d-%m-%y-%H+%M")
message_file_name = os.path.join(targetfolder, f'move_and_prepare_messages_{dt_string}.txt')
df_file_name = os.path.join(targetfolder, f'move_and_prepare_tabular_{dt_string}.csv')
trimmed_file_name = os.path.join(targetfolder, f'pt_data.csv')
message_file = open(message_file_name, 'w')
message_file.write('Status messages for move_and_prepare\n\nSignatures')
for key, val in signature_relationships.items():
message_file.write(f'\n{key}\n\t{str(val)}')
message_file.write('\n\n\n')
#extract pt IDs
raw_data = pd.read_csv(infile)
for i,val in enumerate(pt_id_cols):
mrs = raw_data[pt_id_cols[i]]
mrs_alt = raw_data[pt_id_cols_alt[i]]
raw_data[rect_name[i]] = mrs_alt.combine_first(mrs)
#not_excluded = raw_data['Exclude from Analysis (choice=exclude)'] == 'Unchecked'
not_inadequate = raw_data['Result of MRA Head 1'] != 'Technically inadequate'
is_done = raw_data['Result of MRA Head 1'] != 'Not done'
not_post_transf = raw_data['Is this patient post-transplant at initial visit?'] != 'Yes'
normal_or_scd = [any([i,j]) for i,j in zip(raw_data['Hemoglobin genotype'] == 'Normal (AA)', raw_data['Hemoglobin genotype'] == 'SS')]
#keep = [all(i) for i in zip(not_excluded, not_inadequate, is_done, not_post_transf, normal_or_scd)]
keep = [all(i) for i in zip(not_inadequate, is_done, not_post_transf, normal_or_scd)]
pt_data = raw_data[keep]
has_stenosis = pt_data[pt_data['Is there intracranial stenosis (>50%)?'] == 'Yes']
is_healthy = pt_data[pt_data['Result of MRA Head 1'] == 'Normal']
n_stenosis = len(has_stenosis)
add_healthy = is_healthy.sample(n_healthy)
pt_data = has_stenosis.append(add_healthy)
# FOR TESTING
# pt_data = pt_data.iloc[18:20]
n_unique_pts = len(pt_data)
print(f'We have {n_unique_pts} patients. {len(has_stenosis)} stenosis, {len(add_healthy)} healthy')
sleep(2)
pt_ids = []
for col in rect_name:
of_interest = list(pt_data[col])
pt_ids.extend(of_interest)
pt_ids = [x for x in pt_ids if str(x) != 'nan']
# create a nested dict giving the the status of each pt id (found their file, found specific scans)
inner_dict = {'found_pt':0}
for key in signature_relationships:
inner_dict[key] = (0,0)
inner_dict['successful'] = 0
pt_status = {pt:inner_dict.copy() for pt in pt_ids}
# start processing
all_subdirectories = [x[0] for x in os.walk(filefolder)] # list of all possible subdirectories
for i, pt in enumerate(pt_ids):
print(f'\nOn patient {pt} ({i+1} of {len(pt_ids)})\n')
candidate_folders = [sub for sub in all_subdirectories if get_terminal(sub) == pt] # check if last subfolder is pt name
n_cands = len(candidate_folders)
pt_status[pt]['found_pt'] = n_cands
if n_cands == 1:
data_folder = candidate_folders[0]
else:
print(f'------ pt {pt} has {n_cands} candidate folders. skipping ------')
continue
master_output_folder = os.path.join(targetfolder, pt)
if os.path.exists(master_output_folder):
if overwrite:
shutil.rmtree(master_output_folder)
else:
print(f'--- pt {pt} exists in target folder and overwrite is disabled. skipping ---')
continue
has_required_files = True
bin_folder = os.path.join(master_output_folder, 'bin') # bin for working with data
processed_folder = os.path.join(master_output_folder, 'processed') # where we'll write the final data to
acquired_folder = os.path.join(data_folder, 'Acquired') # where we're looking to pull data from
sig_tracker = {} # to store filepaths to files
optional_and_missing = []
for signature, subdict in signature_relationships.items():
candidate_pars = []
candidate_recs = []
# note that the signature matching includes the full path. probably not a great idea
for subsig in signature:
potential_pars = glob.glob(os.path.join(acquired_folder, f'*{subsig}*.PAR'))
potential_recs = glob.glob(os.path.join(acquired_folder, f'*{subsig}*.REC'))
potential_pars = [f for f in potential_pars if not any_in_str(f, subdict['excl'])]
potential_recs = [f for f in potential_recs if not any_in_str(f, subdict['excl'])]
candidate_pars.extend(potential_pars)
candidate_recs.extend(potential_recs)
n_cand_files = (len(candidate_pars), len(candidate_recs))
pt_status[pt][signature] = n_cand_files
if all([i >= 1 for i in n_cand_files]):
sig_tracker[signature] = {'original_par': candidate_pars[-1]}
sig_tracker[signature]['original_rec'] = candidate_recs[-1]
moved_par = os.path.join(bin_folder, get_terminal(candidate_pars[-1]))
moved_rec = os.path.join(bin_folder, get_terminal(candidate_recs[-1]))
sig_tracker[signature]['moved_par'] = moved_par
sig_tracker[signature]['moved_rec'] = moved_rec
if any(i != 1 for i in n_cand_files):
print(f'warning: pt {pt} returned {n_cand_files} for {signature}. using last option')
else:
if subdict['optional']:
print(f'pt {pt} has {n_cand_files} candidate par/recs for {signature}, but this is an optional signature')
optional_and_missing.append(signature)
else:
print(f'pt {pt} has {n_cand_files} candidate par/recs for {signature}. will be skipped')
has_required_files = False
if not has_required_files: # if we don't have all the files specified, just move on
continue
os.mkdir(master_output_folder)
os.mkdir(bin_folder)
os.mkdir(processed_folder)
try:
for signature, subdict in signature_relationships.items():
if signature in optional_and_missing:
continue
# move the file, convert to NiFTI and rename
shutil.copyfile(sig_tracker[signature]['original_par'], sig_tracker[signature]['moved_par'])
shutil.copyfile(sig_tracker[signature]['original_rec'], sig_tracker[signature]['moved_rec'])
moved_par_without_ext = sig_tracker[signature]['moved_par'][:-4]
conversion_command = f'{path_to_dcm2nii} -o {bin_folder} -a n -i n -d n -p n -e n -f y -v n {sig_tracker[signature]["moved_par"]}'
with suppress_stdout():
os.system(conversion_command)
sig_tracker[signature]['raw_nifti'] = os.path.join(bin_folder, f'{subdict["basename"]}_raw.nii.gz')
os.rename(f'{moved_par_without_ext}.nii.gz', sig_tracker[signature]['raw_nifti'])
except:
print(f'\n!!!!!!!!!! warning: encountered unexpected error while copying and converting images for pt {pt}. folder will be deleted !!!!!!!!!!\n')
shutil.rmtree(master_output_folder)
continue
# skullstripping
for signature, subdict in signature_relationships.items():
if signature in optional_and_missing:
continue
if subdict['skullstrip'] == 'yes':
sig_tracker[signature]['skullstripped_nifti'] = os.path.join(bin_folder, f'{subdict["basename"]}_stripped.nii.gz')
stripping_command = f"bet {sig_tracker[signature]['raw_nifti']} {sig_tracker[signature]['skullstripped_nifti']} -f {skullstrip_f_val}"
os.system(stripping_command)
else:
sig_tracker[signature]['skullstripped_nifti'] = sig_tracker[signature]['raw_nifti']
# registration
for signature, subdict in signature_relationships.items():
if signature in optional_and_missing:
continue
if subdict['register'] == 'master':
master_ref = sig_tracker[signature]['skullstripped_nifti']
omat_path = os.path.join(processed_folder, 'master2mni.mat')
mni_path = os.path.join(bin_folder, f'{subdict["basename"]}_mni.nii.gz')
omat_cmd = f'flirt -in {master_ref} -ref {mni_standard} -out {mni_path} -omat {omat_path}'
os.system(omat_cmd)
for signature, subdict in signature_relationships.items():
if signature in optional_and_missing:
continue
if subdict['register'] not in ('master', 'no'):
sig_tracker[signature]['registered_nifti'] = os.path.join(bin_folder, f'{subdict["basename"]}_registered.nii.gz')
register_command = f"flirt -in {sig_tracker[signature]['skullstripped_nifti']} -ref {master_ref} -out {sig_tracker[signature]['registered_nifti']}"
os.system(register_command)
else:
sig_tracker[signature]['registered_nifti'] = sig_tracker[signature]['skullstripped_nifti']
# move files to their final home :)
for signature, subdict in signature_relationships.items():
if signature in optional_and_missing:
continue
sig_tracker[signature]['final_nifti'] = os.path.join(master_output_folder, f'{subdict["basename"]}.nii.gz')
shutil.copyfile(sig_tracker[signature]['registered_nifti'], sig_tracker[signature]['final_nifti'])
# delete the subfolders
folder_glob = np.array(glob.glob(os.path.join(master_output_folder, '*/'))) # list of all possible subdirectories
for f in folder_glob:
shutil.rmtree(f)
# if the master folder is empty, that means that all files were optional, but none were found. I'd call that a failure and delete the folder
# but otherwise it's fine
file_glob = np.array(glob.glob(os.path.join(master_output_folder, '*'))) # list of all files
if len(file_glob) == 0:
print(f'No files transferred to {master_output_folder}: deleting folder and marking as failure')
pt_status[pt]['successful'] = 0
successful += 0
shutil.rmtree(master_output_folder)
else:
pt_status[pt]['successful'] = 1
successful += 1
# write status log
end = time()
runtime = end-start
runtime_minutes_pretty = round(runtime/60, 2)
message_file.write(f'Successfully preprocessed {successful} of {len(pt_ids)} scans from {n_unique_pts} unique patients. Running time: {runtime_minutes_pretty} minutes\n\n\n')
df = pd.DataFrame()
for key, val in pt_status.items():
message_file.write(f'Patient {key}\n\t{str(val)}\n\n')
appender =
|
pd.Series(val, name=key)
|
pandas.Series
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
tm.assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError, match=msg):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
tm.assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
tm.assert_series_equal(result, expected)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
tm.assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
tm.assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert s.shape == rs.shape
assert rs is not s
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
tm.assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
tm.assert_series_equal(rs, expected)
def test_where_non_keyword_deprecation():
# GH 41485
s = Series(range(5))
msg = (
"In a future version of pandas all arguments of "
"Series.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.where(s > 1, 10, False)
expected = Series([10, 10, 2, 3, 4])
tm.assert_series_equal(expected, result)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where(1)
with pytest.raises(ValueError, match=msg):
s.where(cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
|
tm.assert_series_equal(s, expected)
|
pandas._testing.assert_series_equal
|
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import re
import sklearn.metrics.pairwise as pw
from scipy import sparse
from sklearn.metrics.pairwise import pairwise_distances
from scipy.sparse.linalg import svds
def recommend_books_userbased(userID):
books_details_df = pd.read_csv('C:/Users/Nikhita/Desktop/Dataset/Final/final_book_details.csv')
df_ratings = pd.read_csv('C:/Users/Nikhita/Desktop/Dataset/Final/ratings.csv')
df_books_ratings = df_ratings.pivot(
index='user_id',
columns='book_id',
values='rating'
).fillna(0)
R = df_books_ratings.values
user_ratings_mean = np.mean(R, axis=1)
R_demeaned = R - user_ratings_mean.reshape(-1, 1)
U, sigma, Vt = svds(R_demeaned, k=50)
sigma = np.diag(sigma)
all_user_predicted_ratings = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)
preds_df =
|
pd.DataFrame(all_user_predicted_ratings, columns=df_books_ratings.columns)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from backtester.constants import *
class InstrumentData(object):
'''
'''
def __init__(self, instrumentId, tradeSymbol, fileName=None, chunkSize=None):
self.__instrumentId = instrumentId
self.__tradeSymbol = tradeSymbol
self.__fileName = fileName
self.__bookDataSize = None
if chunkSize is None:
if fileName:
self.__bookData = pd.read_csv(fileName, index_col=0, parse_dates=True, dtype=float)
self.__bookData.dropna(inplace=True)
self.__bookDataSize = len(self.__bookData)
self.getBookDataChunk = self.__getBookDataInChunksFromDataFrame
else:
self.__bookData = pd.read_csv(fileName, index_col=0, parse_dates=True, dtype=float, chunksize=chunkSize)
self.getBookDataChunk = self.__getBookDataInChunksFromFile
def getInstrumentId(self):
return self.__instrumentId
def getTradeSymbol(self):
return self.__tradeSymbol
def getBookDataSize(self):
if self.__bookDataSize is None:
self.__bookDataSize = len(pd.read_csv(self.__fileName, index_col=0, usecols=[0]))
return self.__bookDataSize
def setBookData(self, data):
self.__bookData = data
self.__bookDataSize = len(self.__bookData)
def getBookData(self):
return self.__bookData
def getBookDataByFeature(self, feature):
return self.__bookData[feature]
# returns a chunk from already completely loaded data
def __getBookDataInChunksFromDataFrame(self, chunkSize):
if chunkSize <=0 :
logError("chunkSize must be a positive integer")
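# e.g. with bookDataSize=5 and chunkSize=2, np.arange(5) // 2 gives
# [0, 0, 1, 1, 2], so groupby yields row chunks (0, 1), (2, 3) and (4,)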
for chunkNumber, bookDataChunk in self.__bookData.groupby(np.arange(self.__bookDataSize) // chunkSize):
yield (chunkNumber, bookDataChunk)
# returns a chunk from __bookData generator after processing data
# TODO: implement proper padding such that all instruments have same index set (timeUpdates)
def __getBookDataInChunksFromFile(self, dateRange):
chunkNumber = -1
for bookDataChunk in self.__bookData:
chunkNumber += 1
bookDataChunk = self.__filterDataByDates(bookDataChunk, dateRange)
yield (chunkNumber, bookDataChunk)
# returns all timestamps in pandas series format
def getAllTimestamps(self):
if isinstance(self.__bookData, pd.DataFrame):
return self.__bookData.index
else:
return pd.read_csv(self.__fileName, index_col=0, usecols=[0]).index
# returns list of bookDataFeatures (columns)
def getBookDataFeatures(self):
if isinstance(self.__bookData, pd.DataFrame):
return list(self.__bookData.columns)
else:
return list(pd.read_csv(self.__fileName, index_col=0, nrows=1).columns)
def getTypeOfInstrument(self):
return INSTRUMENT_TYPE_STOCK
def filterDataByDates(self, dateRange):
if (dateRange == []) or (dateRange == ()):
return
elif type(dateRange) is list:
frames = []
for dr in dateRange:
frames.append(self.__bookData[dr[0]:dr[1]])
self.__bookData = pd.concat(frames)
else:
self.__bookData = self.__bookData[dateRange[0]:dateRange[1]]
self.__bookDataSize = len(self.__bookData)
def padInstrumentData(self, timeUpdates, method='ffill'):
timeUpdateSeries = pd.Series(timeUpdates)
if not timeUpdateSeries.isin(self.__bookData.index).all():
df = pd.DataFrame(index=timeUpdates, columns=self.__bookData.columns)
df.loc[self.__bookData.index] = self.__bookData.copy()
df.fillna(method=method, inplace=True)
df.fillna(0.0, inplace=True)
del self.__bookData
self.__bookData = df
self.__bookDataSize = len(self.__bookData)
# For internal use only
def __padInstrumentData(self, timeUpdates, data, method='ffill'):
timeUpdateSeries = pd.Series(timeUpdates)
if not timeUpdateSeries.isin(data.index).all():
newDataDf = pd.DataFrame(index=timeUpdates, columns=data.columns)
newDataDf.loc[data.index] = data
newDataDf.fillna(method=method, inplace=True)
newDataDf.fillna(0.0, inplace=True)
return newDataDf
return data
def __filterDataByDates(self, data, dateRange):
if (dateRange == []) or (dateRange == ()) or data is None:
return data
elif type(dateRange) is list:
frames = []
for dr in dateRange:
frames.append(data[dr[0]:dr[1]])
return
|
pd.concat(frames)
|
pandas.concat
|
import numpy as np
import math
import pandas as pd
import pandasql as ps
import matplotlib.pyplot as plt
import pickle
import multiprocessing
## Function to divide the GRID Area into Pixels
## Parameters needed - 1. pixlatmin - float - Minimum value of latitude (GRID boundary) 2. pixlatmax - float - Maximum value of latitude (GRID boundary)
## 3. pixlonmin - float - Minimum value of longitude (GRID boundary) 4. pixlonmax - float - Maximum value of longitude (GRID boundary)
## 5. pixelsize - Number - Number of pixels per side of the grid cell 6. grid_no - Number - The id of the grid
def GetPixelDF(pixlatmin,pixlatmax,pixlonmin,pixlonmax,pixelsize,grid_no):
fact=100000000
latmin = np.int(pixlatmin*fact)
latmax = np.int(pixlatmax*fact)
longmin = np.int(pixlonmin*fact)
longmax = np.int(pixlonmax*fact)
pixelLatRangeStep = np.int((latmax-latmin)/(pixelsize))
pixelLonRangeStep = np.int((longmax-longmin)/(pixelsize))
pixlatvals = list(np.round(np.arange(latmin,latmax,pixelLatRangeStep)/fact,5))
if len(pixlatvals) == pixelsize:
pixlatvals.append(pixlatmax)
pixlonvals = list(np.round(np.arange(longmin,longmax,pixelLonRangeStep)/fact,5))
if len(pixlonvals) == pixelsize:
pixlonvals.append(pixlonmax)
ret_df = []
pixno = 1
for i in range(len(pixlatvals)-1):
minlat = pixlatvals[i]
maxlat = pixlatvals[i+1]
for j in range(len(pixlonvals)-1):
minlong = pixlonvals[j]
maxlong = pixlonvals[j+1]
ret_df.append([grid_no,pixno,minlat,maxlat,minlong,maxlong])
pixno +=1
ret_df = pd.DataFrame(ret_df,columns =['grid','pixno','minlat','maxlat','minlong','maxlong'])
return ret_df
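## Minimal usage sketch (illustrative values, not part of the pipeline; assumes the
## older numpy this script targets, since np.int is used above): split a 1-degree by
## 1-degree grid cell into a 10x10 block of pixels, one DataFrame row per pixel.
if __name__ == '__main__':
    example_pixels = GetPixelDF(23.0, 24.0, -124.5, -123.5, 10, grid_no=1)
    print(example_pixels.head())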
## Function to divide the whole country into GRIDS and Pixels
## Parameters needed - 1. latlongrange - Tuple - Coordinate boundary of the country (south, north, west, east) 2. latstep - Number - Number of divisions over the latitude range
## 3. longstep - Number - Number of divisions over the longitude range 4. margin - Number - Overlapping adjustment for pixel boundaries
## 5. pixelsize - Number - Pixel size of each sub-pixel 6. counties - DataFrame - The county DataFrame containing the latitude, longitude and population data
def get_The_Area_Grid(latlongrange,latstep,longstep,margin,pixelsize, counties):
fact=100000000
(min_lat,max_lat,min_long,max_long) = latlongrange#(23, 49,-124.5, -66.31)
min_lat = np.int(min_lat*fact)
max_lat = np.int(max_lat*fact)
min_long = np.int(min_long*fact)
max_long = np.int(max_long*fact)
range_of_longitude = max_long - min_long
range_of_latitude = max_lat - min_lat
block_longitude = np.int(range_of_longitude/(longstep))
block_latitude = np.int(range_of_latitude/(latstep))
lattitudes = list(np.round(np.arange(min_lat,max_lat,block_latitude)/fact,5))
if len(lattitudes) == latstep:
lattitudes.append(max_lat/fact)
longitudes = list(np.round(np.arange(min_long,max_long,block_longitude)/fact,5))
if len(longitudes) == longstep:
longitudes.append(max_long/fact)
print(len(lattitudes),len(longitudes))
#print(longitudes)
Area_Grid = {}
Area_pixel_Grid =
|
pd.DataFrame()
|
pandas.DataFrame
|
from glob import glob
import os
import pandas as pd
from tqdm import tqdm
import yaml
def read_lf_list(path: str, twinting: bool = True) -> list:
"""
input:
---------
path [str]: path to alternative writing dictionary with the following structure:
```
муу|muu|mu|...
...
```
return:
---------
list of list of words
"""
data = []
with open(path, "r") as reader:
for l in reader.readlines():
l = l.rstrip("\n")
words = [w.strip() for w in l.split("|")]
data.append(words)
if twinting and len(data[0]) > 0:
from .twint_helper import getAndSaveTweetsByKeyword
for words in data:
for word in words:
getAndSaveTweetsByKeyword(word)
return data
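# Minimal usage sketch (hypothetical temp file; twinting disabled so nothing is
# scraped from Twitter): each line of the dictionary is split on '|' into one
# group of alternative spellings.
def _demo_read_lf_list():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write("muu|mu\n")
        path = tmp.name
    return read_lf_list(path, twinting=False)  # -> [['muu', 'mu']]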
def read_config(path: str) -> dict:
config: dict
with open(path, 'r') as stream:
try:
config = yaml.safe_load(stream)
print(f"""---------- CONFIG ----------\n{config}\n""")
except yaml.YAMLError as exc:
print(exc)
return config
def read_tweet_dump(directory: str) -> pd.DataFrame:
from .config import config
ll = sorted(glob(os.path.join(directory, "*.csv")))
if len(ll) == 0:
raise Exception(f"No csv data found inside {directory}")
df = pd.DataFrame(columns=config['data']['columns'])
for l in tqdm(ll):
df = df.append(
|
pd.read_csv(l, usecols=config['data']['columns'])
|
pandas.read_csv
|
"""
Tests for the pandas.io.common functionalities
"""
import mmap
import os
import re
import pytest
from pandas.compat import FileNotFoundError, StringIO, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas.util.testing as tm
import pandas.io.common as icom
class CustomFSPath(object):
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath]
try:
from pathlib import Path
path_types.append(Path)
except ImportError:
pass
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestCommonIOCapabilities(object):
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = '~/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
@td.skip_if_no('pathlib')
def test_stringify_path_pathlib(self):
rel_path = icom._stringify_path(Path('.'))
assert rel_path == '.'
redundant_path = icom._stringify_path(Path('foo//bar'))
assert redundant_path == os.path.join('foo', 'bar')
@td.skip_if_no('py.path')
def test_stringify_path_localpath(self):
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom._stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath('foo/bar.csv')
result = icom._stringify_path(p)
assert result == 'foo/bar.csv'
@pytest.mark.parametrize('extension,expected', [
('', None),
('.gz', 'gzip'),
('.bz2', 'bz2'),
('.zip', 'zip'),
('.xz', 'xz'),
])
@pytest.mark.parametrize('path_type', path_types)
def test_infer_compression_from_path(self, extension, expected, path_type):
path = path_type('foo/bar.csv' + extension)
compression = icom._infer_compression(path, compression='infer')
assert compression == expected
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
filename)
assert filepath_or_buffer != filename
assert os.path.isabs(filepath_or_buffer)
assert os.path.expanduser(filename) == filepath_or_buffer
assert not should_close
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
input_buffer)
assert filepath_or_buffer == input_buffer
assert not should_close
def test_iterator(self):
reader = pd.read_csv(StringIO(self.data1), chunksize=1)
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
it = pd.read_csv(StringIO(self.data1), chunksize=1)
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_non_existant(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, 'data', 'does_not_exist.' + fn_ext)
with pytest.raises(error_class):
reader(path)
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_expands_user_home_dir(self, reader, module,
error_class, fn_ext, monkeypatch):
pytest.importorskip(module)
path = os.path.join('~', 'does_not_exist.' + fn_ext)
monkeypatch.setattr(icom, '_expand_user',
lambda x: os.path.join('foo', x))
message = "".join(["foo", os.path.sep, "does_not_exist.", fn_ext])
with pytest.raises(error_class, message=re.escape(message)):
reader(path)
def test_read_non_existant_read_table(self):
path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv')
with pytest.raises(FileNotFoundError):
with tm.assert_produces_warning(FutureWarning):
pd.read_table(path)
@pytest.mark.parametrize('reader, module, path', [
(pd.read_csv, 'os', ('io', 'data', 'iris.csv')),
(pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')),
(pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')),
(pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf',
'datetimetz_object.h5')),
(pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')),
(pd.read_sas, 'os', ('io', 'sas', 'data', 'test1.sas7bdat')),
(pd.read_json, 'os', ('io', 'json', 'data', 'tsframe_v012.json')),
(pd.read_msgpack, 'os', ('io', 'msgpack', 'data', 'frame.mp')),
(pd.read_pickle, 'os', ('io', 'data', 'categorical_0_14_1.pickle')),
])
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_read_fspath_all_read_table(self, datapath):
path = datapath('io', 'data', 'iris.csv')
mypath = CustomFSPath(path)
with tm.assert_produces_warning(FutureWarning):
result = pd.read_table(mypath)
with tm.assert_produces_warning(FutureWarning):
expected = pd.read_table(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('writer_name, writer_kwargs, module', [
('to_csv', {}, 'os'),
('to_excel', {'engine': 'xlwt'}, 'xlwt'),
('to_feather', {}, 'feather'),
('to_html', {}, 'os'),
('to_json', {}, 'os'),
('to_latex', {}, 'os'),
('to_msgpack', {}, 'os'),
('to_pickle', {}, 'os'),
('to_stata', {}, 'os'),
])
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
p1 = tm.ensure_clean('string')
p2 = tm.ensure_clean('fspath')
df = pd.DataFrame({"A": [1, 2]})
with p1 as string, p2 as fspath:
pytest.importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
writer(string, **writer_kwargs)
with open(string, 'rb') as f:
expected = f.read()
writer(mypath, **writer_kwargs)
with open(fspath, 'rb') as f:
result = f.read()
assert result == expected
def test_write_fspath_hdf5(self):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
pytest.importorskip('tables')
df = pd.DataFrame({"A": [1, 2]})
p1 = tm.ensure_clean('string')
p2 = tm.ensure_clean('fspath')
with p1 as string, p2 as fspath:
mypath = CustomFSPath(fspath)
df.to_hdf(mypath, key='bar')
df.to_hdf(string, key='bar')
result = pd.read_hdf(fspath, key='bar')
expected = pd.read_hdf(string, key='bar')
tm.assert_frame_equal(result, expected)
@pytest.fixture
def mmap_file(datapath):
return datapath('io', 'data', 'test_mmap.csv')
class TestMMapWrapper(object):
def test_constructor_bad_file(self, mmap_file):
non_file = StringIO('I am not a file')
non_file.fileno = lambda: -1
# the error raised is different on Windows
if
|
is_platform_windows()
|
pandas.compat.is_platform_windows
|
from evalutils.exceptions import ValidationError
from evalutils.io import CSVLoader, FileLoader, ImageLoader
import json
import nibabel as nib
import numpy as np
import os.path
from pathlib import Path
from pandas import DataFrame, MultiIndex
import scipy.ndimage
from scipy.ndimage.interpolation import map_coordinates, zoom
from surface_distance import *
##### paths #####
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
##### metrics #####
def jacobian_determinant(disp):
_, _, H, W, D = disp.shape
gradx = np.array([-0.5, 0, 0.5]).reshape(1, 3, 1, 1)
grady = np.array([-0.5, 0, 0.5]).reshape(1, 1, 3, 1)
gradz = np.array([-0.5, 0, 0.5]).reshape(1, 1, 1, 3)
gradx_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradx, mode='constant', cval=0.0)], axis=1)
grady_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], grady, mode='constant', cval=0.0)], axis=1)
gradz_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradz, mode='constant', cval=0.0)], axis=1)
grad_disp = np.concatenate([gradx_disp, grady_disp, gradz_disp], 0)
jacobian = grad_disp + np.eye(3, 3).reshape(3, 3, 1, 1, 1)
jacobian = jacobian[:, :, 2:-2, 2:-2, 2:-2]
jacdet = jacobian[0, 0, :, :, :] * (jacobian[1, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[1, 2, :, :, :] * jacobian[2, 1, :, :, :]) -\
jacobian[1, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[2, 1, :, :, :]) +\
jacobian[2, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[1, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[1, 1, :, :, :])
return jacdet
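# Quick sanity-check sketch (an illustration, not used by the evaluator): a zero
# displacement field has an identity Jacobian, so its determinant is 1 everywhere.
if __name__ == '__main__':
    assert np.allclose(jacobian_determinant(np.zeros((1, 3, 16, 16, 16))), 1.0)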
def compute_tre(x, y, spacing):
return np.linalg.norm((x - y) * spacing, axis=1)
##### file loader #####
class NiftiLoader(ImageLoader):
@staticmethod
def load_image(fname):
return nib.load(str(fname))
@staticmethod
def hash_image(image):
return hash(image.get_fdata().tostring())
class NumpyLoader(ImageLoader):
@staticmethod
def load_image(fname):
return np.load(str(fname))['arr_0']
@staticmethod
def hash_image(image):
return hash(image.tostring())
class CURIOUSLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines()[5:]:
lms = [float(lm) for lm in line.split(' ')[1:-1]]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
class L2RLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines():
lms = [float(lm) for lm in line.split(',')]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
##### validation errors #####
def raise_missing_file_error(fname):
message = (
f"The displacement field {fname} is missing. "
f"Please provide all required displacement fields."
)
raise ValidationError(message)
def raise_dtype_error(fname, dtype):
message = (
f"The displacement field {fname} has a wrong dtype ('{dtype}'). "
f"All displacement fields should have dtype 'float16'."
)
raise ValidationError(message)
def raise_shape_error(fname, shape, expected_shape):
message = (
f"The displacement field {fname} has a wrong shape ('{shape[0]}x{shape[1]}x{shape[2]}x{shape[3]}'). "
f"The expected shape of displacement fields for this task is {expected_shape[0]}x{expected_shape[1]}x{expected_shape[2]}x{expected_shape[3]}."
)
raise ValidationError(message)
##### eval val #####
class EvalVal():
def __init__(self):
self.ground_truth_path = DEFAULT_GROUND_TRUTH_PATH
self.predictions_path = DEFAULT_INPUT_PATH
self.output_file = DEFAULT_EVALUATION_OUTPUT_FILE_PATH
self.csv_loader = CSVLoader()
self.nifti_loader = NiftiLoader()
self.numpy_loader = NumpyLoader()
self.curious_lms_loader = CURIOUSLmsLoader()
self.l2r_lms_loader = L2RLmsLoader()
self.pairs_task_01 = DataFrame()
self.imgs_task_01 = DataFrame()
self.lms_task_01 = DataFrame()
self.disp_fields_task_01 = DataFrame()
self.cases_task_01 = DataFrame()
self.pairs_task_02 = DataFrame()
self.imgs_task_02 = DataFrame()
self.lms_task_02 = DataFrame()
self.disp_fields_task_02 = DataFrame()
self.cases_task_02 = DataFrame()
self.pairs_task_03 = DataFrame()
self.segs_task_03 = DataFrame()
self.disp_fields_task_03 = DataFrame()
self.cases_task_03 = DataFrame()
self.pairs_task_04 = DataFrame()
self.segs_task_04 = DataFrame()
self.disp_fields_task_04 = DataFrame()
self.cases_task_04 = DataFrame()
def evaluate(self):
self.load_task_01()
self.merge_ground_truth_and_predictions_task_01()
self.score_task_01()
self.load_task_02()
self.merge_ground_truth_and_predictions_task_02()
self.score_task_02()
self.load_task_03()
self.merge_ground_truth_and_predictions_task_03()
self.score_task_03()
self.load_task_04()
self.merge_ground_truth_and_predictions_task_04()
self.score_task_04()
self.save()
def load_task_01(self):
self.pairs_task_01 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'pairs_val.csv')
self.imgs_task_01 = self.load_imgs_task_01()
self.lms_task_01 = self.load_lms_task_01()
self.disp_fields_task_01 = self.load_disp_fields(self.pairs_task_01, DEFAULT_INPUT_PATH / 'task_01', np.array([3, 128, 128, 144]))
def load_task_02(self):
self.pairs_task_02 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'pairs_val.csv')
self.imgs_task_02 = self.load_imgs_task_02()
self.lms_task_02 = self.load_lms_task_02()
self.disp_fields_task_02 = self.load_disp_fields(self.pairs_task_02, DEFAULT_INPUT_PATH / 'task_02', np.array([3, 96, 96, 104]))
def load_task_03(self):
self.pairs_task_03 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_03' / 'pairs_val.csv')
self.segs_task_03 = self.load_segs_task_03()
self.disp_fields_task_03 = self.load_disp_fields(self.pairs_task_03, DEFAULT_INPUT_PATH / 'task_03', np.array([3, 96, 80, 128]))
def load_task_04(self):
self.pairs_task_04 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_04' / 'pairs_val.csv')
self.segs_task_04 = self.load_segs_task_04()
self.disp_fields_task_04 = self.load_disp_fields(self.pairs_task_04, DEFAULT_INPUT_PATH / 'task_04', np.array([3, 64, 64, 64]))
def load_imgs_task_01(self):
cases = None
for _, row in self.pairs_task_01.iterrows():
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'EASY-RESECT' / 'NIFTI' / 'Case{}'.format(row['fixed']) / 'Case{}-FLAIR-resize.nii'.format(row['fixed']))
if cases is None:
cases = case
index = [row['fixed']]
else:
cases += case
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_imgs_task_02(self):
cases = None
for _, row in self.pairs_task_02.iterrows():
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'training' / 'lungMasks' / 'case_{:03d}_exp.nii.gz'.format(row['fixed']))
if cases is None:
cases = case
index = [row['fixed']]
else:
cases += case
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_segs_task_03(self):
cases = None
indices = []
for _, row in self.pairs_task_03.iterrows():
indices.append(row['fixed'])
indices.append(row['moving'])
indices = np.array(indices)
for i in np.unique(indices):
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_03' / 'Training' / 'label' / 'label{:04d}.nii.gz'.format(i))
if cases is None:
cases = case
index = [i]
else:
cases += case
index += [i]
return DataFrame(cases, index=index)
def load_segs_task_04(self):
cases = None
indices = []
for _, row in self.pairs_task_04.iterrows():
indices.append(row['fixed'])
indices.append(row['moving'])
indices = np.array(indices)
for i in np.unique(indices):
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_04' / 'Training' / 'label' / 'hippocampus_{}.nii.gz'.format(i))
if cases is None:
cases = case
index = [i]
else:
cases += case
index += [i]
return DataFrame(cases, index=index)
def load_lms_task_01(self):
cases = None
for _, row in self.pairs_task_01.iterrows():
case = self.curious_lms_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'EASY-RESECT' / 'landmarks' / 'Coordinates' / 'Case{}-MRI-beforeUS.tag'.format(row['fixed']))
if cases is None:
cases = [case]
index = [row['fixed']]
else:
cases += [case]
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_lms_task_02(self):
cases = None
for _, row in self.pairs_task_02.iterrows():
case = self.l2r_lms_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'training' / 'lms' / 'case_{:03d}.txt'.format(row['fixed']))
if cases is None:
cases = [case]
index = [row['fixed']]
else:
cases += [case]
index += [row['fixed']]
return DataFrame(cases, index=index)
def merge_ground_truth_and_predictions_task_01(self):
cases = []
for _, row in self.pairs_task_01.iterrows():
case = {'img' : self.imgs_task_01.loc[row['fixed']],
'lms_fixed' : self.lms_task_01.loc[row['fixed']]['lms_fixed'],
'lms_moving' : self.lms_task_01.loc[row['moving']]['lms_moving'],
'disp_field' : self.disp_fields_task_01.loc[(row['fixed'], row['moving'])]}
cases += [case]
self.cases_task_01 =
|
DataFrame(cases)
|
pandas.DataFrame
|
import streamlit as st
import base64
from io import BytesIO
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # For creating plots
import matplotlib.ticker as mtick # For specifying the axes tick format
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from itertools import cycle, islice
import xlsxwriter
def download_link(object_to_download, download_filename, download_link_text):
"""
Generates a link to download the given object_to_download.
object_to_download (str, pd.DataFrame): The object to be downloaded.
download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt
download_link_text (str): Text to display for download link.
Examples:
download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')
download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')
"""
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
def main():
def MLmodels(df):
columns = ["Churn"]
try:
for i in columns:
df[i].replace(to_replace='Yes', value=1, inplace=True)
df[i].replace(to_replace='No', value=0, inplace=True)
except:
pass
df_train = df[df['Churn']>-1]
df_train = df_train.iloc[:,1:] #removing customer ID
column_means_train = df_train.mean()
df_train = df_train.fillna(column_means_train)
#df_train.dropna(inplace = True)
df_train = pd.get_dummies(df_train)
df_predict = df[df['Churn'].isna()]
y = df_train['Churn'].values
X = df_train.drop(columns = ['Churn'])
features = X.columns.values
scaler = MinMaxScaler(feature_range = (0,1))
scaler.fit(X)
X = pd.DataFrame(scaler.transform(X))
X.columns = features
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=101)
def gridsearchfunction(grid,model):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = grid_search.fit(X_train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
return grid_result
def logisticmodel(X_train,y_train,X_test,y_test):
model = LogisticRegression()
#solvers = ['newton-cg', 'lbfgs', 'liblinear']
#penalty = ['l2']
#c_values = [100, 10, 1.0, 0.1, 0.01]
solvers = ['newton-cg']
penalty = ['l2']
c_values = [10]
grid = dict(solver=solvers,penalty=penalty,C=c_values)
grid_result = gridsearchfunction(grid,model)
model = LogisticRegression(**grid_result.best_params_)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
y_proba = model.predict_proba(X_test)
logaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {logaccuracy}")
return y_predicted,logaccuracy,grid_result.best_params_
def ridgemodel(X_train,y_train,X_test,y_test):
model = RidgeClassifier()
#alpha = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
alpha = [0.1]
grid = dict(alpha=alpha)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = gridsearchfunction(grid,model)
model = RidgeClassifier(**grid_result.best_params_)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
ridgeaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {ridgeaccuracy}")
return y_predicted,ridgeaccuracy,grid_result.best_params_
def KNNmodel(X_train,y_train,X_test,y_test):
model = KNeighborsClassifier()
#n_neighbors = range(1, 21, 2)
#weights = ['uniform', 'distance']
#metric = ['euclidean', 'manhattan', 'minkowski']
n_neighbors = [10]
weights = ['uniform']
metric = ['euclidean']
grid = dict(n_neighbors=n_neighbors,weights=weights,metric=metric)
grid_result = gridsearchfunction(grid,model)
model = KNeighborsClassifier(**grid_result.best_params_)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
KNNaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {KNNaccuracy}")
return y_predicted,KNNaccuracy,grid_result.best_params_
def SVMmodel(X_train,y_train,X_test,y_test):
model = SVC()
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
SVMaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {SVMaccuracy}")
return y_predicted,SVMaccuracy#,grid_result.best_params_
def bagging(X_train,y_train,X_test,y_test):
# define models and parameters
model = BaggingClassifier()
#n_estimators = [10, 100, 1000]
n_estimators = [10]
grid = dict(n_estimators=n_estimators)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = gridsearchfunction(grid,model)
model = BaggingClassifier(**grid_result.best_params_)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
bagaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {bagaccuracy}")
return y_predicted,bagaccuracy,grid_result.best_params_
def RF(X_train,y_train,X_test,y_test):
# define models and parameters
model = RandomForestClassifier()
#n_estimators = [10, 100, 1000]
#max_features = ['sqrt', 'log2']
n_estimators = [10]
max_features = ['sqrt']
grid = dict(n_estimators=n_estimators,max_features=max_features)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = gridsearchfunction(grid,model)
model = RandomForestClassifier(**grid_result.best_params_)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
RFaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {RFaccuracy}")
return y_predicted,RFaccuracy,grid_result.best_params_
def SGD(X_train,y_train,X_test,y_test):
model = GradientBoostingClassifier()
#n_estimators = [10, 100]
#learning_rate = [0.001, 0.01, 0.1]
#subsample = [0.5, 0.7, 1.0]
#max_depth = [3, 7, 9]
n_estimators = [10]
learning_rate = [0.1]
subsample = [0.5]
max_depth = [3]
grid = dict(learning_rate=learning_rate, n_estimators=n_estimators, subsample=subsample, max_depth=max_depth)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = gridsearchfunction(grid,model)
model = GradientBoostingClassifier(**grid_result.best_params_)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
SGDaccuracy = round((metrics.accuracy_score(y_test, y_predicted)),3)
print (f"Test accuracy is {SGDaccuracy}")
return y_predicted,SGDaccuracy, grid_result.best_params_
ylogpredicted,logaccuracy,logbestparam = logisticmodel(X_train,y_train,X_test,y_test)
yridgepredicted,ridgeaccuracy,ridgebestparam = ridgemodel(X_train,y_train,X_test,y_test)
yKNNpredicted,KNNaccuracy,KNNbestparam = KNNmodel(X_train,y_train,X_test,y_test)
ySVMpredicted,SVMaccuracy = SVMmodel(X_train,y_train,X_test,y_test)
ybagpredicted,bagaccuracy,bagbestparam = bagging(X_train,y_train,X_test,y_test)
yRFpredicted,RFaccuracy,RFbestparam = RF(X_train,y_train,X_test,y_test)
ySGDpredicted,SGDaccuracy,SGDbestparam = SGD(X_train,y_train,X_test,y_test)
data = [['Logistic', round(logaccuracy,3)],
['Ridge', round(ridgeaccuracy,3)],
['KNN', round(KNNaccuracy,3)],
['SVM', round(SVMaccuracy,3)],
['Bagging',round(bagaccuracy,3)],
['RF',round(RFaccuracy,3)],
['SGD',round(SGDaccuracy,3)]]
df = pd.DataFrame(data, columns = ['Model', 'Accuracy'])
df = df.sort_values(by='Accuracy', ascending=False)
df = df.reset_index(drop=True)
Xestimate = df_predict.iloc[:,1:] #removing customer ID
Xestimate = Xestimate.drop(columns = ['Churn'])
column_means_estimate = Xestimate.mean()
Xestimate = Xestimate.fillna(column_means_estimate)
#Xestimate = Xestimate.dropna()
df_predict["Churn"] = 2
column_means_predict = df_predict.mean()
df_predict = df_predict.fillna(column_means_predict)
#df_predict = df_predict.dropna()
Xestimate =
|
pd.get_dummies(Xestimate)
|
pandas.get_dummies
|